---
# NOTE: ollama in the 'ai' namespace is currently scaled to 0 replicas (intentionally stopped).
# The actual AI workload runs on the openclaw VM (192.168.2.88) via Ollama system service.

# Persistent model/config storage for Ollama, backed by NFS so it survives
# pod rescheduling and can be mounted from any node (ReadWriteMany).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations: {}
  name: ollama-data
  namespace: ai
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
  storageClassName: nfs-vtrak
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: {}
  name: ollama
  namespace: ai
spec:
  # Intentionally 0 per the NOTE at the top of this file: the live Ollama
  # instance runs on the openclaw VM, not in-cluster. Scale back to 1 to
  # re-enable the in-cluster deployment.
  replicas: 0
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      containers:
        # NOTE(review): ':latest' is an unpinned tag — consider pinning a
        # specific version for reproducible rollouts; confirm desired version.
        - image: ollama/ollama:latest
          name: ollama
          ports:
            # Default Ollama API port.
            - containerPort: 11434
              name: http
          resources:
            limits:
              # Quoted so '8' stays a string CPU quantity, not a bare int.
              cpu: '8'
              memory: 24Gi
            requests:
              cpu: 500m
              memory: 2Gi
          volumeMounts:
            # Ollama stores models and config under /root/.ollama.
            - mountPath: /root/.ollama
              name: ollama-storage
      volumes:
        - name: ollama-storage
          persistentVolumeClaim:
            claimName: ollama-data
---
# Stable in-cluster endpoint for the Ollama API; resolves to nothing while
# the Deployment is scaled to 0.
apiVersion: v1
kind: Service
metadata:
  annotations: {}
  name: ollama
  namespace: ai
spec:
  ports:
    - name: http
      port: 11434
      targetPort: 11434
  selector:
    app: ollama