- CLAUDE.md for AI agents to understand the codebase - GITEA-GUIDE.md centralizes all Gitea operations (API, Registry, Auth) - DEVELOPMENT-WORKFLOW.md explains complete dev process - ROADMAP.md, NEXT-SESSION.md for planning - QUICK-REFERENCE.md, TROUBLESHOOTING.md for daily use - 40+ detailed docs in /docs folder - Backend as submodule from Gitea Everything documented for autonomous operation. Co-Authored-By: Claude Sonnet 4.5 (1M context) <noreply@anthropic.com>
15 KiB
15 KiB
Kubernetes Deployments
Backend API Deployment
# k8s/control-plane/backend-deployment.yaml
# Backend API: Deployment (2 replicas) + ClusterIP Service + TLS Ingress.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: aiworker-backend
  namespace: control-plane
  labels:
    app: aiworker-backend
    version: v1
spec:
  replicas: 2
  selector:
    matchLabels:
      app: aiworker-backend
  template:
    metadata:
      labels:
        app: aiworker-backend
        version: v1
    spec:
      serviceAccountName: aiworker-backend
      containers:
        - name: backend
          image: aiworker/backend:latest
          imagePullPolicy: Always
          ports:
            - name: http
              containerPort: 3000
            - name: mcp
              containerPort: 3100
          env:
            - name: NODE_ENV
              value: "production"
            - name: PORT
              value: "3000"
            - name: DB_HOST
              value: "mysql.control-plane.svc.cluster.local"
            - name: DB_PORT
              value: "3306"
            - name: DB_NAME
              value: "aiworker"
            # NOTE(review): connects as root; a dedicated least-privilege DB
            # user would be safer.
            - name: DB_USER
              value: "root"
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: aiworker-secrets
                  key: db-password
            - name: REDIS_HOST
              value: "redis.control-plane.svc.cluster.local"
            - name: REDIS_PORT
              value: "6379"
            - name: GITEA_URL
              value: "http://gitea.gitea.svc.cluster.local:3000"
            - name: GITEA_TOKEN
              valueFrom:
                secretKeyRef:
                  name: aiworker-secrets
                  key: gitea-token
            # Tells the backend's K8sClient to use in-cluster auth.
            - name: K8S_IN_CLUSTER
              value: "true"
          resources:
            requests:
              cpu: "500m"
              memory: "1Gi"
            limits:
              cpu: "2"
              memory: "4Gi"
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: aiworker-backend
  namespace: control-plane
spec:
  selector:
    app: aiworker-backend
  ports:
    - name: http
      port: 3000
      targetPort: 3000
    - name: mcp
      port: 3100
      targetPort: 3100
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: aiworker-backend
  namespace: control-plane
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    # Required so ingress-nginx proxies WebSocket upgrades to this service.
    nginx.ingress.kubernetes.io/websocket-services: "aiworker-backend"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - api.aiworker.dev
      secretName: aiworker-backend-tls
  rules:
    - host: api.aiworker.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: aiworker-backend
                port:
                  number: 3000
MySQL Deployment
# k8s/control-plane/mysql-deployment.yaml
# MySQL: PVC + single-replica Deployment + ClusterIP Service.
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
  namespace: control-plane
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
  namespace: control-plane
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  # Recreate: the RWO volume cannot be attached by two pods at once, so the
  # old pod must terminate before a new one starts.
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:8.0
          ports:
            - containerPort: 3306
              name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: aiworker-secrets
                  key: db-password
            - name: MYSQL_DATABASE
              value: "aiworker"
          volumeMounts:
            - name: mysql-storage
              mountPath: /var/lib/mysql
          resources:
            requests:
              cpu: "500m"
              memory: "1Gi"
            limits:
              cpu: "2"
              memory: "4Gi"
          livenessProbe:
            exec:
              command:
                - mysqladmin
                - ping
                - "-h"
                - localhost
            initialDelaySeconds: 30
            periodSeconds: 10
      volumes:
        - name: mysql-storage
          persistentVolumeClaim:
            claimName: mysql-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: control-plane
spec:
  selector:
    app: mysql
  ports:
    - port: 3306
      targetPort: 3306
  type: ClusterIP
Redis Deployment
# k8s/control-plane/redis-deployment.yaml
# Redis: single-replica Deployment + ClusterIP Service (no persistence).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: control-plane
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          ports:
            - containerPort: 6379
              name: redis
          # Cap memory below the 2Gi container limit and evict
          # least-recently-used keys when full.
          args:
            - "--maxmemory"
            - "2gb"
            - "--maxmemory-policy"
            - "allkeys-lru"
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "1"
              memory: "2Gi"
          livenessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 15
            periodSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: control-plane
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      targetPort: 6379
  type: ClusterIP
Claude Code Agent Pod Template
# k8s/agents/agent-pod-template.yaml
# Template manifest: every "{agent-id}" placeholder is substituted by the
# backend before the pod is created (see K8sClient.createAgentPod).
---
apiVersion: v1
kind: Pod
metadata:
  name: claude-agent-{agent-id}
  namespace: agents
  labels:
    app: claude-agent
    agent-id: "{agent-id}"
    managed-by: aiworker
spec:
  containers:
    - name: agent
      image: aiworker/claude-agent:latest
      env:
        - name: AGENT_ID
          value: "{agent-id}"
        - name: MCP_SERVER_URL
          value: "http://aiworker-backend.control-plane.svc.cluster.local:3100"
        - name: ANTHROPIC_API_KEY
          valueFrom:
            secretKeyRef:
              name: aiworker-secrets
              key: anthropic-api-key
        - name: GITEA_URL
          value: "http://gitea.gitea.svc.cluster.local:3000"
        - name: GIT_SSH_KEY
          valueFrom:
            secretKeyRef:
              name: git-ssh-keys
              key: private-key
      resources:
        requests:
          cpu: "500m"
          memory: "1Gi"
        limits:
          cpu: "2"
          memory: "4Gi"
      volumeMounts:
        - name: workspace
          mountPath: /workspace
        # subPath mounts only the .gitconfig file, not the whole ConfigMap
        # directory, so the rest of /root is untouched.
        - name: git-config
          mountPath: /root/.gitconfig
          subPath: .gitconfig
  volumes:
    - name: workspace
      emptyDir: {}
    - name: git-config
      configMap:
        name: git-config
  # One-shot workload: the agent runs to completion and is not restarted.
  restartPolicy: Never
Preview Deployment Template
// services/kubernetes/templates/preview-deployment.ts
/**
 * Builds the Deployment manifest for a per-task preview environment.
 *
 * The namespace is derived from the first 8 chars of the task id
 * (`preview-task-<id8>`) and the deployment name from the project name
 * (`<project>-preview`). The container image is tagged with the branch name.
 */
export function generatePreviewDeployment(params: {
  taskId: string
  projectId: string
  projectName: string
  image: string
  branch: string
  envVars: Record<string, string>
}) {
  const shortTaskId = params.taskId.slice(0, 8)
  const appName = `${params.projectName}-preview`

  // Labels shared by the Deployment and its pod template.
  const podLabels = {
    app: appName,
    project: params.projectId,
    task: params.taskId,
  }

  // envVars map → k8s env entry list.
  const envEntries = Object.keys(params.envVars).map((key) => ({
    name: key,
    value: params.envVars[key],
  }))

  const appContainer = {
    name: 'app',
    image: `${params.image}:${params.branch}`,
    ports: [
      {
        name: 'http',
        containerPort: 3000,
      },
    ],
    env: envEntries,
    resources: {
      requests: {
        cpu: '250m',
        memory: '512Mi',
      },
      limits: {
        cpu: '1',
        memory: '2Gi',
      },
    },
  }

  return {
    apiVersion: 'apps/v1',
    kind: 'Deployment',
    metadata: {
      name: appName,
      namespace: `preview-task-${shortTaskId}`,
      labels: {
        ...podLabels,
        environment: 'preview',
      },
    },
    spec: {
      replicas: 1,
      selector: {
        matchLabels: { app: appName },
      },
      template: {
        metadata: { labels: podLabels },
        spec: { containers: [appContainer] },
      },
    },
  }
}
/**
 * Builds the ClusterIP Service that fronts a preview Deployment.
 * Listens on port 80 and forwards to the app container's port 3000.
 */
export function generatePreviewService(params: {
  taskId: string
  projectName: string
}) {
  const previewName = `${params.projectName}-preview`
  const previewNamespace = `preview-task-${params.taskId.slice(0, 8)}`

  return {
    apiVersion: 'v1',
    kind: 'Service',
    metadata: {
      name: previewName,
      namespace: previewNamespace,
    },
    spec: {
      selector: { app: previewName },
      ports: [{ port: 80, targetPort: 3000 }],
      type: 'ClusterIP',
    },
  }
}
/**
 * Builds the TLS Ingress exposing a preview environment at
 * `task-<id8>.preview.aiworker.dev`, with a cert-manager-issued certificate,
 * routing all paths to the preview Service on port 80.
 */
export function generatePreviewIngress(params: {
  taskId: string
  projectName: string
}) {
  const shortTaskId = params.taskId.slice(0, 8)
  const previewName = `${params.projectName}-preview`
  const previewHost = `task-${shortTaskId}.preview.aiworker.dev`

  // Single catch-all route to the preview Service.
  const httpPaths = [
    {
      path: '/',
      pathType: 'Prefix',
      backend: {
        service: {
          name: previewName,
          port: { number: 80 },
        },
      },
    },
  ]

  return {
    apiVersion: 'networking.k8s.io/v1',
    kind: 'Ingress',
    metadata: {
      name: previewName,
      namespace: `preview-task-${shortTaskId}`,
      annotations: {
        'cert-manager.io/cluster-issuer': 'letsencrypt-prod',
      },
    },
    spec: {
      ingressClassName: 'nginx',
      tls: [
        {
          hosts: [previewHost],
          secretName: `${previewName}-tls`,
        },
      ],
      rules: [
        {
          host: previewHost,
          http: { paths: httpPaths },
        },
      ],
    },
  }
}
Kubernetes Client Implementation
// services/kubernetes/client.ts
import {
  AppsV1Api,
  CoreV1Api,
  Exec,
  KubeConfig,
  NetworkingV1Api,
} from '@kubernetes/client-node'
import {
  generatePreviewDeployment,
  generatePreviewIngress,
  generatePreviewService,
} from './templates/preview-deployment'
import { logger } from '../../utils/logger'
/**
 * Thin wrapper around the official Kubernetes client used by the backend to
 * manage preview environments and Claude agent pods.
 *
 * Authentication: in-cluster service account when K8S_IN_CLUSTER=true
 * (set in the backend Deployment), otherwise the local kubeconfig.
 */
export class K8sClient {
  private kc: KubeConfig
  private appsApi: AppsV1Api
  private coreApi: CoreV1Api
  private networkingApi: NetworkingV1Api

  constructor() {
    this.kc = new KubeConfig()
    if (process.env.K8S_IN_CLUSTER === 'true') {
      this.kc.loadFromCluster()
    } else {
      this.kc.loadFromDefault()
    }
    this.appsApi = this.kc.makeApiClient(AppsV1Api)
    this.coreApi = this.kc.makeApiClient(CoreV1Api)
    this.networkingApi = this.kc.makeApiClient(NetworkingV1Api)
  }

  /**
   * Creates a complete preview environment for a task: namespace, then
   * Deployment, Service, and Ingress generated from the preview templates.
   *
   * Fix: the template generators require `projectName`, which this method's
   * params previously lacked. It is added as an optional field (falling back
   * to `projectId`) to stay backward-compatible with existing callers.
   *
   * @returns the namespace and the public preview host.
   */
  async createPreviewDeployment(params: {
    namespace: string
    taskId: string
    projectId: string
    image: string
    branch: string
    envVars: Record<string, string>
    projectName?: string
  }) {
    const { namespace, taskId, projectId } = params
    const templateParams = {
      ...params,
      projectName: params.projectName ?? params.projectId,
    }

    // Namespace must exist before the namespaced resources below.
    await this.createNamespace(namespace, {
      project: projectId,
      environment: 'preview',
      taskId,
    })

    const deployment = generatePreviewDeployment(templateParams)
    await this.appsApi.createNamespacedDeployment(namespace, deployment)

    const service = generatePreviewService(templateParams)
    await this.coreApi.createNamespacedService(namespace, service)

    const ingress = generatePreviewIngress(templateParams)
    await this.networkingApi.createNamespacedIngress(namespace, ingress)

    logger.info(`Created preview deployment for task ${taskId}`)
    return {
      namespace,
      // Host only (no scheme), taken from the generated Ingress rule.
      url: ingress.spec.rules[0].host,
    }
  }

  /**
   * Tears down a preview environment by deleting its whole namespace,
   * which cascades to the Deployment/Service/Ingress inside it.
   */
  async deletePreviewDeployment(namespace: string) {
    await this.deleteNamespace(namespace)
    logger.info(`Deleted preview deployment namespace: ${namespace}`)
  }

  /**
   * Idempotently creates a namespace labeled `managed-by: aiworker` plus any
   * extra labels. A 409 Conflict (already exists) is swallowed on purpose.
   */
  async createNamespace(name: string, labels: Record<string, string> = {}) {
    try {
      await this.coreApi.createNamespace({
        metadata: {
          name,
          labels: {
            'managed-by': 'aiworker',
            ...labels,
          },
        },
      })
      logger.info(`Created namespace: ${name}`)
    } catch (error: any) {
      // Depending on client version the HTTP status lives on the error or on
      // its response object — check both before deciding to rethrow.
      const status = error?.statusCode ?? error?.response?.statusCode
      if (status !== 409) {
        throw error
      }
    }
  }

  /** Deletes a namespace (and everything in it). Not idempotent: 404s throw. */
  async deleteNamespace(name: string) {
    await this.coreApi.deleteNamespace(name)
  }

  /**
   * Launches a one-shot Claude agent pod in the `agents` namespace.
   * The pod name uses the first 8 chars of the agent id; the full id is kept
   * in the `agent-id` label and AGENT_ID env var.
   */
  async createAgentPod(agentId: string) {
    const podSpec = {
      metadata: {
        name: `claude-agent-${agentId.slice(0, 8)}`,
        namespace: 'agents',
        labels: {
          app: 'claude-agent',
          'agent-id': agentId,
        },
      },
      spec: {
        containers: [
          {
            name: 'agent',
            image: 'aiworker/claude-agent:latest',
            env: [
              { name: 'AGENT_ID', value: agentId },
              {
                name: 'MCP_SERVER_URL',
                value: 'http://aiworker-backend.control-plane.svc.cluster.local:3100',
              },
              {
                name: 'ANTHROPIC_API_KEY',
                valueFrom: {
                  secretKeyRef: {
                    name: 'aiworker-secrets',
                    key: 'anthropic-api-key',
                  },
                },
              },
            ],
            resources: {
              requests: { cpu: '500m', memory: '1Gi' },
              limits: { cpu: '2', memory: '4Gi' },
            },
          },
        ],
        // One-shot workload: never restart the agent when it exits.
        restartPolicy: 'Never',
      },
    }

    await this.coreApi.createNamespacedPod('agents', podSpec)
    logger.info(`Created agent pod: ${agentId}`)
    return {
      podName: podSpec.metadata.name,
      namespace: 'agents',
    }
  }

  /** Deletes a single pod by name. */
  async deletePod(namespace: string, podName: string) {
    await this.coreApi.deleteNamespacedPod(podName, namespace)
  }

  /**
   * Reads the last `tailLines` lines of a pod's logs.
   * NOTE(review): the long positional-undefined call matches the 0.x
   * @kubernetes/client-node signature; 1.x switched to an object parameter —
   * confirm which version is pinned.
   */
  async getPodLogs(namespace: string, podName: string, tailLines = 100) {
    const response = await this.coreApi.readNamespacedPodLog(
      podName,
      namespace,
      undefined,
      undefined,
      undefined,
      undefined,
      undefined,
      undefined,
      undefined,
      tailLines
    )
    return response.body
  }

  /**
   * Executes a command inside the `agent` container of a pod over the
   * Kubernetes exec API, wiring the process's stdio to the remote TTY.
   * @returns the underlying WebSocket stream.
   */
  async execInPod(params: {
    namespace: string
    podName: string
    command: string[]
  }) {
    const exec = new Exec(this.kc)
    const stream = await exec.exec(
      params.namespace,
      params.podName,
      'agent', // container name, matches the agent pod spec above
      params.command,
      process.stdout,
      process.stderr,
      process.stdin,
      true // tty
    )
    return stream
  }
}
Deployment Script
#!/bin/bash
# deploy-all.sh — apply all AiWorker manifests and wait for the core pods.
#
# Prerequisites: kubectl pointed at the target cluster, and the manifests in
# k8s/secrets/ populated with real values (done once, manually).

# Fail fast: abort on any command error, on use of unset variables, and on
# failures anywhere in a pipeline (plain `set -e` misses the latter two).
set -euo pipefail

echo "🚀 Deploying AiWorker to Kubernetes..."

# Apply secrets (should be done once manually with real values)
echo "📦 Creating secrets..."
kubectl apply -f k8s/secrets/

# Deploy control-plane (backend API, MySQL, Redis)
echo "🎛️ Deploying control-plane..."
kubectl apply -f k8s/control-plane/

# Deploy agents namespace
echo "🤖 Setting up agents namespace..."
kubectl apply -f k8s/agents/

# Deploy Gitea
echo "📚 Deploying Gitea..."
kubectl apply -f k8s/gitea/

# Block until each core workload reports Ready (5-minute timeout apiece).
echo "⏳ Waiting for pods to be ready..."
kubectl wait --for=condition=ready pod -l app=aiworker-backend -n control-plane --timeout=300s
kubectl wait --for=condition=ready pod -l app=mysql -n control-plane --timeout=300s
kubectl wait --for=condition=ready pod -l app=redis -n control-plane --timeout=300s

echo "✅ Deployment complete!"
echo "📍 Backend API: https://api.aiworker.dev"
echo "📍 Gitea: https://git.aiworker.dev"