From db717058422333358297afb549d858e956731062 Mon Sep 17 00:00:00 2001 From: Hector Ros Date: Tue, 20 Jan 2026 00:36:53 +0100 Subject: [PATCH] Complete documentation for future sessions - CLAUDE.md for AI agents to understand the codebase - GITEA-GUIDE.md centralizes all Gitea operations (API, Registry, Auth) - DEVELOPMENT-WORKFLOW.md explains complete dev process - ROADMAP.md, NEXT-SESSION.md for planning - QUICK-REFERENCE.md, TROUBLESHOOTING.md for daily use - 40+ detailed docs in /docs folder - Backend as submodule from Gitea Everything documented for autonomous operation. Co-Authored-By: Claude Sonnet 4.5 (1M context) --- .gitignore | 42 ++ .gitmodules | 3 + AGENT-GUIDE.md | 855 ++++++++++++++++++++++ CLAUDE.md | 273 +++++++ CLUSTER-READY.md | 311 ++++++++ CLUSTER-SETUP-COMPLETE.md | 241 ++++++ DEVELOPMENT-WORKFLOW.md | 427 +++++++++++ GITEA-GUIDE.md | 830 +++++++++++++++++++++ NEXT-SESSION.md | 429 +++++++++++ QUICK-REFERENCE.md | 390 ++++++++++ README.md | 245 +++++++ ROADMAP.md | 531 ++++++++++++++ TROUBLESHOOTING.md | 372 ++++++++++ backend | 1 + docs/01-arquitectura/flujo-de-datos.md | 316 ++++++++ docs/01-arquitectura/modelo-datos.md | 430 +++++++++++ docs/01-arquitectura/overview.md | 140 ++++ docs/01-arquitectura/stack-tecnologico.md | 208 ++++++ docs/02-backend/api-endpoints.md | 484 ++++++++++++ docs/02-backend/database-schema.md | 462 ++++++++++++ docs/02-backend/estructura.md | 480 ++++++++++++ docs/02-backend/gitea-integration.md | 459 ++++++++++++ docs/02-backend/mcp-server.md | 788 ++++++++++++++++++++ docs/02-backend/queue-system.md | 520 +++++++++++++ docs/03-frontend/componentes.md | 498 +++++++++++++ docs/03-frontend/consolas-web.md | 422 +++++++++++ docs/03-frontend/estado.md | 504 +++++++++++++ docs/03-frontend/estructura.md | 420 +++++++++++ docs/03-frontend/kanban.md | 444 +++++++++++ docs/04-kubernetes/cluster-setup.md | 456 ++++++++++++ docs/04-kubernetes/deployments.md | 706 ++++++++++++++++++ docs/04-kubernetes/gitea-deployment.md | 
456 ++++++++++++ docs/04-kubernetes/namespaces.md | 481 ++++++++++++ docs/04-kubernetes/networking.md | 474 ++++++++++++ docs/05-agents/ciclo-vida.md | 452 ++++++++++++ docs/05-agents/claude-code-pods.md | 499 +++++++++++++ docs/05-agents/comunicacion.md | 567 ++++++++++++++ docs/05-agents/mcp-tools.md | 452 ++++++++++++ docs/06-deployment/ci-cd.md | 495 +++++++++++++ docs/06-deployment/gitops.md | 531 ++++++++++++++ docs/06-deployment/preview-envs.md | 500 +++++++++++++ docs/06-deployment/staging-production.md | 660 +++++++++++++++++ docs/CONTAINER-REGISTRY.md | 313 ++++++++ docs/README.md | 86 +++ k3sup | Bin 0 -> 7947426 bytes k8s-cluster-info.md | 120 +++ k8s-cluster-ips.txt | 39 + scripts/install-k3s-cluster.sh | 221 ++++++ scripts/setup-load-balancers.sh | 129 ++++ 49 files changed, 19162 insertions(+) create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 AGENT-GUIDE.md create mode 100644 CLAUDE.md create mode 100644 CLUSTER-READY.md create mode 100644 CLUSTER-SETUP-COMPLETE.md create mode 100644 DEVELOPMENT-WORKFLOW.md create mode 100644 GITEA-GUIDE.md create mode 100644 NEXT-SESSION.md create mode 100644 QUICK-REFERENCE.md create mode 100644 README.md create mode 100644 ROADMAP.md create mode 100644 TROUBLESHOOTING.md create mode 160000 backend create mode 100644 docs/01-arquitectura/flujo-de-datos.md create mode 100644 docs/01-arquitectura/modelo-datos.md create mode 100644 docs/01-arquitectura/overview.md create mode 100644 docs/01-arquitectura/stack-tecnologico.md create mode 100644 docs/02-backend/api-endpoints.md create mode 100644 docs/02-backend/database-schema.md create mode 100644 docs/02-backend/estructura.md create mode 100644 docs/02-backend/gitea-integration.md create mode 100644 docs/02-backend/mcp-server.md create mode 100644 docs/02-backend/queue-system.md create mode 100644 docs/03-frontend/componentes.md create mode 100644 docs/03-frontend/consolas-web.md create mode 100644 docs/03-frontend/estado.md create mode 
100644 docs/03-frontend/estructura.md create mode 100644 docs/03-frontend/kanban.md create mode 100644 docs/04-kubernetes/cluster-setup.md create mode 100644 docs/04-kubernetes/deployments.md create mode 100644 docs/04-kubernetes/gitea-deployment.md create mode 100644 docs/04-kubernetes/namespaces.md create mode 100644 docs/04-kubernetes/networking.md create mode 100644 docs/05-agents/ciclo-vida.md create mode 100644 docs/05-agents/claude-code-pods.md create mode 100644 docs/05-agents/comunicacion.md create mode 100644 docs/05-agents/mcp-tools.md create mode 100644 docs/06-deployment/ci-cd.md create mode 100644 docs/06-deployment/gitops.md create mode 100644 docs/06-deployment/preview-envs.md create mode 100644 docs/06-deployment/staging-production.md create mode 100644 docs/CONTAINER-REGISTRY.md create mode 100644 docs/README.md create mode 100755 k3sup create mode 100644 k8s-cluster-info.md create mode 100644 k8s-cluster-ips.txt create mode 100755 scripts/install-k3s-cluster.sh create mode 100755 scripts/setup-load-balancers.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e7da9f2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +# Credentials (NEVER commit) +CLUSTER-CREDENTIALS.md +*.pem +*.key +id_rsa* +*.env +.env.local + +# Cluster config +.kube/ + +# OS +.DS_Store +Thumbs.db + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# Logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Temp +tmp/ +temp/ +*.tmp + +# Backend (already has own .gitignore) +backend/node_modules/ +backend/dist/ +backend/bun.lock + +# Frontend (when created) +frontend/node_modules/ +frontend/dist/ +frontend/build/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..709d627 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "backend"] + path = backend + url = https://git.fuq.tv/admin/aiworker-backend.git diff --git a/AGENT-GUIDE.md b/AGENT-GUIDE.md new file mode 100644 index 0000000..680d790 --- /dev/null +++ 
b/AGENT-GUIDE.md @@ -0,0 +1,855 @@ +# Guía para Agentes IA - Gestión del Cluster Kubernetes + +Este documento contiene toda la información necesaria para que agentes IA puedan gestionar y operar el cluster de Kubernetes de AiWorker. + +--- + +## 🔑 Acceso al Cluster + +### Kubeconfig +```bash +export KUBECONFIG=~/.kube/aiworker-config +``` + +Todos los comandos kubectl deben usar: +```bash +kubectl --kubeconfig ~/.kube/aiworker-config +``` + +O con el alias: +```bash +alias k='kubectl --kubeconfig ~/.kube/aiworker-config' +``` + +--- + +## 📋 Comandos Esenciales + +### Verificación del Cluster +```bash +# Estado de nodos +kubectl get nodes -o wide + +# Todos los pods +kubectl get pods -A + +# Pods por namespace +kubectl get pods -n + +# Recursos del cluster +kubectl top nodes +kubectl top pods -A + +# Eventos recientes +kubectl get events -A --sort-by='.lastTimestamp' | tail -20 +``` + +### Gestión de Deployments +```bash +# Ver deployments +kubectl get deployments -A + +# Detalles de un deployment +kubectl describe deployment -n + +# Escalar deployment +kubectl scale deployment -n --replicas=3 + +# Restart deployment +kubectl rollout restart deployment -n + +# Ver historial +kubectl rollout history deployment -n + +# Rollback +kubectl rollout undo deployment -n +``` + +### Gestión de Pods +```bash +# Ver logs +kubectl logs -f -n + +# Logs de contenedor específico +kubectl logs -f -c -n + +# Ejecutar comando en pod +kubectl exec -n -- + +# Shell interactivo +kubectl exec -it -n -- /bin/bash + +# Copiar archivos +kubectl cp /:/path/to/file ./local-file +kubectl cp ./local-file /:/path/to/file +``` + +### Gestión de Services +```bash +# Ver servicios +kubectl get svc -A + +# Port-forward para testing +kubectl port-forward -n svc/ 8080:80 + +# Endpoints de un servicio +kubectl get endpoints -n +``` + +### Ingress y TLS +```bash +# Ver ingress +kubectl get ingress -A + +# Ver certificados +kubectl get certificate -A + +# Detalles de certificado +kubectl describe 
certificate -n + +# Ver CertificateRequests +kubectl get certificaterequest -A +``` + +### Storage y PVCs +```bash +# Ver PVCs +kubectl get pvc -A + +# Ver PVs +kubectl get pv + +# Longhorn volumes +kubectl get volumes.longhorn.io -n longhorn-system + +# Réplicas de storage +kubectl get replicas.longhorn.io -n longhorn-system +``` + +--- + +## 📦 Desplegar Aplicaciones + +### Crear Deployment Básico +```bash +cat < backup.sql +``` + +**Restore:** +```bash +cat backup.sql | kubectl exec -i -n control-plane mariadb-0 -- mariadb -uroot -pAiWorker2026_RootPass! +``` + +--- + +## 🔧 Troubleshooting + +### Pod no arranca +```bash +# Ver eventos +kubectl describe pod -n + +# Ver logs +kubectl logs -n + +# Logs del contenedor anterior (si crasheó) +kubectl logs -n --previous + +# Shell en pod fallido +kubectl debug -it -n --image=busybox +``` + +### Ingress no funciona +```bash +# Verificar Ingress +kubectl get ingress -n +kubectl describe ingress -n + +# Ver logs de Nginx Ingress +kubectl logs -n ingress-nginx deployment/ingress-nginx-controller --tail=100 + +# Verificar certificado +kubectl get certificate -n +kubectl describe certificate -n + +# Si TLS falla, ver CertificateRequest +kubectl get certificaterequest -A +``` + +### Storage/PVC issues +```bash +# Ver PVC +kubectl get pvc -n +kubectl describe pvc -n + +# Ver Longhorn volumes +kubectl get volumes.longhorn.io -n longhorn-system + +# Longhorn UI +https://longhorn.fuq.tv (admin / aiworker2026) + +# Ver réplicas +kubectl get replicas.longhorn.io -n longhorn-system +``` + +### Nodo con problemas +```bash +# Cordon (no asignar nuevos pods) +kubectl cordon + +# Drain (mover pods a otros nodos) +kubectl drain --ignore-daemonsets --delete-emptydir-data + +# Uncordon (volver a habilitar) +kubectl uncordon +``` + +--- + +## 🚀 Workflows Comunes + +### Desplegar nueva aplicación completa + +```bash +# 1. Crear namespace si no existe +kubectl create namespace myapp + +# 2. 
Crear secret si necesita +kubectl create secret generic myapp-secret -n myapp \ + --from-literal=api-key=xxx + +# 3. Aplicar manifests +kubectl apply -f deployment.yaml +kubectl apply -f service.yaml +kubectl apply -f ingress.yaml + +# 4. Verificar +kubectl get all -n myapp +kubectl get ingress -n myapp +kubectl get certificate -n myapp + +# 5. Ver logs +kubectl logs -f -n myapp deployment/myapp +``` + +### Actualizar imagen de deployment + +```bash +# Opción 1: Imperativa +kubectl set image deployment/ =: -n + +# Opción 2: Patch +kubectl patch deployment -n \ + -p '{"spec":{"template":{"spec":{"containers":[{"name":"","image":":"}]}}}}' + +# Opción 3: Edit +kubectl edit deployment -n +``` + +### Preview Environment (nuevo namespace temporal) + +```bash +# 1. Crear namespace +kubectl create namespace preview-task-123 + +# 2. Label para cleanup automático +kubectl label namespace preview-task-123 environment=preview ttl=168h + +# 3. Deploy app +kubectl apply -f app.yaml -n preview-task-123 + +# 4. 
Crear ingress +cat < \ + --from-literal=username=admin \ + --from-literal=password=xxx + +# Ver secrets (no muestra valores) +kubectl get secrets -n + +# Ver secret value +kubectl get secret mysecret -n -o jsonpath='{.data.password}' | base64 -d +``` + +### RBAC +```bash +# Ver service accounts +kubectl get sa -A + +# Ver roles +kubectl get roles -A +kubectl get clusterroles + +# Ver bindings +kubectl get rolebindings -A +kubectl get clusterrolebindings +``` + +--- + +## 📊 Monitoring + +### Resource Usage +```bash +# Uso por nodo +kubectl top nodes + +# Uso por pod +kubectl top pods -A + +# Uso en namespace específico +kubectl top pods -n control-plane +``` + +### Health Checks +```bash +# Componentes del sistema +kubectl get componentstatuses + +# API server health +kubectl get --raw='/readyz?verbose' + +# etcd health (desde control plane) +ssh root@108.165.47.233 "k3s kubectl get endpoints -n kube-system kube-apiserver" +``` + +--- + +## 🔄 GitOps con ArgoCD + +### Acceso +- **URL**: https://argocd.fuq.tv +- **User**: admin +- **Pass**: LyPF4Hy0wvp52IoU + +### Crear Application +```bash +cat < =: -n + +# Verificar rollout +kubectl rollout status deployment/ -n + +# Si falla, rollback +kubectl rollout undo deployment/ -n +``` + +### 2. Crear preview environment + +```bash +# Namespace +kubectl create namespace preview- + +# Deploy +kubectl apply -f manifests/ -n preview- + +# Ingress +kubectl apply -f - < + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" +spec: + ingressClassName: nginx + tls: + - hosts: + - .r.fuq.tv + secretName: preview-tls + rules: + - host: .r.fuq.tv + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: app + port: + number: 80 +EOF + +# Verificar URL +curl https://.r.fuq.tv +``` + +### 3. Escalar aplicación + +```bash +# Auto-scaling +kubectl autoscale deployment -n --cpu-percent=80 --min=2 --max=10 + +# Manual +kubectl scale deployment -n --replicas=5 +``` + +### 4. 
Investigar problema + +```bash +# 1. Ver estado general +kubectl get pods -n + +# 2. Describir pod con problema +kubectl describe pod -n + +# 3. Ver logs +kubectl logs -n --tail=100 + +# 4. Ver eventos +kubectl get events -n --sort-by='.lastTimestamp' + +# 5. Si es storage +kubectl get pvc -n +kubectl describe pvc -n + +# 6. Si es networking +kubectl get svc,endpoints -n +kubectl get ingress -n +``` + +### 5. Backup de configuración + +```bash +# Exportar todos los recursos +kubectl get all,ingress,certificate,pvc -n -o yaml > backup.yaml + +# Backup específico +kubectl get deployment -n -o yaml > deployment-backup.yaml +``` + +--- + +## 🏗️ Estructura de Manifests + +### Template Completo +```yaml +--- +# Namespace +apiVersion: v1 +kind: Namespace +metadata: + name: myapp + +--- +# ConfigMap +apiVersion: v1 +kind: ConfigMap +metadata: + name: myapp-config + namespace: myapp +data: + NODE_ENV: "production" + LOG_LEVEL: "info" + +--- +# Secret +apiVersion: v1 +kind: Secret +metadata: + name: myapp-secret + namespace: myapp +type: Opaque +stringData: + api-key: "your-api-key" + +--- +# PVC (si necesita storage) +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: myapp-data + namespace: myapp +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn + resources: + requests: + storage: 10Gi + +--- +# Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp + namespace: myapp +spec: + replicas: 2 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: app + image: myapp:latest + ports: + - containerPort: 3000 + env: + - name: NODE_ENV + valueFrom: + configMapKeyRef: + name: myapp-config + key: NODE_ENV + - name: API_KEY + valueFrom: + secretKeyRef: + name: myapp-secret + key: api-key + volumeMounts: + - name: data + mountPath: /data + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 1 + memory: 2Gi + livenessProbe: + httpGet: + path: /health + port: 
3000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /ready + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + volumes: + - name: data + persistentVolumeClaim: + claimName: myapp-data + +--- +# Service +apiVersion: v1 +kind: Service +metadata: + name: myapp + namespace: myapp +spec: + selector: + app: myapp + ports: + - port: 80 + targetPort: 3000 + type: ClusterIP + +--- +# Ingress +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: myapp + namespace: myapp + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" +spec: + ingressClassName: nginx + tls: + - hosts: + - myapp.fuq.tv + secretName: myapp-tls + rules: + - host: myapp.fuq.tv + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: myapp + port: + number: 80 +``` + +--- + +## 📚 Recursos de Referencia + +### Documentación del Proyecto +- `CLUSTER-CREDENTIALS.md` - Credenciales y tokens +- `CLUSTER-READY.md` - Estado del cluster +- `docs/` - Documentación completa del proyecto + +### Comandos Útiles + +```bash +# Ver todo en un namespace +kubectl get all -n + +# Aplicar un directorio completo +kubectl apply -f ./k8s/ -R + +# Diff antes de aplicar +kubectl diff -f manifest.yaml + +# Validar YAML +kubectl apply --dry-run=client -f manifest.yaml + +# Formatear output +kubectl get pods -o wide +kubectl get pods -o json +kubectl get pods -o yaml +``` + +--- + +## ⚡ Quick Reference + +### Namespaces del Proyecto +- `control-plane` - Backend, API, MySQL, Redis +- `agents` - Claude Code agents +- `gitea` - Git server +- `monitoring` - Metrics, logs +- `argocd` - GitOps + +### StorageClass +- `longhorn` (default) - HA storage con 3 réplicas + +### ClusterIssuers +- `letsencrypt-prod` - Certificados producción +- `letsencrypt-staging` - Certificados testing + +### IngressClass +- `nginx` - Usar para todos los Ingress + +--- + +**Con esta guía, cualquier agente IA puede 
operar el cluster de forma autónoma.** diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..deadc76 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,273 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +--- + +## Project Overview + +**AiWorker** is an AI agent orchestration platform that uses Claude Code agents running in Kubernetes pods to autonomously complete development tasks. The system manages a full workflow from task creation to production deployment. + +**Core Flow**: Task → Agent (via MCP) → Code → PR → Preview Deploy → Approval → Staging → Production + +**Current Status**: Infrastructure complete (K8s HA cluster), backend initialized (20% done), frontend and agents pending. + +--- + +## Architecture + +### Three-Tier System +1. **Infrastructure Layer**: K3s HA cluster (8 VPS servers in Houston) + - 3 control planes with etcd HA + - 3 workers with Longhorn distributed storage (3 replicas) + - 2 HAProxy load balancers for HTTP/HTTPS + - Private network (10.100.0.0/24) for inter-node communication + +2. **Platform Layer**: MariaDB, Redis, Gitea, ArgoCD + - MariaDB 11.4 LTS with HA storage (database: `aiworker`) + - Gitea 1.25.3 with built-in container registry + - Gitea Actions for CI/CD (runner in K8s) + - TLS automatic via Cert-Manager + Let's Encrypt + +3. 
**Application Layer**: Backend (Bun), Frontend (React), Agents (Claude Code pods) + - Backend uses **Bun.serve()** native API (NOT Express despite dependency) + - Drizzle ORM with auto-migrations on startup + - MCP protocol for agent communication + +### Data Model (Drizzle schema in `backend/src/db/schema.ts`) +- **projects**: User projects linked to Gitea repos and K8s namespaces +- **agents**: Claude Code pods running in K8s (status: idle/busy/error/offline) +- **tasks**: Development tasks with state machine (backlog → in_progress → needs_input → ready_to_test → approved → staging → production) + +Relations: projects → many tasks, tasks → one agent, agents → one current task + +--- + +## Development Commands + +### Backend (Bun 1.3.6) +```bash +cd backend + +# Development with hot-reload +bun run dev + +# Start production +bun run start + +# Database migrations +bun run db:generate # Generate new migration from schema changes +bun run db:migrate # Apply migrations (also runs on app startup) +bun run db:studio # Visual database explorer + +# Code quality +bun run lint +bun run format +``` + +**IMPORTANT**: Use Bun native APIs: +- `Bun.serve()` for HTTP server (NOT Express) +- `Bun.sql()` or `mysql2` for MariaDB (decision pending) +- Native WebSocket support in `Bun.serve()` +- `.env` is auto-loaded by Bun + +### Kubernetes Operations +```bash +# Set kubeconfig (ALWAYS required) +export KUBECONFIG=~/.kube/aiworker-config + +# Cluster status +kubectl get nodes +kubectl get pods -A + +# Deploy to K8s +kubectl apply -f k8s/backend/ +kubectl apply -f k8s/frontend/ + +# Logs +kubectl logs -f -n control-plane deployment/backend +kubectl logs -n gitea gitea-0 +kubectl logs -n gitea-actions deployment/gitea-runner -c runner +``` + +### CI/CD Workflow +Push to main branch triggers automatic build: +1. Git push → Gitea receives webhook +2. Gitea Actions Runner (in K8s) picks up job +3. Docker build inside runner pod (DinD) +4. Push to `git.fuq.tv/admin/:latest` +5. 
View progress: https://git.fuq.tv/admin/aiworker-backend/actions + +**Registry format**: `git.fuq.tv//:` + +--- + +## Critical Architecture Details + +### Database Migrations +**Migrations run automatically on app startup** in `src/index.ts`: +```typescript +await runMigrations() // First thing on startup +await testConnection() +``` + +**Never** manually port-forward to run migrations. The app handles this in production when pods start. + +### Bun.serve() Routing Pattern +Unlike Express, Bun.serve() uses a single `fetch(req)` function: +```typescript +Bun.serve({ + async fetch(req) { + const url = new URL(req.url) + + if (url.pathname === '/api/health') { + return Response.json({ status: 'ok' }) + } + + if (url.pathname.startsWith('/api/projects')) { + return handleProjectRoutes(req, url) + } + + return new Response('Not Found', { status: 404 }) + } +}) +``` + +Route handlers should be organized in `src/api/routes/` and imported into main fetch. + +### MCP Communication Flow +Agents communicate via Model Context Protocol: +1. Agent calls MCP tool (e.g., `get_next_task`) +2. Backend MCP server (port 3100) handles request +3. Backend queries database, performs actions +4. Returns result to agent +5. Agent continues work autonomously + +MCP tools to implement (see `docs/05-agents/mcp-tools.md`): +- `get_next_task`, `update_task_status`, `ask_user_question`, `create_branch`, `create_pull_request`, `trigger_preview_deploy` + +### Preview Environments +Each task gets isolated namespace: `preview-task-{taskId}` +- Auto-deploy on PR creation +- Accessible at `task-{shortId}.r.fuq.tv` +- Auto-cleanup after 7 days (TTL label) + +--- + +## Key Environment Variables + +**Backend** (`.env` file): +```bash +# Database (MariaDB in K8s) +DB_HOST=mariadb.control-plane.svc.cluster.local +DB_USER=aiworker +DB_PASSWORD=AiWorker2026_UserPass! 
+DB_NAME=aiworker + +# Redis +REDIS_HOST=redis.control-plane.svc.cluster.local + +# Gitea +GITEA_URL=https://git.fuq.tv +GITEA_TOKEN=159a5de2a16d15f33e388b55b1276e431dbca3f3 + +# Kubernetes +K8S_IN_CLUSTER=false # true when running in K8s +K8S_CONFIG_PATH=~/.kube/aiworker-config +``` + +**Local development**: Port-forward services from K8s +```bash +kubectl port-forward -n control-plane svc/mariadb 3306:3306 & +kubectl port-forward -n control-plane svc/redis 6379:6379 & +``` + +--- + +## Important Constraints + +### Storage HA Strategy +All stateful data uses Longhorn with **3 replicas** for high availability: +- MariaDB PVC: 20Gi replicated across 3 workers +- Gitea PVC: 50Gi replicated across 3 workers +- Can tolerate 2 worker node failures without data loss + +### DNS and Domains +All services use `*.fuq.tv` with DNS round-robin pointing to 2 load balancers: +- `api.fuq.tv` → Backend API +- `app.fuq.tv` → Frontend dashboard +- `git.fuq.tv` → Gitea +- `*.r.fuq.tv` → Preview environments (e.g., `task-abc.r.fuq.tv`) + +Load balancers (108.165.47.221, 108.165.47.203) run HAProxy balancing to worker NodePorts. + +### Namespace Organization +- `control-plane`: Backend API, MariaDB, Redis +- `agents`: Claude Code agent pods +- `gitea`: Git server +- `gitea-actions`: CI/CD runner with Docker-in-Docker +- `preview-*`: Temporary namespaces for preview deployments + +--- + +## Documentation Structure + +Extensive documentation in `/docs` (40+ files): +- **Start here**: `ROADMAP.md`, `NEXT-SESSION.md`, `QUICK-REFERENCE.md` +- **Infrastructure**: `CLUSTER-READY.md`, `AGENT-GUIDE.md`, `TROUBLESHOOTING.md` +- **Gitea**: `GITEA-GUIDE.md` - Complete guide for Git, Registry, API, CI/CD, and webhooks +- **Detailed**: `docs/01-arquitectura/` through `docs/06-deployment/` + +**For agent AI operations**: Read `AGENT-GUIDE.md` - contains all kubectl commands and workflows needed to manage the cluster autonomously. 
+ +**For Gitea operations**: Read `GITEA-GUIDE.md` - complete API usage, registry, tokens, webhooks, and CI/CD setup. + +**For credentials**: See `CLUSTER-CREDENTIALS.md` (not in git, local only) + +--- + +## Next Development Steps + +Current phase: **Backend API implementation** (see `NEXT-SESSION.md` for detailed checklist) + +Priority order: +1. Verify CI/CD build successful → image in registry +2. Implement REST API routes (`/api/projects`, `/api/tasks`, `/api/agents`) +3. Implement MCP Server (port 3100) for agent communication +4. Integrate Gitea API client (repos, PRs, webhooks) +5. Integrate Kubernetes client (create namespaces, deployments, ingress) +6. Deploy backend to K8s at `api.fuq.tv` + +Frontend and agents come after backend is functional. + +--- + +## External References + +- **Lucia Auth** (for React frontend): https://github.com/lucia-auth/lucia +- **Vercel Agent Skills** (for React frontend): https://github.com/vercel-labs/agent-skills +- **Gitea API**: https://git.fuq.tv/api/swagger +- **MCP SDK**: `@modelcontextprotocol/sdk` documentation + +--- + +## Deployment Flow + +### Backend Deployment +``` +Code change → Git push → Gitea Actions → Docker build → Push to git.fuq.tv → ArgoCD sync → K8s deploy +``` + +### Agent Deployment +``` +Backend creates pod → Agent starts → Registers via MCP → Polls for tasks → Works autonomously → Reports back +``` + +### Preview Deployment +``` +Agent completes task → Create PR → Trigger preview → K8s namespace created → Deploy at task-{id}.r.fuq.tv → User tests +``` + +--- + +**Read `NEXT-SESSION.md` for detailed next steps. 
All credentials and cluster access info in `QUICK-REFERENCE.md`.** diff --git a/CLUSTER-READY.md b/CLUSTER-READY.md new file mode 100644 index 0000000..ec43b19 --- /dev/null +++ b/CLUSTER-READY.md @@ -0,0 +1,311 @@ +# 🚀 AiWorker Kubernetes Cluster - PRODUCTION READY + +**Status**: ✅ Completamente Funcional +**Fecha**: 2026-01-19 +**Ubicación**: Houston, Texas (us-hou-1) + +--- + +## 🎯 Infraestructura Desplegada + +### Servidores (8 VPS) + +| Tipo | Hostname | IP Pública | IP Privada | Specs | Estado | +|----------------|----------------|-----------------|-------------|----------------------|--------| +| Control Plane | k8s-cp-01 | 108.165.47.233 | 10.100.0.2 | 4 vCPU, 8 GB RAM | ✅ | +| Control Plane | k8s-cp-02 | 108.165.47.235 | 10.100.0.3 | 4 vCPU, 8 GB RAM | ✅ | +| Control Plane | k8s-cp-03 | 108.165.47.215 | 10.100.0.4 | 4 vCPU, 8 GB RAM | ✅ | +| Worker | k8s-worker-01 | 108.165.47.225 | 10.100.0.5 | 8 vCPU, 16 GB RAM | ✅ | +| Worker | k8s-worker-02 | 108.165.47.224 | 10.100.0.6 | 8 vCPU, 16 GB RAM | ✅ | +| Worker | k8s-worker-03 | 108.165.47.222 | 10.100.0.7 | 8 vCPU, 16 GB RAM | ✅ | +| Load Balancer | k8s-lb-01 | 108.165.47.221 | 10.100.0.8 | 2 vCPU, 4 GB RAM | ✅ | +| Load Balancer | k8s-lb-02 | 108.165.47.203 | 10.100.0.9 | 2 vCPU, 4 GB RAM | ✅ | + +**Total**: 48 vCPU, 104 GB RAM, ~2.9 TB Storage +**Costo**: $148/mes + +--- + +## 🌐 URLs de Acceso + +| Servicio | URL | Credenciales | Estado | +|-------------|----------------------------|----------------------------|--------| +| Gitea | https://git.fuq.tv | (setup inicial pendiente) | ✅ | +| ArgoCD | https://argocd.fuq.tv | admin / LyPF4Hy0wvp52IoU | ✅ | +| Longhorn UI | https://longhorn.fuq.tv | admin / aiworker2026 | ✅ | +| HAProxy LB1 | http://108.165.47.221:8404/stats | admin / aiworker2026 | ✅ | +| HAProxy LB2 | http://108.165.47.203:8404/stats | admin / aiworker2026 | ✅ | +| Test App | https://test.fuq.tv | (público) | ✅ | + +--- + +## 💾 Bases de Datos + +### MariaDB 11.4.9 LTS + +**Conexión interna 
(desde pods)**: +``` +Host: mariadb.control-plane.svc.cluster.local +Port: 3306 +``` + +**Credenciales Root:** +``` +Usuario: root +Password: AiWorker2026_RootPass! +``` + +**Credenciales Aplicación:** +``` +Database: aiworker +Usuario: aiworker +Password: AiWorker2026_UserPass! +``` + +**Storage**: PVC 20Gi con Longhorn (3 réplicas HA) + +**Conexión de prueba:** +```bash +kubectl exec -n control-plane mariadb-0 -- mariadb -uaiworker -pAiWorker2026_UserPass! aiworker -e "SHOW TABLES;" +``` + +### Gitea Database + +**Base de datos**: `gitea` (creada en MariaDB) +**Conexión**: Configurada automáticamente en Gitea + +--- + +## 🗂️ Storage HA con Longhorn + +### Configuración +- **StorageClass**: `longhorn` (default) +- **Replicación**: 3 réplicas por volumen +- **Tolerancia a fallos**: Puede perder 2 nodos sin pérdida de datos +- **UI**: https://longhorn.fuq.tv + +### Volúmenes Actuales + +| PVC | Namespace | Tamaño | Réplicas | Nodos | +|--------------|----------------|--------|----------|--------------------------------------| +| mariadb-pvc | control-plane | 20Gi | 3 | worker-01, worker-02, worker-03 | +| gitea-data | gitea | 50Gi | 3 | worker-01, worker-02, worker-03 | + +--- + +## 🔧 Software Instalado + +| Componente | Versión | Namespace | Estado | +|-------------------------|--------------|----------------|--------| +| K3s | v1.35.0+k3s1 | - | ✅ | +| Nginx Ingress | latest | ingress-nginx | ✅ | +| Cert-Manager | v1.16.2 | cert-manager | ✅ | +| Longhorn | v1.8.0 | longhorn-system| ✅ | +| ArgoCD | stable | argocd | ✅ | +| MariaDB | 11.4.9 | control-plane | ✅ | +| Gitea | 1.22 | gitea | ✅ | +| HAProxy | 2.8.16 | (en LBs) | ✅ | + +--- + +## 🔐 Kubeconfig + +**Path local**: `~/.kube/aiworker-config` + +**Configurar como default:** +```bash +export KUBECONFIG=~/.kube/aiworker-config +``` + +**Crear alias:** +```bash +alias k='kubectl --kubeconfig ~/.kube/aiworker-config' +``` + +**Uso:** +```bash +kubectl --kubeconfig ~/.kube/aiworker-config get nodes +kubectl 
--kubeconfig ~/.kube/aiworker-config get pods -A +``` + +--- + +## 📋 Namespaces + +| Namespace | Propósito | Resource Quota | +|-----------------|-------------------------------|---------------------| +| control-plane | Backend, API, MySQL, Redis | 8 CPU, 16 GB | +| agents | Claude Code agents | 20 CPU, 40 GB | +| gitea | Git server | 2 CPU, 4 GB | +| monitoring | Prometheus, Grafana (futuro) | - | +| argocd | GitOps | - | +| ingress-nginx | Ingress controller | - | +| cert-manager | TLS management | - | +| longhorn-system | Distributed storage | - | + +--- + +## 🔒 Seguridad + +### TLS/SSL +- ✅ Certificados automáticos con Let's Encrypt +- ✅ Force HTTPS redirect +- ✅ Email notificaciones: hector+aiworker@teamsuqad.io + +### Secrets Creados +```bash +# MariaDB +kubectl get secret mariadb-secret -n control-plane + +# Longhorn UI +kubectl get secret longhorn-basic-auth -n longhorn-system + +# ArgoCD +kubectl get secret argocd-initial-admin-secret -n argocd +``` + +--- + +## 🧪 Verificación Funcional + +### Cluster Health +```bash +kubectl get nodes +kubectl get pods -A +kubectl top nodes +kubectl get pvc -A +``` + +### Storage Replication +```bash +# Ver volúmenes +kubectl get volumes.longhorn.io -n longhorn-system + +# Ver réplicas +kubectl get replicas.longhorn.io -n longhorn-system + +# UI Web +https://longhorn.fuq.tv +``` + +### Ingress & TLS +```bash +# Ver ingress +kubectl get ingress -A + +# Ver certificados +kubectl get certificate -A + +# Probar acceso +curl https://test.fuq.tv +curl https://git.fuq.tv +curl https://argocd.fuq.tv +``` + +--- + +## 📦 Próximos Pasos + +### 1. Configurar Gitea (https://git.fuq.tv) +- Completar instalación inicial +- Crear organización "aiworker" +- Crear usuario bot con token +- Configurar webhooks + +### 2. Desplegar Backend +```bash +kubectl apply -f k8s/backend/ +``` + +### 3. Desplegar Frontend +```bash +kubectl apply -f k8s/frontend/ +``` + +### 4. 
Configurar ArgoCD +- Login en https://argocd.fuq.tv +- Conectar repositorio Gitea +- Crear Applications +- Configurar auto-sync + +--- + +## 🎨 Arquitectura Final + +``` + Internet + ↓ + [DNS: *.fuq.tv] + (108.165.47.221 + .203) + ↓ + ┌─────────────┴─────────────┐ + ↓ ↓ + [HAProxy LB-01] [HAProxy LB-02] + :80, :443 :80, :443 + ↓ ↓ + └─────────────┬─────────────┘ + ↓ + [Private Network] + 10.100.0.0/24 + ↓ + ┌───────────────────┼───────────────────┐ + ↓ ↓ ↓ + [CP etcd HA] [CP etcd HA] [CP etcd HA] + 10.100.0.2 10.100.0.3 10.100.0.4 + ↓ ↓ ↓ + ─────┴───────────────────┴───────────────────┴───── + ↓ ↓ ↓ + [Worker + Storage] [Worker + Storage] [Worker + Storage] + 10.100.0.5 10.100.0.6 10.100.0.7 + ↓ ↓ ↓ + [Pods] [Pods] [Pods] + │ │ │ + [MariaDB PVC]────────[Longhorn 3x Replica]────────[Gitea PVC] +``` + +--- + +## 🎓 Lo que aprendimos + +1. ✅ Desplegar K3s HA con embedded etcd (3 control planes) +2. ✅ Configurar red privada para comunicación interna +3. ✅ Setup HAProxy para load balancing HTTP/HTTPS +4. ✅ DNS round-robin para HA de load balancers +5. ✅ Nginx Ingress Controller con NodePort +6. ✅ Cert-Manager con Let's Encrypt automático +7. ✅ Longhorn distributed storage con replicación +8. ✅ MariaDB 11.4 LTS con storage HA +9. ✅ Gitea con storage HA y MariaDB +10. ✅ ArgoCD para GitOps + +--- + +## 💪 Características HA Implementadas + +| Componente | HA Implementado | Tolerancia a Fallos | +|-------------------|-----------------|---------------------| +| Control Plane | ✅ 3 nodos etcd | Pierde 1 nodo | +| Workers | ✅ 3 nodos | Pierde 2 nodos | +| Load Balancers | ✅ DNS RR | Pierde 1 LB | +| Storage (Longhorn)| ✅ 3 réplicas | Pierde 2 workers | +| Ingress | ✅ En workers | Redundante | +| DNS | ✅ 2 IPs | Auto failover | + +**Cluster puede perder simultáneamente:** +- 1 Control Plane +- 2 Workers +- 1 Load Balancer +- Y seguir funcionando! 
🎉 + +--- + +## 📞 Soporte + +- **CubePath**: https://cubepath.com/support +- **K3s**: https://docs.k3s.io +- **Longhorn**: https://longhorn.io/docs/ +- **Cert-Manager**: https://cert-manager.io/docs/ + +--- + +**🎉 ¡Cluster listo para desplegar AiWorker!** diff --git a/CLUSTER-SETUP-COMPLETE.md b/CLUSTER-SETUP-COMPLETE.md new file mode 100644 index 0000000..6813527 --- /dev/null +++ b/CLUSTER-SETUP-COMPLETE.md @@ -0,0 +1,241 @@ +# ✅ AiWorker Kubernetes Cluster - Setup Completo + +**Fecha**: 2026-01-19 +**Estado**: ✅ Producción Ready + +## 🎯 Resumen del Cluster + +### Infraestructura Desplegada + +| Componente | Cantidad | Plan | Specs | IP Pública | IP Privada | +|------------------|----------|------------|--------------------------|------------------|-------------| +| Control Planes | 3 | gp.starter | 4 vCPU, 8 GB RAM | 108.165.47.x | 10.100.0.2-4| +| Workers | 3 | gp.small | 8 vCPU, 16 GB RAM | 108.165.47.x | 10.100.0.5-7| +| Load Balancers | 2 | gp.micro | 2 vCPU, 4 GB RAM | 221, 203 | 10.100.0.8-9| +| **Total** | **8** | | **48 vCPU, 104 GB RAM** | | | + +### Software Stack + +| Componente | Versión | Estado | Propósito | +|-------------------------|--------------|--------|-------------------------------------| +| K3s | v1.35.0+k3s1 | ✅ | Kubernetes distribution | +| Nginx Ingress | latest | ✅ | HTTP/HTTPS routing | +| Cert-Manager | v1.16.2 | ✅ | TLS certificates automation | +| ArgoCD | stable | ✅ | GitOps continuous delivery | +| HAProxy | 2.8.16 | ✅ | Load balancing (on LB nodes) | +| Metrics Server | included | ✅ | Resource metrics | +| CoreDNS | included | ✅ | Cluster DNS | +| Local Path Provisioner | included | ✅ | Dynamic storage | + +## 🌐 Arquitectura de Red + +``` + Internet + ↓ + [DNS: *.fuq.tv] + ↓ + ┌─────────────┴─────────────┐ + ↓ ↓ + [LB-01: .221] [LB-02: .203] + HAProxy HA HAProxy HA + ↓ ↓ + └─────────────┬─────────────┘ + ↓ + [Private Network 10.100.0.0/24] + ↓ + ┌───────────────────┼───────────────────┐ + ↓ ↓ ↓ + [CP-01: .2] [CP-02: .3] [CP-03: 
.4] + K3s + etcd K3s + etcd K3s + etcd + ↓ ↓ ↓ + ─────┴───────────────────┴───────────────────┴───── + ↓ ↓ ↓ + [Worker-01: .5] [Worker-02: .6] [Worker-03: .7] + Nginx Ingress Nginx Ingress Nginx Ingress + ↓ ↓ ↓ + [Pods] [Pods] [Pods] +``` + +## 🔐 Accesos + +### Kubernetes +```bash +# Kubeconfig +export KUBECONFIG=~/.kube/aiworker-config + +# Comandos +kubectl get nodes +kubectl get pods -A +kubectl get ingress -A +``` + +### ArgoCD +- **URL**: https://argocd.fuq.tv +- **Usuario**: admin +- **Password**: `LyPF4Hy0wvp52IoU` + +### HAProxy Stats +- **LB-01**: http://108.165.47.221:8404/stats +- **LB-02**: http://108.165.47.203:8404/stats +- **Credentials**: admin / aiworker2026 + +## 📋 DNS Configuración + +**Configurado en fuq.tv:** +``` +*.fuq.tv A 108.165.47.221 +*.fuq.tv A 108.165.47.203 +*.r.fuq.tv A 108.165.47.221 +*.r.fuq.tv A 108.165.47.203 +``` + +**Subdominios disponibles:** +- `app.fuq.tv` - Dashboard frontend +- `api.fuq.tv` - Backend API +- `git.fuq.tv` - Gitea server +- `argocd.fuq.tv` - ArgoCD UI +- `*.r.fuq.tv` - Preview environments (task-123.r.fuq.tv) + +## 🧪 Verificación + +### Test Application +```bash +# HTTP (redirect a HTTPS) +curl http://test.fuq.tv + +# HTTPS con TLS +curl https://test.fuq.tv + +# Verificar certificado +curl -v https://test.fuq.tv 2>&1 | grep "issuer" +``` + +### Cluster Health +```bash +# Nodes +kubectl get nodes -o wide + +# System pods +kubectl get pods -A + +# Certificates +kubectl get certificate -A + +# Ingresses +kubectl get ingress -A +``` + +## 📁 Namespaces Creados + +| Namespace | Propósito | Resource Quota | +|-----------------|----------------------------------------|-----------------------| +| control-plane | Backend, API, MySQL, Redis | 8 CPU, 16 GB RAM | +| agents | Claude Code agent pods | 20 CPU, 40 GB RAM | +| gitea | Git server | 2 CPU, 4 GB RAM | +| monitoring | Prometheus, Grafana (futuro) | - | +| argocd | GitOps controller | - | +| ingress-nginx | Ingress controller | - | +| cert-manager | TLS management 
| - | + +## 💰 Costos Mensuales + +``` +Control Planes: 3 × $15 = $45 +Workers: 3 × $29 = $87 +Load Balancers: 2 × $8 = $16 +───────────────────────────── +Total: $148/mes +``` + +## 🔄 Alta Disponibilidad + +✅ **Control Plane**: 3 nodos con etcd distribuido - tolera 1 fallo +✅ **Workers**: 3 nodos - workload distribuido +✅ **Load Balancers**: 2 nodos con DNS round-robin - tolera 1 fallo +✅ **Ingress**: Corriendo en todos los workers - redundante +✅ **Storage**: Local path provisioner en cada nodo + +## 🚀 Próximos Pasos + +1. **Desplegar Gitea** + ```bash + kubectl apply -f k8s/gitea/ + ``` + +2. **Desplegar Backend** + ```bash + kubectl apply -f k8s/backend/ + ``` + +3. **Desplegar Frontend** + ```bash + kubectl apply -f k8s/frontend/ + ``` + +4. **Configurar ArgoCD** + - Conectar repositorio Git + - Crear Applications + - Configurar auto-sync + +## 📝 Archivos Importantes + +- `CLUSTER-CREDENTIALS.md` - Credenciales y accesos (⚠️ NO COMMITEAR) +- `k8s-cluster-info.md` - Info técnica del cluster +- `scripts/install-k3s-cluster.sh` - Script instalación completa +- `scripts/setup-load-balancers.sh` - Script configuración LBs +- `docs/` - Documentación completa del proyecto + +## 🔧 Mantenimiento + +### Backup etcd +```bash +ssh root@108.165.47.233 "k3s etcd-snapshot save" +``` + +### Actualizar K3s +```bash +# En cada nodo (empezar por workers, luego control planes) +ssh root@ "curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.X.X+k3s1 sh -" +``` + +### Monitoreo +```bash +# Resource usage +kubectl top nodes +kubectl top pods -A + +# Logs +kubectl logs -f -n + +# Events +kubectl get events -A --sort-by='.lastTimestamp' +``` + +## 🎉 Estado Final + +**Cluster Status**: ✅ Production Ready + +**Capacidad Total**: +- 48 vCPUs +- 104 GB RAM +- ~2.5 TB Storage +- HA en todos los componentes críticos + +**Probado**: +- ✅ Cluster HA funcional +- ✅ Nginx Ingress routing +- ✅ TLS automático con Let's Encrypt +- ✅ DNS resolution +- ✅ Load balancing +- ✅ Private network 
communication + +**Listo para**: +- ✅ Desplegar aplicaciones +- ✅ GitOps con ArgoCD +- ✅ Auto-scaling de pods +- ✅ Certificados TLS automáticos +- ✅ Preview environments + +--- + +**¡Cluster AiWorker listo para producción! 🚀** diff --git a/DEVELOPMENT-WORKFLOW.md b/DEVELOPMENT-WORKFLOW.md new file mode 100644 index 0000000..3de68a5 --- /dev/null +++ b/DEVELOPMENT-WORKFLOW.md @@ -0,0 +1,427 @@ +# 🔄 Development Workflow - Cómo Trabajamos + +Flujo completo de desarrollo usando Gitea, container registry y CI/CD automático. + +--- + +## 📋 Overview del Workflow + +``` +Local Dev → Git Push → Gitea Actions → Docker Build → Registry Push → K8s Deploy +``` + +**Principios**: +- ✅ **CI/CD automático** - Cada push buildea automáticamente +- ✅ **Registry integrado** - Imágenes en Gitea, no Docker Hub +- ✅ **GitOps** - ArgoCD sincroniza desde Git +- ✅ **Sin Docker local** - Builds en el cluster +- ✅ **Preview automático** - Cada tarea tiene su environment + +--- + +## 🛠️ Workflow Típico: Nueva Feature + +### 1. Desarrollo Local + +```bash +# Backend +cd backend +bun run dev # Hot reload activado + +# Test localmente +curl http://localhost:3000/api/health +``` + +**Conexión a servicios K8s** (solo para desarrollo): +```bash +# Terminal 1: MariaDB +kubectl port-forward -n control-plane svc/mariadb 3306:3306 + +# Terminal 2: Redis +kubectl port-forward -n control-plane svc/redis 6379:6379 + +# Ahora el backend local puede conectar a DB/Redis en K8s +``` + +### 2. Commit y Push + +```bash +git add . +git commit -m "Add feature X + +Detailed description + +Co-Authored-By: Claude Sonnet 4.5 (1M context) " +git push origin main +``` + +### 3. CI/CD Automático (Gitea Actions) + +**Qué pasa automáticamente**: +1. Gitea recibe push +2. Workflow `.gitea/workflows/build.yml` se ejecuta +3. Runner en K8s (pod con Docker) buildea imagen +4. Push a `git.fuq.tv/admin/:latest` +5. 
Tag adicional con commit hash: `git.fuq.tv/admin/<repo>:<commit-hash>`
Review y Merge + +**Manual** (UI): +- Review code +- Click "Merge Pull Request" + +**Automático** (via backend): +```typescript +await giteaClient.mergePullRequest('admin', 'aiworker-backend', prNumber, 'squash') +``` + +### 5. Deploy Automático post-Merge + +Una vez mergeado a `main`, el CI/CD rebuildeará automáticamente. + +--- + +## 🚀 Workflow: Deploy de Preview + +Para cada tarea, se crea un preview environment aislado. + +### Proceso Completo + +```typescript +// 1. Agente completa tarea +// 2. Crea branch y PR (via MCP) + +// 3. Backend crea preview deployment +const previewNamespace = `preview-task-${taskId.slice(0, 8)}` + +// Crear namespace en K8s +await k8sClient.createNamespace(previewNamespace) + +// Deploy app +await k8sClient.createDeployment({ + namespace: previewNamespace, + name: 'app', + image: `git.fuq.tv/admin/aiworker-backend:${branchName}`, + // ... config +}) + +// Crear ingress +await k8sClient.createIngress({ + namespace: previewNamespace, + host: `task-${taskId.slice(0, 8)}.r.fuq.tv`, + // ... config +}) + +// 4. Usuario accede a: +// https://task-abc12345.r.fuq.tv + +// 5. Si aprueba → merge a staging +// 6. Cleanup automático después de 7 días +``` + +--- + +## 🎨 Workflow: Multi-Repo + +Eventualmente tendremos múltiples repos: + +``` +/admin/aiworker-backend → Backend API +/admin/aiworker-frontend → Dashboard React +/admin/aiworker-agents → Agent Docker image +/admin/aiworker-gitops → ArgoCD manifests +/aiworker/ → Proyectos de usuarios +``` + +**Cada repo**: +- Tiene su propio `.gitea/workflows/build.yml` +- Buildea a `git.fuq.tv//:` +- Deploy independiente + +--- + +## 🔐 Autenticación en Diferentes Contextos + +### 1. 
Git Clone/Push (HTTPS) + +```bash +# Con token en URL (inseguro pero funciona) +git clone https://admin:159a5de2a16d15f33e388b55b1276e431dbca3f3@git.fuq.tv/admin/myrepo.git + +# O configurar credential helper +git config --global credential.helper store +git clone https://git.fuq.tv/admin/myrepo.git +# Primera vez pedirá user/password, luego lo guarda +``` + +### 2. Docker Registry + +```bash +docker login git.fuq.tv -u admin -p 7401126cfb56ab2aebba17755bdc968c20768c27 +``` + +### 3. Kubernetes Pulls + +**Secret ya creado**: +```bash +# En control-plane y agents +kubectl get secret gitea-registry -n control-plane +kubectl get secret gitea-registry -n agents +``` + +**Usar en deployment**: +```yaml +imagePullSecrets: +- name: gitea-registry +``` + +### 4. API Calls + +**Header**: +```bash +curl -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + https://git.fuq.tv/api/v1/user/repos +``` + +### 5. Webhooks + +**Secret** (para verificar requests): +```bash +# Configurar en webhook +"secret": "webhook-secret-aiworker-2026" + +# Verificar en backend usando HMAC +``` + +--- + +## 📦 Build Strategies + +### Estrategia Actual: Gitea Actions + +**Ventajas**: +- ✅ Sin Docker local necesario +- ✅ Build en cluster (más recursos) +- ✅ Histórico de builds en UI +- ✅ Cacheo de layers + +**Cómo funciona**: +``` +Push → Gitea Actions → Runner Pod (DinD) → Docker build → Push to registry +``` + +**Configuración**: +- Runner: Pod en namespace `gitea-actions` +- Docker-in-Docker (DinD) para builds +- Volumenes compartidos para cache + +### Alternativa Futura: ArgoCD Image Updater + +Cuando esté configurado: +``` +Push → Build → Registry → ArgoCD detecta → Auto-update K8s → Deploy +``` + +--- + +## 🎯 Checklist de Feature Completa + +- [ ] Desarrollo local con `bun run dev` +- [ ] Test manual de endpoints +- [ ] Commit con mensaje descriptivo +- [ ] Push a Gitea +- [ ] Verificar build en Actions (verde ✅) +- [ ] Verificar imagen en registry +- [ ] Deploy en K8s (manual 
o ArgoCD) +- [ ] Verificar pods running +- [ ] Test en producción (`api.fuq.tv`) +- [ ] Verificar logs sin errores + +--- + +## 🐛 Troubleshooting del Workflow + +### Build Falla en Actions + +```bash +# Ver logs del job +https://git.fuq.tv/admin/aiworker-backend/actions/runs/ + +# Ver runner logs +kubectl logs -n gitea-actions deployment/gitea-runner -c runner --tail=200 + +# Problemas comunes: +# - Dockerfile error → Fix Dockerfile +# - Dependencias faltantes → Update package.json +# - Registry auth → Verificar REGISTRY_TOKEN secret +``` + +**Fix**: Corregir error, commit, push de nuevo. + +### Build OK pero K8s no Pulla Imagen + +```bash +# Verificar secret +kubectl get secret gitea-registry -n control-plane + +# Verificar imagePullSecrets en deployment +kubectl get deployment backend -n control-plane -o yaml | grep imagePullSecrets + +# Ver eventos +kubectl describe pod -n control-plane | grep -i pull +``` + +**Fix**: Recrear secret o agregar `imagePullSecrets` al deployment. + +### Imagen en Registry pero versión vieja en K8s + +```bash +# Force pull nueva imagen +kubectl rollout restart deployment/backend -n control-plane + +# O delete pod para recrear +kubectl delete pod -n control-plane +``` + +**Nota**: Si usas tag `latest`, K8s cachea. Mejor usar tags específicos (`v1.0.0`, `main-abc1234`). + +--- + +## 📊 Monitoring del Workflow + +### CI/CD Health + +```bash +# Runner status +kubectl get pods -n gitea-actions + +# Workflows recientes +open https://git.fuq.tv/admin/aiworker-backend/actions + +# Registry usage +open https://git.fuq.tv/admin/-/packages +``` + +### Deployments + +```bash +# Backend +kubectl get pods -n control-plane +kubectl logs -f deployment/backend -n control-plane + +# Frontend (cuando exista) +kubectl get pods -n control-plane +kubectl logs -f deployment/frontend -n control-plane +``` + +--- + +## 🎓 Best Practices + +1. **Commits frecuentes** - Push pequeños, builds rápidos +2. **Tags semánticos** - `v1.0.0`, `v1.1.0` para releases +3. 
**Branch strategy** - `main` (prod), `develop` (staging), `feature/*` (features) +4. **PR reviews** - Siempre review antes de merge +5. **Registry cleanup** - Eliminar imágenes viejas periódicamente +6. **Logs** - Siempre verificar logs post-deploy + +--- + +## 🔗 Referencias + +- **Guía Gitea completa**: `GITEA-GUIDE.md` +- **Container Registry**: `docs/CONTAINER-REGISTRY.md` (puedes eliminar después) +- **API Gitea**: `docs/02-backend/gitea-integration.md` (código de ejemplo) +- **CI/CD**: `docs/06-deployment/ci-cd.md` + +--- + +**✅ Con este workflow, el desarrollo es fluido: código local → push → build automático → deploy → verificar.** diff --git a/GITEA-GUIDE.md b/GITEA-GUIDE.md new file mode 100644 index 0000000..893e600 --- /dev/null +++ b/GITEA-GUIDE.md @@ -0,0 +1,830 @@ +# 📚 Gitea - Guía Completa de Uso + +Toda la información de Gitea en un solo lugar: autenticación, API, registry, webhooks, y CI/CD. + +--- + +## 🌐 Acceso Web + +**URL**: https://git.fuq.tv + +**Credenciales Admin**: +- Usuario: `admin` +- Password: `admin123` + +**Primera vez**: Ya configurado, listo para usar. + +--- + +## 🔑 Autenticación y Tokens + +### Tokens Existentes + +| Nombre | Token | Scopes | Uso | +|--------|-------|--------|-----| +| full-access | `159a5de2a16d15f33e388b55b1276e431dbca3f3` | all | API completa | +| docker-registry | `7401126cfb56ab2aebba17755bdc968c20768c27` | write:package, read:package | Container registry | + +### Crear Nuevo Token (CLI) + +```bash +# Desde el pod de Gitea +kubectl exec -n gitea gitea-0 -- su git -c "gitea admin user generate-access-token \ + --username admin \ + --scopes all \ + --token-name my-token \ + --raw" +``` + +### Crear Token (Web UI) + +1. Login en https://git.fuq.tv +2. Perfil → Settings → Applications +3. Generate New Token +4. Seleccionar scopes necesarios +5. 
Copiar token (solo se muestra una vez) + +**Scopes importantes**: +- `write:repository` - Crear repos, push +- `write:package` - Push imágenes Docker +- `read:package` - Pull imágenes Docker +- `write:issue` - Gestionar issues +- `write:user` - Operaciones de usuario +- `all` - Acceso completo + +--- + +## 🐳 Container Registry + +### Configuración + +**Registry URL**: `git.fuq.tv` +**Formato de imágenes**: `git.fuq.tv//:` + +### Login Docker + +```bash +# Con token de registry +docker login git.fuq.tv -u admin -p 7401126cfb56ab2aebba17755bdc968c20768c27 + +# O de forma segura +echo "7401126cfb56ab2aebba17755bdc968c20768c27" | docker login git.fuq.tv -u admin --password-stdin +``` + +### Build y Push + +```bash +# Build +docker build -t git.fuq.tv/admin/aiworker-backend:v1.0.0 . + +# Push +docker push git.fuq.tv/admin/aiworker-backend:v1.0.0 + +# Tag como latest +docker tag git.fuq.tv/admin/aiworker-backend:v1.0.0 git.fuq.tv/admin/aiworker-backend:latest +docker push git.fuq.tv/admin/aiworker-backend:latest +``` + +### Pull + +```bash +docker pull git.fuq.tv/admin/aiworker-backend:latest +``` + +### Ver Imágenes (UI) + +1. https://git.fuq.tv +2. Perfil → Packages +3. 
O: https://git.fuq.tv/admin/-/packages + +### Kubernetes Pull Secret + +**Ya creado** en namespaces `control-plane` y `agents`: + +```bash +# Verificar +kubectl get secret gitea-registry -n control-plane + +# Crear en nuevo namespace +kubectl create secret docker-registry gitea-registry \ + --docker-server=git.fuq.tv \ + --docker-username=admin \ + --docker-password=7401126cfb56ab2aebba17755bdc968c20768c27 \ + --docker-email=hector@teamsuqad.io \ + -n +``` + +**Uso en deployment**: +```yaml +spec: + template: + spec: + imagePullSecrets: + - name: gitea-registry + containers: + - name: app + image: git.fuq.tv/admin/myapp:latest +``` + +--- + +## 🔌 API de Gitea + +**Base URL**: `https://git.fuq.tv/api/v1` +**Documentación**: https://git.fuq.tv/api/swagger + +### Autenticación API + +**Header**: +``` +Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3 +``` + +### Ejemplos de Uso + +#### Crear Repositorio + +```bash +curl -X POST "https://git.fuq.tv/api/v1/user/repos" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "my-new-repo", + "description": "My description", + "private": false, + "auto_init": true + }' +``` + +#### Listar Repositorios + +```bash +curl "https://git.fuq.tv/api/v1/user/repos" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" +``` + +#### Crear Branch + +```bash +curl -X POST "https://git.fuq.tv/api/v1/repos/admin/myrepo/branches" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "new_branch_name": "feature-x", + "old_branch_name": "main" + }' +``` + +#### Crear Pull Request + +```bash +curl -X POST "https://git.fuq.tv/api/v1/repos/admin/myrepo/pulls" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "title": "My PR", + "body": "Description", + "head": "feature-x", + "base": "main" + }' +``` + 
+#### Merge Pull Request + +```bash +curl -X POST "https://git.fuq.tv/api/v1/repos/admin/myrepo/pulls/1/merge" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "Do": "merge", + "merge_when_checks_succeed": false + }' +``` + +### Cliente TypeScript (Backend) + +**Ubicación**: `backend/src/services/gitea/client.ts` + +**Uso**: +```typescript +import { GiteaClient } from './services/gitea/client' + +const gitea = new GiteaClient({ + url: 'https://git.fuq.tv', + token: process.env.GITEA_TOKEN, + owner: 'admin' +}) + +// Crear repo +const repo = await gitea.createRepo('my-app', { + description: 'My application', + private: false, + autoInit: true +}) + +// Crear branch +await gitea.createBranch('admin', 'my-app', 'feature-auth', 'main') + +// Crear PR +const pr = await gitea.createPullRequest('admin', 'my-app', { + title: 'Add authentication', + body: 'Implements JWT auth', + head: 'feature-auth', + base: 'main' +}) +``` + +**Referencia completa**: `docs/02-backend/gitea-integration.md` + +--- + +## 🪝 Webhooks + +### Configurar Webhook (API) + +```bash +curl -X POST "https://git.fuq.tv/api/v1/repos/admin/myrepo/hooks" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "type": "gitea", + "config": { + "url": "https://api.fuq.tv/api/webhooks/gitea", + "content_type": "json", + "secret": "webhook-secret-123" + }, + "events": ["push", "pull_request"], + "active": true + }' +``` + +### Handler Backend + +```typescript +// backend/src/api/routes/webhooks.ts +export async function handleGiteaWebhook(req: Request) { + const signature = req.headers.get('x-gitea-signature') + const event = req.headers.get('x-gitea-event') + const payload = await req.json() + + // Verify signature + // ... 
verification logic + + switch (event) { + case 'push': + await handlePushEvent(payload) + break + case 'pull_request': + await handlePREvent(payload) + break + } + + return Response.json({ success: true }) +} +``` + +**Eventos importantes**: +- `push` - Nuevo commit +- `pull_request` - PR creado/actualizado +- `pull_request_closed` - PR cerrado/mergeado + +--- + +## 🔄 Gitea Actions (CI/CD) + +### Workflow File + +**Ubicación**: `.gitea/workflows/.yml` + +**Ejemplo** (Build Docker image): +```yaml +name: Build and Push + +on: + push: + branches: [main] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build image + run: docker build -t git.fuq.tv/admin/myapp:latest . + + - name: Login to registry + run: echo "${{ secrets.REGISTRY_TOKEN }}" | docker login git.fuq.tv -u admin --password-stdin + + - name: Push image + run: docker push git.fuq.tv/admin/myapp:latest +``` + +### Secrets en Repositorio + +**Crear secret** (API): +```bash +curl -X PUT "https://git.fuq.tv/api/v1/repos/admin/myrepo/actions/secrets/MY_SECRET" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{"data":"my-secret-value"}' +``` + +**Crear secret** (Web UI): +1. Repo → Settings → Secrets → Actions +2. Add Secret +3. Name: `REGISTRY_TOKEN` +4. 
Value: `7401126cfb56ab2aebba17755bdc968c20768c27` + +**Uso en workflow**: +```yaml +- name: Use secret + run: echo "${{ secrets.REGISTRY_TOKEN }}" +``` + +### Ver Workflows + +**UI**: https://git.fuq.tv/admin//actions + +**Runner logs** (K8s): +```bash +kubectl logs -n gitea-actions deployment/gitea-runner -c runner --tail=100 +``` + +### Runner Status + +**Verificar runner activo**: +```bash +kubectl get pods -n gitea-actions + +# Logs +kubectl logs -n gitea-actions deployment/gitea-runner -c runner + +# Restart si es necesario +kubectl rollout restart deployment/gitea-runner -n gitea-actions +``` + +**Ver en UI**: https://git.fuq.tv/admin/runners + +--- + +## 👥 Gestión de Usuarios + +### Crear Usuario (CLI) + +```bash +kubectl exec -n gitea gitea-0 -- su git -c "gitea admin user create \ + --username myuser \ + --password mypass123 \ + --email user@example.com \ + --admin" +``` + +### Cambiar Password + +```bash +kubectl exec -n gitea gitea-0 -- su git -c "gitea admin user change-password \ + --username admin \ + --password newpassword" +``` + +### Listar Usuarios + +```bash +kubectl exec -n gitea gitea-0 -- su git -c "gitea admin user list" +``` + +--- + +## 📂 Organizaciones + +### Crear Organización (API) + +```bash +curl -X POST "https://git.fuq.tv/api/v1/orgs" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "username": "aiworker", + "full_name": "AiWorker Organization", + "description": "AiWorker platform repos" + }' +``` + +### Crear Repo en Organización + +```bash +curl -X POST "https://git.fuq.tv/api/v1/org/aiworker/repos" \ + -H "Authorization: token 159a5de2a16d15f33e388b55b1276e431dbca3f3" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "my-project", + "auto_init": true + }' +``` + +--- + +## 🔧 Configuración de Gitea + +### Ver Configuración Actual + +```bash +kubectl exec -n gitea gitea-0 -- cat /data/gitea/conf/app.ini +``` + +### Configuración Importante + 
+**Database**: +```ini +[database] +DB_TYPE = mysql +HOST = mariadb.control-plane.svc.cluster.local:3306 +NAME = gitea +USER = root +PASSWD = AiWorker2026_RootPass! +``` + +**Server**: +```ini +[server] +DOMAIN = git.fuq.tv +ROOT_URL = https://git.fuq.tv/ +HTTP_PORT = 3000 +SSH_PORT = 2222 +``` + +**Packages** (Container Registry): +```ini +[packages] +ENABLED = true +``` + +### Restart Gitea + +```bash +kubectl delete pod gitea-0 -n gitea +# Esperar a que se recree automáticamente +``` + +--- + +## 🔐 SSH Access a Repos + +### SSH URL Format + +``` +ssh://git@git.fuq.tv:2222//.git +``` + +### SSH Clone + +```bash +# Nota: Puerto 2222, no 22 +git clone ssh://git@git.fuq.tv:2222/admin/aiworker-backend.git +``` + +### Agregar SSH Key (UI) + +1. Login → Settings → SSH/GPG Keys +2. Add Key +3. Paste public key + +### Agregar SSH Key (CLI) + +```bash +# Generar key +ssh-keygen -t ed25519 -C "agent@aiworker.dev" -f ~/.ssh/gitea_key + +# Agregar a Gitea (manual en UI o via API) +``` + +--- + +## 🎬 Gitea Actions - Guía Completa + +### Runner en K8s + +**Namespace**: `gitea-actions` +**Pod**: `gitea-runner-*` + +**Status**: +```bash +kubectl get pods -n gitea-actions +kubectl logs -n gitea-actions deployment/gitea-runner -c runner +``` + +### Workflow Syntax + +Compatible con GitHub Actions. 
Ubicación: `.gitea/workflows/*.yml` + +**Triggers**: +```yaml +on: + push: + branches: [main, develop] + tags: ['v*'] + pull_request: + branches: [main] + schedule: + - cron: '0 0 * * *' # Diario +``` + +**Jobs**: +```yaml +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run script + run: echo "Hello" +``` + +### Actions Disponibles + +Compatible con GitHub Actions marketplace: +- `actions/checkout@v4` +- `docker/build-push-action@v5` +- `docker/login-action@v3` +- Y muchas más + +### Secrets en Actions + +**Acceso**: +```yaml +- name: Use secret + run: echo "${{ secrets.MY_SECRET }}" + env: + API_KEY: ${{ secrets.API_KEY }} +``` + +**Secrets necesarios para builds**: +- `REGISTRY_TOKEN` - Ya configurado en `aiworker-backend` + +### Variables de Entorno Automáticas + +```yaml +${{ github.repository }} # admin/aiworker-backend +${{ github.sha }} # Commit hash +${{ github.ref }} # refs/heads/main +${{ github.actor }} # Usuario que hizo push +${{ github.event_name }} # push, pull_request, etc. +``` + +### Debug de Workflows + +```bash +# Ver en UI +https://git.fuq.tv/admin//actions/runs/ + +# Ver runner logs +kubectl logs -n gitea-actions deployment/gitea-runner -c runner --tail=200 + +# Ver Docker daemon logs +kubectl logs -n gitea-actions deployment/gitea-runner -c dind --tail=50 +``` + +--- + +## 🛠️ Operaciones Comunes desde Backend + +### Inicializar Proyecto Nuevo + +```typescript +// 1. Crear repo en Gitea +const repo = await giteaClient.createRepo('my-project', { + description: 'Project description', + autoInit: true +}) + +// 2. Setup webhooks +await giteaClient.createWebhook('admin', 'my-project', { + url: 'https://api.fuq.tv/api/webhooks/gitea', + events: ['push', 'pull_request'] +}) + +// 3. 
Guardar en DB +await db.insert(projects).values({ + id: crypto.randomUUID(), + name: 'my-project', + giteaRepoUrl: repo.clone_url, + giteaOwner: 'admin', + giteaRepoName: 'my-project' +}) +``` + +### Workflow de Tarea Completa + +```typescript +// 1. Obtener tarea +const task = await db.query.tasks.findFirst({ + where: eq(tasks.state, 'backlog') +}) + +// 2. Crear branch para tarea +await giteaClient.createBranch( + project.giteaOwner, + project.giteaRepoName, + `task-${task.id}`, + 'main' +) + +// 3. Agente trabaja (commits via git)... + +// 4. Crear PR +const pr = await giteaClient.createPullRequest( + project.giteaOwner, + project.giteaRepoName, + { + title: task.title, + body: task.description, + head: `task-${task.id}`, + base: 'main' + } +) + +// 5. Guardar PR URL +await db.update(tasks) + .set({ prUrl: pr.html_url }) + .where(eq(tasks.id, task.id)) +``` + +### Merge Automático + +```typescript +await giteaClient.mergePullRequest( + 'admin', + 'my-project', + prNumber, + 'squash' // o 'merge', 'rebase' +) +``` + +--- + +## 🗂️ Estructura de Archivos en Gitea + +### En el Pod + +```bash +# Datos de Gitea +/data/gitea/ +├── conf/app.ini # Configuración +├── log/ # Logs +├── git/repositories/ # Repos Git +├── git/lfs/ # Git LFS +├── packages/ # Container registry +└── attachments/ # Uploads + +# Ejecutable +/usr/local/bin/gitea +``` + +### Comandos CLI de Gitea + +```bash +# Todos los comandos deben ejecutarse como usuario 'git' +kubectl exec -n gitea gitea-0 -- su git -c "gitea " + +# Ejemplos: +gitea admin user list +gitea admin user create --username x --password y --email z +gitea admin regenerate keys +gitea dump # Backup completo +``` + +--- + +## 📊 Monitoring y Maintenance + +### Logs de Gitea + +```bash +# Logs del contenedor +kubectl logs -n gitea gitea-0 --tail=100 -f + +# Logs de aplicación (dentro del pod) +kubectl exec -n gitea gitea-0 -- tail -f /data/gitea/log/gitea.log +``` + +### Health Check + +```bash +# HTTP health +curl 
https://git.fuq.tv/api/healthz + +# Database connection +kubectl exec -n gitea gitea-0 -- su git -c "gitea doctor check --run" +``` + +### Backup + +```bash +# Backup completo (crea archivo .zip) +kubectl exec -n gitea gitea-0 -- su git -c "gitea dump -f /tmp/gitea-backup.zip" + +# Copiar backup fuera +kubectl cp gitea/gitea-0:/tmp/gitea-backup.zip ./gitea-backup-$(date +%Y%m%d).zip +``` + +### Storage + +**PVC**: 50Gi con Longhorn (3 réplicas HA) + +```bash +# Ver PVC +kubectl get pvc -n gitea + +# Ver volumen Longhorn +kubectl get volumes.longhorn.io -n longhorn-system | grep gitea + +# Ver réplicas +kubectl get replicas.longhorn.io -n longhorn-system | grep $(kubectl get pvc -n gitea gitea-data -o jsonpath='{.spec.volumeName}') +``` + +--- + +## 🚀 Quick Actions + +### Crear Proyecto Completo (Script) + +```bash +#!/bin/bash +PROJECT_NAME="my-project" +TOKEN="159a5de2a16d15f33e388b55b1276e431dbca3f3" + +# 1. Crear repo +curl -X POST "https://git.fuq.tv/api/v1/user/repos" \ + -H "Authorization: token $TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"$PROJECT_NAME\",\"auto_init\":true}" + +# 2. Crear webhook +curl -X POST "https://git.fuq.tv/api/v1/repos/admin/$PROJECT_NAME/hooks" \ + -H "Authorization: token $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"type":"gitea","config":{"url":"https://api.fuq.tv/webhooks/gitea","content_type":"json"},"events":["push","pull_request"],"active":true}' + +# 3. Clone +git clone https://git.fuq.tv/admin/$PROJECT_NAME.git +cd $PROJECT_NAME + +# 4. 
Crear workflow +mkdir -p .gitea/workflows +cat > .gitea/workflows/build.yml << 'EOF' +name: Build +on: [push] +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: echo "Build steps here" +EOF + +git add .gitea && git commit -m "Add CI/CD" && git push +``` + +--- + +## 🎯 Resumen de URLs y Credenciales + +### Web UI +- **Gitea**: https://git.fuq.tv +- **Login**: admin / admin123 +- **Settings**: https://git.fuq.tv/user/settings +- **Packages**: https://git.fuq.tv/admin/-/packages +- **Actions**: https://git.fuq.tv/admin//actions + +### API +- **Base**: https://git.fuq.tv/api/v1 +- **Swagger**: https://git.fuq.tv/api/swagger +- **Token Full**: `159a5de2a16d15f33e388b55b1276e431dbca3f3` + +### Registry +- **URL**: git.fuq.tv +- **Token**: `7401126cfb56ab2aebba17755bdc968c20768c27` +- **Format**: `git.fuq.tv//:` + +### SSH +- **URL**: `ssh://git@git.fuq.tv:2222//.git` +- **Port**: 2222 (no 22) + +--- + +## 💡 Tips + +1. **Tokens**: Full access token para API, registry token solo para Docker +2. **Branches**: Siempre desde `main` a menos que especifique otro +3. **PRs**: Usar `squash` merge para historial limpio +4. **Webhooks**: Verificar que `ALLOWED_HOST_LIST` incluye tu dominio +5. **Actions**: Primer build tarda más (descarga imágenes) +6. **Registry**: Las imágenes se guardan en Longhorn HA storage + +--- + +**📖 Referencia completa**: `docs/02-backend/gitea-integration.md` y `docs/CONTAINER-REGISTRY.md` + +**🔧 Para implementar en backend**: Ver código de ejemplo en `docs/02-backend/gitea-integration.md` líneas 5-400 diff --git a/NEXT-SESSION.md b/NEXT-SESSION.md new file mode 100644 index 0000000..a0b92d8 --- /dev/null +++ b/NEXT-SESSION.md @@ -0,0 +1,429 @@ +# 📋 Próxima Sesión - Checklist Ejecutable + +**Objetivo**: Completar Backend API y MCP Server básico +**Tiempo estimado**: 2-3 horas + +--- + +## ✅ PRE-REQUISITOS (Verificar antes de empezar) + +```bash +# 1. 
Cluster funcionando +export KUBECONFIG=~/.kube/aiworker-config +kubectl get nodes +# Debe mostrar 6 nodos Ready + +# 2. Servicios corriendo +kubectl get pods -n control-plane +# mariadb-0: Running +# redis-xxx: Running + +kubectl get pods -n gitea +# gitea-0: Running + +# 3. Backend local +cd backend +bun --version +# 1.3.6 + +# 4. Gitea accesible +curl -I https://git.fuq.tv +# HTTP/2 200 +``` + +**Si algo falla, consulta**: `CLUSTER-READY.md` y `TROUBLESHOOTING.md` + +--- + +## 🎯 PASO 1: Verificar CI/CD (15 min) + +### 1.1 Revisar último build +```bash +# Ver en Gitea Actions +open https://git.fuq.tv/admin/aiworker-backend/actions +``` + +**Opciones**: +- ✅ **Si build exitoso**: Continuar a paso 2 +- ❌ **Si build fallido**: Ver logs, corregir, push de nuevo + +### 1.2 Verificar imagen en registry +```bash +# Vía UI +open https://git.fuq.tv/admin/-/packages + +# Vía API +curl https://git.fuq.tv/api/v1/packages/admin/container?type=container +``` + +**Debe existir**: `aiworker-backend` con tag `latest` + +### 1.3 Si no hay imagen, build manual +```bash +# Desde un nodo del cluster (si Docker local no funciona) +ssh root@108.165.47.225 # worker-01 + +cd /tmp +git clone https://git.fuq.tv/admin/aiworker-backend.git +cd aiworker-backend + +docker build -t git.fuq.tv/admin/aiworker-backend:latest . 
+docker login git.fuq.tv -u admin -p 7401126cfb56ab2aebba17755bdc968c20768c27 +docker push git.fuq.tv/admin/aiworker-backend:latest +``` + +--- + +## 🎯 PASO 2: Implementar API Routes (45 min) + +### 2.1 Crear estructura de routes +```bash +cd backend/src/api +mkdir routes + +# Archivos a crear: +# - routes/projects.ts +# - routes/tasks.ts +# - routes/agents.ts +# - routes/index.ts +``` + +### 2.2 Implementar Projects API + +**Archivo**: `src/api/routes/projects.ts` + +**Endpoints necesarios**: +```typescript +GET /api/projects // List all +GET /api/projects/:id // Get one +POST /api/projects // Create +PATCH /api/projects/:id // Update +DELETE /api/projects/:id // Delete +``` + +**Referencia**: `docs/02-backend/api-endpoints.md` (líneas 15-80) + +**Conectar con Bun.serve()**: +```typescript +// En src/index.ts +import { handleProjectRoutes } from './api/routes/projects' + +if (url.pathname.startsWith('/api/projects')) { + return handleProjectRoutes(req, url) +} +``` + +### 2.3 Implementar Tasks API + +**Archivo**: `src/api/routes/tasks.ts` + +**Endpoints principales**: +```typescript +GET /api/tasks // List with filters +GET /api/tasks/:id // Get details +POST /api/tasks // Create +PATCH /api/tasks/:id // Update +POST /api/tasks/:id/respond // Respond to question +``` + +### 2.4 Probar APIs localmente +```bash +# Terminal 1: Port-forward MariaDB +kubectl port-forward -n control-plane svc/mariadb 3306:3306 & + +# Terminal 2: Port-forward Redis +kubectl port-forward -n control-plane svc/redis 6379:6379 & + +# Terminal 3: Run backend +cd backend +bun run dev + +# Terminal 4: Test +curl http://localhost:3000/api/health +curl http://localhost:3000/api/projects +``` + +--- + +## 🎯 PASO 3: MCP Server Básico (60 min) + +### 3.1 Crear estructura MCP +```bash +mkdir -p src/services/mcp +# Archivos: +# - services/mcp/server.ts +# - services/mcp/tools.ts +# - services/mcp/handlers.ts +``` + +### 3.2 Implementar herramientas básicas + +**Herramientas mínimas para MVP**: +1. 
`get_next_task` - Obtener siguiente tarea +2. `update_task_status` - Actualizar estado +3. `create_branch` - Crear rama en Gitea +4. `create_pull_request` - Crear PR + +**Referencia**: `docs/05-agents/mcp-tools.md` + +**Template básico**: +```typescript +// src/services/mcp/server.ts +import { Server } from '@modelcontextprotocol/sdk/server/index.js' + +export class MCPServer { + private server: Server + + constructor() { + this.server = new Server({ + name: 'aiworker-mcp', + version: '1.0.0' + }, { + capabilities: { tools: {} } + }) + + this.setupHandlers() + } + + // Implementar handlers... +} +``` + +### 3.3 Conectar MCP con Bun.serve() + +**Opciones**: +- **A**: Puerto separado (3100) para MCP +- **B**: Ruta `/mcp` en mismo server + +**Recomendación**: Opción A (puerto 3100) + +--- + +## 🎯 PASO 4: Integración con Gitea (30 min) + +### 4.1 Cliente API de Gitea + +**Archivo**: `src/services/gitea/client.ts` + +**Operaciones necesarias**: +```typescript +- createRepo(name, description) +- createBranch(owner, repo, branch, from) +- createPullRequest(owner, repo, {title, body, head, base}) +- mergePullRequest(owner, repo, number) +``` + +**Usar**: +- Axios para HTTP requests +- Base URL: `https://git.fuq.tv/api/v1` +- Token: Variable de entorno `GITEA_TOKEN` + +**Referencia**: `docs/02-backend/gitea-integration.md` (líneas 10-200) + +### 4.2 Test de integración +```bash +# Crear un repo de prueba vía API +bun run src/test-gitea.ts +``` + +--- + +## 🎯 PASO 5: Deploy Backend en K8s (30 min) + +### 5.1 Crear manifests + +**Directorio**: `k8s/backend/` + +**Archivos necesarios**: +```yaml +# deployment.yaml +# service.yaml +# ingress.yaml +# secrets.yaml +``` + +**Template deployment**: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend + namespace: control-plane +spec: + replicas: 2 + selector: + matchLabels: + app: backend + template: + spec: + imagePullSecrets: + - name: gitea-registry + containers: + - name: backend + image: 
git.fuq.tv/admin/aiworker-backend:latest + ports: + - containerPort: 3000 + - containerPort: 3100 # MCP + env: + - name: DB_HOST + value: mariadb.control-plane.svc.cluster.local + # ... más env vars +``` + +### 5.2 Crear secrets +```bash +kubectl create secret generic backend-secrets -n control-plane \ + --from-literal=jwt-secret=your-secret \ + --from-literal=anthropic-api-key=your-key +``` + +### 5.3 Deploy +```bash +kubectl apply -f k8s/backend/ +kubectl get pods -n control-plane +kubectl logs -f deployment/backend -n control-plane +``` + +### 5.4 Crear Ingress +```yaml +# Para api.fuq.tv +host: api.fuq.tv +backend: backend:3000 +``` + +### 5.5 Verificar +```bash +curl https://api.fuq.tv/api/health +``` + +--- + +## 🎯 PASO 6: Test End-to-End (15 min) + +### 6.1 Crear proyecto vía API +```bash +curl -X POST https://api.fuq.tv/api/projects \ + -H "Content-Type: application/json" \ + -d '{ + "name": "test-project", + "description": "First project" + }' +``` + +### 6.2 Crear tarea +```bash +curl -X POST https://api.fuq.tv/api/tasks \ + -H "Content-Type: application/json" \ + -d '{ + "projectId": "xxx", + "title": "Test task", + "description": "First automated task" + }' +``` + +### 6.3 Verificar en DB +```bash +kubectl exec -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! 
aiworker \ + -e "SELECT * FROM projects; SELECT * FROM tasks;" +``` + +--- + +## 📝 NOTAS IMPORTANTES + +### Desarrollo Local vs K8s + +**Local (desarrollo)**: +- Port-forward para MariaDB y Redis +- `bun run dev` con hot-reload +- Cambios rápidos + +**K8s (testing/producción)**: +- Build → Push → Deploy +- Migrations automáticas en startup +- Logs con kubectl + +### Migrations + +**SIEMPRE** automáticas en el código: +```typescript +// src/index.ts +await runMigrations() // Al inicio +``` + +**NUNCA** manuales con port-forward + +### Secrets + +**Desarrollo**: `.env` (git-ignored) +**Producción**: Kubernetes Secrets + +```bash +kubectl create secret generic app-secrets -n namespace \ + --from-env-file=.env.production +``` + +--- + +## 🐛 TROUBLESHOOTING + +### Si backend no arranca +```bash +# Ver logs +kubectl logs -n control-plane deployment/backend + +# Verificar DB connection +kubectl exec -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! -e "SELECT 1" + +# Verificar Redis +kubectl exec -n control-plane deployment/redis -- redis-cli ping +``` + +### Si Actions no funciona +```bash +# Ver runner +kubectl get pods -n gitea-actions +kubectl logs -n gitea-actions deployment/gitea-runner -c runner + +# Restart runner +kubectl rollout restart deployment/gitea-runner -n gitea-actions +``` + +### Si Ingress no resuelve +```bash +# Verificar DNS +dig api.fuq.tv +short +# Debe mostrar: 108.165.47.221 y 108.165.47.203 + +# Verificar certificado +kubectl get certificate -n control-plane + +# Logs de Ingress +kubectl logs -n ingress-nginx deployment/ingress-nginx-controller --tail=50 +``` + +--- + +## ✅ CHECKLIST DE SESIÓN + +Al final de cada sesión, verificar: + +- [ ] Código commitado y pusheado a Gitea +- [ ] Build de CI/CD exitoso +- [ ] Pods corriendo en K8s +- [ ] Endpoints accesibles vía HTTPS +- [ ] Documentación actualizada +- [ ] Credenciales documentadas en lugar seguro +- [ ] Tests básicos pasando + +--- + +## 🎉 META + +**Completado**: 
Infraestructura HA + Backend base +**Próximo hito**: Backend API funcional + MCP Server +**Hito final**: Sistema completo de agentes autónomos + +**¡Excelente progreso! Sigue el roadmap y lo tendrás listo pronto! 🚀** diff --git a/QUICK-REFERENCE.md b/QUICK-REFERENCE.md new file mode 100644 index 0000000..1760e33 --- /dev/null +++ b/QUICK-REFERENCE.md @@ -0,0 +1,390 @@ +# ⚡ Quick Reference - Comandos y URLs + +Referencia rápida de todo lo importante en un solo lugar. + +--- + +## 🌐 URLs + +| Servicio | URL | Credenciales | +|----------|-----|--------------| +| Gitea | https://git.fuq.tv | admin / admin123 | +| ArgoCD | https://argocd.fuq.tv | admin / LyPF4Hy0wvp52IoU | +| Longhorn | https://longhorn.fuq.tv | admin / aiworker2026 | +| Backend Repo | https://git.fuq.tv/admin/aiworker-backend | - | +| Actions | https://git.fuq.tv/admin/aiworker-backend/actions | - | +| Packages | https://git.fuq.tv/admin/-/packages | - | +| Test App | https://test.fuq.tv | - | +| HAProxy Stats | http://108.165.47.221:8404/stats | admin / aiworker2026 | + +--- + +## 🔑 Tokens y Secrets + +```bash +# Gitea Full Access +159a5de2a16d15f33e388b55b1276e431dbca3f3 + +# Gitea Registry +7401126cfb56ab2aebba17755bdc968c20768c27 + +# K3s Token +K10e74a5aacfaf4e2e0a291c3b369db8588cf0b9c2590a4d66e04ab960e24fcb4db::server:bc53704a9707d3cd9188af9e558ab50c +``` + +--- + +## 💾 Base de Datos + +### MariaDB (Interno) +```bash +Host: mariadb.control-plane.svc.cluster.local +Port: 3306 +User: aiworker +Pass: AiWorker2026_UserPass! +DB: aiworker +``` + +### Conexión desde pod +```bash +kubectl exec -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! 
aiworker +``` + +### Bases de datos +- `aiworker` - App principal +- `gitea` - Gitea data + +--- + +## ☸️ Kubernetes + +### Kubeconfig +```bash +export KUBECONFIG=~/.kube/aiworker-config +``` + +### Comandos Básicos +```bash +# Nodos +kubectl get nodes + +# Todos los pods +kubectl get pods -A + +# Pods en namespace +kubectl get pods -n control-plane + +# Logs +kubectl logs -f -n + +# Exec +kubectl exec -it -n -- /bin/sh + +# Port-forward +kubectl port-forward svc/ 3000:3000 -n +``` + +### Namespaces Principales +- `control-plane` - Backend, DB, Redis +- `agents` - Claude agents +- `gitea` - Git server +- `gitea-actions` - CI/CD runner +- `ingress-nginx` - Ingress +- `cert-manager` - TLS +- `longhorn-system` - Storage +- `argocd` - GitOps + +--- + +## 🐳 Container Registry + +### Login +```bash +docker login git.fuq.tv -u admin -p 7401126cfb56ab2aebba17755bdc968c20768c27 +``` + +### Push +```bash +docker build -t git.fuq.tv/admin/: . +docker push git.fuq.tv/admin/: +``` + +### K8s Secret +```bash +kubectl create secret docker-registry gitea-registry \ + --docker-server=git.fuq.tv \ + --docker-username=admin \ + --docker-password=7401126cfb56ab2aebba17755bdc968c20768c27 \ + -n +``` + +--- + +## 🖥️ SSH a Servidores + +```bash +# Control Planes +ssh root@108.165.47.233 # k8s-cp-01 +ssh root@108.165.47.235 # k8s-cp-02 +ssh root@108.165.47.215 # k8s-cp-03 + +# Workers +ssh root@108.165.47.225 # k8s-worker-01 +ssh root@108.165.47.224 # k8s-worker-02 +ssh root@108.165.47.222 # k8s-worker-03 + +# Load Balancers +ssh root@108.165.47.221 # k8s-lb-01 +ssh root@108.165.47.203 # k8s-lb-02 +``` + +--- + +## 📦 Backend Development + +### Local +```bash +cd backend + +# Install deps +bun install + +# Dev mode +bun run dev + +# Generate migrations +bun run db:generate + +# Build +bun build src/index.ts --outdir dist +``` + +### Port-forwards para desarrollo local +```bash +# Terminal 1: MariaDB +kubectl port-forward -n control-plane svc/mariadb 3306:3306 + +# Terminal 2: Redis 
+kubectl port-forward -n control-plane svc/redis 6379:6379 + +# Terminal 3: Backend +cd backend && bun run dev +``` + +--- + +## 🔄 Git Workflow + +### Commits +```bash +git add . +git commit -m "Description + +Co-Authored-By: Claude Sonnet 4.5 (1M context) " +git push +``` + +### Ver Actions +```bash +open https://git.fuq.tv/admin/aiworker-backend/actions +``` + +### Build manual (si Actions falla) +```bash +# En un worker node +ssh root@108.165.47.225 + +cd /tmp +git clone https://git.fuq.tv/admin/aiworker-backend.git +cd aiworker-backend + +docker build -t git.fuq.tv/admin/aiworker-backend:latest . +docker login git.fuq.tv -u admin -p 7401126cfb56ab2aebba17755bdc968c20768c27 +docker push git.fuq.tv/admin/aiworker-backend:latest +``` + +--- + +## 🚀 Deploy + +### Aplicar manifests +```bash +kubectl apply -f k8s/backend/ +kubectl apply -f k8s/frontend/ +``` + +### Ver estado +```bash +kubectl get all -n control-plane +kubectl logs -f deployment/backend -n control-plane +``` + +### Rollout +```bash +# Update image +kubectl set image deployment/backend backend=git.fuq.tv/admin/aiworker-backend:v2.0.0 -n control-plane + +# Restart +kubectl rollout restart deployment/backend -n control-plane + +# Status +kubectl rollout status deployment/backend -n control-plane + +# Rollback +kubectl rollout undo deployment/backend -n control-plane +``` + +--- + +## 📊 Monitoring + +### Resources +```bash +kubectl top nodes +kubectl top pods -A +kubectl top pods -n control-plane +``` + +### Logs +```bash +# Tail logs +kubectl logs -f -n + +# Logs recientes +kubectl logs --tail=100 -n + +# Logs de todos los containers +kubectl logs -n --all-containers +``` + +### Events +```bash +kubectl get events -A --sort-by='.lastTimestamp' | tail -20 +kubectl get events -n +``` + +--- + +## 🗄️ Database + +### Query rápido +```bash +kubectl exec -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! 
aiworker \ + -e "SHOW TABLES;" +``` + +### Backup +```bash +kubectl exec -n control-plane mariadb-0 -- \ + mariadb-dump -uaiworker -pAiWorker2026_UserPass! aiworker \ + > backup-$(date +%Y%m%d).sql +``` + +### Restore +```bash +cat backup.sql | kubectl exec -i -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! aiworker +``` + +--- + +## 🔐 Secrets Management + +### Create secret +```bash +kubectl create secret generic my-secret -n \ + --from-literal=key1=value1 \ + --from-literal=key2=value2 +``` + +### View secret +```bash +kubectl get secret my-secret -n -o yaml +kubectl get secret my-secret -n -o jsonpath='{.data.key1}' | base64 -d +``` + +--- + +## 🛠️ CubePath CLI + +### Ver servidores +```bash +cubecli vps list +cubecli project list +``` + +### SSH a servidor +```bash +cubecli vps list | grep k8s- +ssh root@ +``` + +--- + +## 🌍 DNS + +**Configurado**: +``` +*.fuq.tv → 108.165.47.221, 108.165.47.203 +*.r.fuq.tv → 108.165.47.221, 108.165.47.203 +``` + +**Verificar**: +```bash +dig api.fuq.tv +short +dig test.fuq.tv +short +``` + +--- + +## 📁 ESTRUCTURA DEL PROYECTO + +``` +teamSquadAiWorker/ +├── backend/ # Bun backend +│ ├── src/ +│ ├── Dockerfile +│ └── .gitea/workflows/ +├── frontend/ # React (por crear) +├── docs/ # Documentación completa +├── scripts/ # Scripts de instalación +├── k8s/ # Manifests Kubernetes (por crear) +├── ROADMAP.md # Plan general +├── NEXT-SESSION.md # Próximos pasos detallados +├── TROUBLESHOOTING.md # Solución de problemas +├── QUICK-REFERENCE.md # Este archivo +├── CLUSTER-READY.md # Estado del cluster +└── CLUSTER-CREDENTIALS.md # Credenciales (sensible) +``` + +--- + +## 🎯 INICIO RÁPIDO DE SESIÓN + +```bash +# 1. Verificar cluster +export KUBECONFIG=~/.kube/aiworker-config +kubectl get nodes + +# 2. Ver servicios +kubectl get pods -n control-plane +kubectl get pods -n gitea + +# 3. Backend local +cd backend +bun run dev + +# 4. Ver Actions +open https://git.fuq.tv/admin/aiworker-backend/actions + +# 5. 
Continuar donde te quedaste +code NEXT-SESSION.md +``` + +--- + +## 🎊 FIN DE QUICK REFERENCE + +**Todo lo importante en un solo lugar. ¡Guarda este archivo!** diff --git a/README.md b/README.md new file mode 100644 index 0000000..e64563b --- /dev/null +++ b/README.md @@ -0,0 +1,245 @@ +# 🤖 AiWorker - AI Agent Orchestration Platform + +Sistema de orquestación de agentes IA (Claude Code) para automatización del ciclo completo de desarrollo de software. + +**Estado**: 🚧 En desarrollo - Infraestructura completa ✅ + +--- + +## 🎯 ¿Qué es AiWorker? + +Plataforma que permite a agentes IA (Claude Code) trabajar autónomamente en tareas de desarrollo: + +1. **Dashboard Web** - Kanban board para gestionar tareas +2. **Agentes Autónomos** - Claude Code en pods de Kubernetes +3. **Comunicación MCP** - Agentes piden/dan información +4. **Deployments Automáticos** - Preview, staging, production +5. **GitOps** - Todo versionado en Git + +**Flujo completo**: +``` +Tarea → Agente → Código → PR → Preview Deploy → Aprobación → Staging → Production +``` + +--- + +## 📚 DOCUMENTACIÓN + +### 🚀 Start Here +- **[ROADMAP.md](ROADMAP.md)** - Plan general y progreso +- **[NEXT-SESSION.md](NEXT-SESSION.md)** - Próximos pasos detallados +- **[DEVELOPMENT-WORKFLOW.md](DEVELOPMENT-WORKFLOW.md)** - Cómo trabajamos (Git, CI/CD, Deploy) +- **[QUICK-REFERENCE.md](QUICK-REFERENCE.md)** - Comandos y URLs importantes +- **[GITEA-GUIDE.md](GITEA-GUIDE.md)** - Guía completa de Gitea (API, Registry, CI/CD) + +### 🏗️ Infraestructura +- **[CLUSTER-READY.md](CLUSTER-READY.md)** - Estado del cluster K8s +- **[CLUSTER-CREDENTIALS.md](CLUSTER-CREDENTIALS.md)** - Credenciales (⚠️ sensible) +- **[AGENT-GUIDE.md](AGENT-GUIDE.md)** - Guía para agentes IA +- **[TROUBLESHOOTING.md](TROUBLESHOOTING.md)** - Solución de problemas + +### 📖 Documentación Completa +- **[docs/](docs/)** - 40+ archivos de documentación detallada + - `01-arquitectura/` - Diseño del sistema + - `02-backend/` - Backend (Bun + Express + MCP) + - 
`03-frontend/` - Frontend (React 19.2 + TailwindCSS) + - `04-kubernetes/` - Kubernetes setup + - `05-agents/` - Claude Code agents + - `06-deployment/` - CI/CD y GitOps + - `CONTAINER-REGISTRY.md` - Uso del registry + +--- + +## ✅ COMPLETADO + +### Infraestructura Kubernetes HA +- ✅ Cluster K3s v1.35.0+k3s1 (Houston, Texas) +- ✅ 3 Control Planes + 3 Workers + 2 Load Balancers +- ✅ Longhorn Storage HA (3 réplicas) +- ✅ Nginx Ingress + Cert-Manager (TLS automático) +- ✅ Red privada con HAProxy +- ✅ DNS: *.fuq.tv (round-robin HA) + +### Servicios Base +- ✅ MariaDB 11.4 LTS +- ✅ Redis 7 +- ✅ Gitea 1.25.3 + Container Registry +- ✅ ArgoCD +- ✅ Gitea Actions Runner (CI/CD) + +### Backend Inicial +- ✅ Estructura del proyecto +- ✅ Bun 1.3.6 con Bun.serve() +- ✅ Database schema (Drizzle ORM) +- ✅ Dockerfile +- ✅ Workflow CI/CD + +**Costo**: $148/mes | **Capacidad**: 48 vCPU, 104 GB RAM + +--- + +## 🚧 EN DESARROLLO + +- [ ] Backend API routes completas +- [ ] MCP Server para agentes +- [ ] Frontend React 19.2 +- [ ] Pods de agentes Claude Code +- [ ] Preview environments automáticos + +--- + +## ⚡ QUICK START + +### Acceder al Cluster +```bash +export KUBECONFIG=~/.kube/aiworker-config +kubectl get nodes +kubectl get pods -A +``` + +### Desarrollo Local (Backend) +```bash +cd backend + +# Port-forward services +kubectl port-forward -n control-plane svc/mariadb 3306:3306 & +kubectl port-forward -n control-plane svc/redis 6379:6379 & + +# Run dev server +bun run dev + +# Test +curl http://localhost:3000/api/health +``` + +### Ver Actions CI/CD +```bash +open https://git.fuq.tv/admin/aiworker-backend/actions +``` + +### Deploy en K8s (cuando esté listo) +```bash +kubectl apply -f k8s/backend/ +kubectl get pods -n control-plane +``` + +--- + +## 🏗️ ARQUITECTURA + +``` + Internet + ↓ + [DNS: *.fuq.tv] + ↓ + ┌────────────┴────────────┐ + ↓ ↓ + [LB-01] [LB-02] + HAProxy HAProxy + ↓ ↓ + └────────────┬────────────┘ + ↓ + [Private Network 10.100.0.0/24] + ↓ + 
┌───────────────┼───────────────┐ + ↓ ↓ ↓ + [CP etcd] [CP etcd] [CP etcd] + 10.100.0.2 10.100.0.3 10.100.0.4 + ↓ ↓ ↓ + ─────┴───────────────┴───────────────┴───── + ↓ ↓ ↓ + [Worker+Storage] [Worker+Storage] [Worker+Storage] + 10.100.0.5 10.100.0.6 10.100.0.7 + ↓ ↓ ↓ + [Apps] [Apps] [Apps] + │ │ │ + [Longhorn 3x Replica Storage HA] +``` + +--- + +## 📦 STACK TECNOLÓGICO + +| Layer | Tecnología | Versión | +|-------|-----------|---------| +| **Runtime** | Bun | 1.3.6 | +| **Backend** | Bun.serve() + Drizzle | - | +| **Frontend** | React + TailwindCSS | 19.2 + 4.x | +| **Database** | MariaDB | 11.4 LTS | +| **Cache** | Redis | 7 | +| **Git** | Gitea | 1.25.3 | +| **Registry** | Gitea Container Registry | - | +| **K8s** | K3s | v1.35.0+k3s1 | +| **Storage** | Longhorn | v1.8.0 | +| **Ingress** | Nginx | latest | +| **TLS** | Cert-Manager | v1.16.2 | +| **GitOps** | ArgoCD | stable | +| **CI/CD** | Gitea Actions | - | + +--- + +## 🎓 LEARN BY DOING - Sesiones + +### Sesión 1 (2026-01-19) - Infraestructura ✅ +- Desplegar cluster K3s HA en CubePath +- Configurar storage, networking, ingress +- Instalar Gitea, MariaDB, Redis +- Setup CI/CD con Gitea Actions +- Inicializar backend + +**Ver**: `CLUSTER-READY.md` para detalles completos + +### Sesión 2 (Próxima) - Backend API +- Completar API routes +- Implementar MCP Server +- Integración con Gitea y K8s +- Deploy backend en cluster + +**Ver**: `NEXT-SESSION.md` para pasos exactos + +--- + +## 📞 SOPORTE + +- **Issues**: Documentadas en `TROUBLESHOOTING.md` +- **CubePath**: https://cubepath.com/support +- **K3s Docs**: https://docs.k3s.io +- **Bun Docs**: https://bun.sh/docs + +--- + +## 🤝 CONTRIBUCIÓN + +Este es un proyecto en desarrollo activo. La documentación se actualiza en cada sesión. 
+ +**Estructura de commits**: +``` +Title line (imperativo) + +Detailed description + +Co-Authored-By: Claude Sonnet 4.5 (1M context) +``` + +--- + +## 📜 LICENSE + +Proyecto privado - AiWorker Platform + +--- + +## 🎉 HIGHLIGHTS + +- **Alta Disponibilidad**: 3 réplicas de todo (storage, control planes) +- **TLS Automático**: Let's Encrypt con Cert-Manager +- **CI/CD Integrado**: Build automático en cada push +- **GitOps Ready**: ArgoCD configurado +- **Escalable**: 48 vCPU, 104 GB RAM disponibles +- **Moderno**: Últimas versiones de todo (K3s, Bun, React 19.2) + +--- + +**🚀 ¡Proyecto con bases sólidas! Listo para construir features increíbles!** + +**Siguiente paso**: Abre `NEXT-SESSION.md` y continúa donde lo dejaste. diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 0000000..aa97665 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,531 @@ +# 🗺️ AiWorker - Roadmap y Próximos Pasos + +**Última actualización**: 2026-01-19 +**Estado actual**: Infraestructura completa, Backend iniciado + +--- + +## ✅ COMPLETADO (Sesión 1 - 2026-01-19) + +### 1. Infraestructura Kubernetes HA +- [x] Cluster K3s desplegado en CubePath (Houston) +- [x] 3 Control Planes + 3 Workers + 2 Load Balancers +- [x] Red privada 10.100.0.0/24 +- [x] Longhorn storage HA (3 réplicas) +- [x] Nginx Ingress + Cert-Manager (TLS automático) +- [x] DNS configurado (*.fuq.tv) + +**Docs**: `CLUSTER-READY.md`, `docs/04-kubernetes/` + +### 2. Bases de Datos y Servicios +- [x] MariaDB 11.4 LTS con storage HA +- [x] Redis 7 desplegado +- [x] Gitea 1.25.3 con Container Registry habilitado +- [x] ArgoCD para GitOps + +**Credenciales**: `CLUSTER-CREDENTIALS.md` +**Acceso Gitea**: https://git.fuq.tv (admin/admin123) + +### 3. 
Backend Inicial +- [x] Estructura del proyecto creada +- [x] Bun 1.3.6 configurado +- [x] Database schema (projects, agents, tasks) con Drizzle +- [x] Dockerfile multi-stage +- [x] Gitea Actions Runner configurado +- [x] Workflow CI/CD básico + +**Repo**: https://git.fuq.tv/admin/aiworker-backend + +--- + +## 🎯 PRÓXIMOS PASOS + +### FASE 1: Completar Backend + +#### 1.1 Verificar y corregir CI/CD +**Objetivo**: Build automático funcionando +**Tareas**: +- [ ] Verificar build en https://git.fuq.tv/admin/aiworker-backend/actions +- [ ] Corregir errores si los hay +- [ ] Confirmar imagen en registry: `git.fuq.tv/admin/aiworker-backend:latest` + +**Comandos útiles**: +```bash +# Ver runner logs +kubectl logs -n gitea-actions deployment/gitea-runner -c runner --tail=50 + +# Ver packages en Gitea +https://git.fuq.tv/admin/-/packages +``` + +#### 1.2 Implementar API Routes con Bun.serve() +**Objetivo**: Endpoints REST funcionales +**Tareas**: +- [ ] Crear `/api/projects` (CRUD) +- [ ] Crear `/api/tasks` (CRUD) +- [ ] Crear `/api/agents` (list, status) +- [ ] Implementar validación con Zod +- [ ] Health check mejorado con DB/Redis status + +**Referencia**: `docs/02-backend/api-endpoints.md` + +**Estructura**: +``` +src/api/ +├── routes/ +│ ├── projects.ts +│ ├── tasks.ts +│ └── agents.ts +└── middleware/ + ├── auth.ts + └── validate.ts +``` + +#### 1.3 Implementar MCP Server +**Objetivo**: Herramientas para agentes Claude Code +**Tareas**: +- [ ] Instalar `@modelcontextprotocol/sdk` +- [ ] Crear MCP server en puerto 3100 +- [ ] Implementar tools: `get_next_task`, `update_task_status`, etc. +- [ ] Conectar con Gitea API +- [ ] Conectar con Kubernetes API + +**Referencia**: `docs/02-backend/mcp-server.md`, `docs/05-agents/mcp-tools.md` + +**Archivo**: `src/services/mcp/server.ts` + +#### 1.4 Integración con Gitea +**Objetivo**: Gestión de repos y PRs +**Tareas**: +- [ ] Cliente API de Gitea +- [ ] Webhooks handler +- [ ] Operaciones: create repo, create PR, merge, etc. 
+ +**Referencia**: `docs/02-backend/gitea-integration.md` + +**Token Gitea**: `159a5de2a16d15f33e388b55b1276e431dbca3f3` (full access) + +#### 1.5 Integración con Kubernetes +**Objetivo**: Crear/gestionar deployments y namespaces +**Tareas**: +- [ ] Cliente K8s usando `@kubernetes/client-node` +- [ ] Crear namespaces dinámicos +- [ ] Crear deployments de preview +- [ ] Crear ingress automáticos +- [ ] Gestionar pods de agentes + +**Referencia**: `docs/04-kubernetes/deployments.md` + +**Kubeconfig**: `~/.kube/aiworker-config` + +#### 1.6 Sistema de Colas (BullMQ) +**Objetivo**: Jobs asíncronos para deployments +**Tareas**: +- [ ] Setup BullMQ con Redis +- [ ] Queue para tasks +- [ ] Queue para deployments +- [ ] Workers para procesar jobs + +**Referencia**: `docs/02-backend/queue-system.md` + +#### 1.7 WebSocket Real-time +**Objetivo**: Notificaciones en tiempo real +**Tareas**: +- [ ] WebSocket server con Bun.serve() +- [ ] Eventos: `task:created`, `task:status_changed`, etc. +- [ ] Autenticación de conexiones + +**Referencia**: `docs/01-arquitectura/flujo-de-datos.md` + +#### 1.8 Deploy Backend en K8s +**Objetivo**: Backend corriendo en producción +**Tareas**: +- [ ] Crear manifests K8s (deployment, service, ingress) +- [ ] Configurar secrets (DB, Gitea, etc.) 
+- [ ] Deploy en namespace `control-plane` +- [ ] Verificar en `api.fuq.tv` + +**Comandos**: +```bash +kubectl apply -f k8s/backend/ +kubectl get pods -n control-plane +``` + +--- + +### FASE 2: Frontend React 19.2 + +#### 2.1 Inicializar Proyecto +**Objetivo**: Setup base de React +**Tareas**: +- [ ] Crear proyecto con Vite + React 19.2 +- [ ] Instalar TailwindCSS 4 +- [ ] Configurar TypeScript +- [ ] Estructura de carpetas + +**Stack**: +- React 19.2.0 +- Vite 6.x +- TailwindCSS 4.x +- Bun como package manager + +**Auth**: Lucia Auth (`https://github.com/lucia-auth/lucia`) +**Skills**: Vercel Agent Skills (`https://github.com/vercel-labs/agent-skills`) + +#### 2.2 Componentes Base +**Objetivo**: UI components library +**Tareas**: +- [ ] Layout (Header, Sidebar) +- [ ] Componentes UI (Button, Card, Modal, etc.) +- [ ] TailwindCSS config con tema + +**Referencia**: `docs/03-frontend/componentes.md` + +#### 2.3 Kanban Board +**Objetivo**: Gestión visual de tareas +**Tareas**: +- [ ] Implementar con `@dnd-kit` +- [ ] Columnas por estado (backlog, in_progress, etc.) 
+- [ ] Drag & drop funcional +- [ ] Filtros y búsqueda + +**Referencia**: `docs/03-frontend/kanban.md` + +#### 2.4 Consolas Web +**Objetivo**: Terminales para agentes +**Tareas**: +- [ ] Implementar con `xterm.js` +- [ ] WebSocket a pods de agentes +- [ ] Tabs manager + +**Referencia**: `docs/03-frontend/consolas-web.md` + +#### 2.5 Deploy Frontend +**Objetivo**: Frontend en producción +**Tareas**: +- [ ] Build para producción +- [ ] Dockerfile con nginx +- [ ] Deploy en `app.fuq.tv` + +--- + +### FASE 3: Agentes Claude Code + +#### 3.1 Docker Image del Agente +**Objetivo**: Imagen base para agentes +**Tareas**: +- [ ] Dockerfile con Claude Code CLI +- [ ] Git config +- [ ] SSH keys setup +- [ ] Script de trabajo (agent-loop.sh) + +**Referencia**: `docs/05-agents/claude-code-pods.md` + +#### 3.2 Gestión de Agentes desde Backend +**Objetivo**: Crear/eliminar pods de agentes +**Tareas**: +- [ ] API endpoint `/agents` (create, delete, list) +- [ ] Auto-scaling basado en tareas pendientes +- [ ] Healthcheck de agentes + +**Referencia**: `docs/05-agents/ciclo-vida.md` + +#### 3.3 Comunicación MCP +**Objetivo**: Agentes conectados al backend +**Tareas**: +- [ ] MCP client en agentes +- [ ] Herramientas implementadas (get_next_task, etc.) 
+- [ ] Heartbeat system + +**Referencia**: `docs/05-agents/comunicacion.md` + +--- + +### FASE 4: GitOps y Deployments + +#### 4.1 ArgoCD Setup +**Objetivo**: GitOps funcional +**Tareas**: +- [ ] Conectar repos de Gitea a ArgoCD +- [ ] Crear Applications +- [ ] Auto-sync configurado + +**Referencia**: `docs/06-deployment/gitops.md` +**URL**: https://argocd.fuq.tv (admin/LyPF4Hy0wvp52IoU) + +#### 4.2 Preview Environments +**Objetivo**: Deploy automático por tarea +**Tareas**: +- [ ] Lógica para crear namespace temporal +- [ ] Deploy app en `task-{id}.r.fuq.tv` +- [ ] Cleanup automático (TTL) + +**Referencia**: `docs/06-deployment/preview-envs.md` + +#### 4.3 Staging y Production +**Objetivo**: Pipeline completo +**Tareas**: +- [ ] Merge a staging branch +- [ ] Deploy staging automático +- [ ] Aprobación manual para production +- [ ] Rollback capability + +**Referencia**: `docs/06-deployment/staging-production.md` + +--- + +## 📚 DOCUMENTACIÓN EXISTENTE + +### Arquitectura +- `docs/01-arquitectura/overview.md` - Visión general +- `docs/01-arquitectura/stack-tecnologico.md` - Stack completo +- `docs/01-arquitectura/flujo-de-datos.md` - Diagramas de flujo +- `docs/01-arquitectura/modelo-datos.md` - Database schema + +### Backend +- `docs/02-backend/estructura.md` - Estructura del proyecto +- `docs/02-backend/database-schema.md` - Drizzle schema +- `docs/02-backend/mcp-server.md` - MCP implementation +- `docs/02-backend/gitea-integration.md` - Gitea API client +- `docs/02-backend/queue-system.md` - BullMQ +- `docs/02-backend/api-endpoints.md` - REST API specs + +### Frontend +- `docs/03-frontend/estructura.md` - Estructura +- `docs/03-frontend/componentes.md` - Componentes principales +- `docs/03-frontend/estado.md` - React Query + Zustand +- `docs/03-frontend/kanban.md` - Kanban board +- `docs/03-frontend/consolas-web.md` - xterm.js + +### Kubernetes +- `docs/04-kubernetes/cluster-setup.md` - Setup inicial +- `docs/04-kubernetes/namespaces.md` - Estructura +- 
`docs/04-kubernetes/deployments.md` - Manifests +- `docs/04-kubernetes/gitea-deployment.md` - Gitea en K8s +- `docs/04-kubernetes/networking.md` - Ingress y red + +### Agentes +- `docs/05-agents/claude-code-pods.md` - Pods de agentes +- `docs/05-agents/mcp-tools.md` - Herramientas MCP +- `docs/05-agents/comunicacion.md` - MCP protocol +- `docs/05-agents/ciclo-vida.md` - Lifecycle + +### Deployment +- `docs/06-deployment/ci-cd.md` - Pipelines +- `docs/06-deployment/gitops.md` - ArgoCD +- `docs/06-deployment/preview-envs.md` - Previews +- `docs/06-deployment/staging-production.md` - Promoción + +### Cluster +- `CLUSTER-READY.md` - Estado del cluster +- `CLUSTER-CREDENTIALS.md` - Credenciales (⚠️ sensible) +- `AGENT-GUIDE.md` - Guía para agentes IA +- `docs/CONTAINER-REGISTRY.md` - Uso del registry +- `k8s-cluster-info.md` - Info técnica + +--- + +## 🔑 CREDENCIALES RÁPIDAS + +**Gitea**: +- URL: https://git.fuq.tv +- User: admin / admin123 +- Token: `159a5de2a16d15f33e388b55b1276e431dbca3f3` + +**Registry**: +- URL: git.fuq.tv +- Token: `7401126cfb56ab2aebba17755bdc968c20768c27` + +**ArgoCD**: +- URL: https://argocd.fuq.tv +- User: admin / LyPF4Hy0wvp52IoU + +**Longhorn**: +- URL: https://longhorn.fuq.tv +- User: admin / aiworker2026 + +**MariaDB** (interno): +- Host: mariadb.control-plane.svc.cluster.local:3306 +- DB: aiworker +- User: aiworker / AiWorker2026_UserPass! 
+ +**Kubeconfig**: +```bash +export KUBECONFIG=~/.kube/aiworker-config +``` + +--- + +## ⚡ COMANDOS ÚTILES + +### Cluster +```bash +# Ver nodos +kubectl get nodes -o wide + +# Ver todos los pods +kubectl get pods -A + +# Namespaces +kubectl get ns +``` + +### Backend +```bash +cd backend +bun run dev # Desarrollo local +bun run db:generate # Generar migraciones +``` + +### Gitea +```bash +# Ver Actions +https://git.fuq.tv/admin/aiworker-backend/actions + +# Ver packages/imágenes +https://git.fuq.tv/admin/-/packages +``` + +### Logs +```bash +# MariaDB +kubectl logs -n control-plane mariadb-0 + +# Redis +kubectl logs -n control-plane deployment/redis + +# Gitea +kubectl logs -n gitea gitea-0 + +# Runner +kubectl logs -n gitea-actions deployment/gitea-runner -c runner +``` + +--- + +## 🎯 PRÓXIMA SESIÓN - Plan Sugerido + +### Opción A: Completar Backend (Recomendado) +1. Verificar CI/CD funcional +2. Implementar API routes básicas +3. Implementar MCP Server básico +4. Deploy backend en K8s +5. Probar end-to-end + +**Tiempo estimado**: 2-3 horas + +### Opción B: Frontend Paralelo +1. Inicializar React 19.2 + Vite +2. Setup TailwindCSS +3. Componentes básicos UI +4. Kanban board inicial + +**Tiempo estimado**: 2-3 horas + +### Opción C: Agentes Primero +1. Crear imagen Docker de agente +2. Deploy agente de prueba +3. Conectar con MCP +4. Primera tarea automática + +**Tiempo estimado**: 3-4 horas + +--- + +## 📊 PROGRESO GENERAL + +``` +Infraestructura: ████████████████████ 100% +Backend: ████░░░░░░░░░░░░░░░░ 20% +Frontend: ░░░░░░░░░░░░░░░░░░░░ 0% +Agentes: ░░░░░░░░░░░░░░░░░░░░ 0% +GitOps/Deploy: ██░░░░░░░░░░░░░░░░░░ 10% +────────────────────────────────────────── +Total: ██████░░░░░░░░░░░░░░ 26% +``` + +--- + +## 🚀 QUICK START para Próxima Sesión + +```bash +# 1. Verificar cluster +export KUBECONFIG=~/.kube/aiworker-config +kubectl get nodes + +# 2. 
Verificar servicios +kubectl get pods -n control-plane +kubectl get pods -n gitea +kubectl get pods -n gitea-actions + +# 3. Acceder a Gitea +open https://git.fuq.tv + +# 4. Continuar con backend +cd backend +bun run dev + +# 5. Ver Actions +open https://git.fuq.tv/admin/aiworker-backend/actions +``` + +--- + +## 🎓 APRENDIZAJES DE ESTA SESIÓN + +### Lo que funcionó bien ✅ +- CubeCLI para gestionar VPS +- K3s con instalación manual (control total) +- Longhorn para storage HA +- Gitea como plataforma todo-en-uno +- Bun.serve() nativo (más simple que Express) + +### Challenges superados 💪 +- Configurar red privada en K3s +- TLS automático con Cert-Manager +- Container Registry en Gitea +- Gitea Actions Runner con DinD +- Auto-migrations en la app + +### Tips para futuras sesiones 💡 +- Port-forward solo para testing, nunca para migrations +- Migrations deben ser automáticas en la app +- Usar TCP probes en vez de exec para MariaDB +- DinD necesita privileged + volumen compartido +- Gitea Actions compatible con GitHub Actions + +--- + +## 📞 REFERENCIAS EXTERNAS + +### Tecnologías +- **Bun**: https://bun.sh/docs +- **K3s**: https://docs.k3s.io +- **Drizzle ORM**: https://orm.drizzle.team/docs +- **Longhorn**: https://longhorn.io/docs/ +- **Gitea**: https://docs.gitea.com +- **Cert-Manager**: https://cert-manager.io/docs/ +- **Lucia Auth**: https://github.com/lucia-auth/lucia +- **Vercel Agent Skills**: https://github.com/vercel-labs/agent-skills + +### APIs +- **MCP Protocol**: `@modelcontextprotocol/sdk` +- **Kubernetes**: `@kubernetes/client-node` +- **Gitea API**: https://git.fuq.tv/api/swagger + +--- + +## 🎯 OBJETIVO FINAL + +Sistema completo de orquestación de agentes IA que automatiza: +1. Usuario crea tarea en Dashboard +2. Agente Claude Code toma tarea vía MCP +3. Agente trabaja: código, commits, PR +4. Deploy automático en preview environment +5. 
Usuario aprueba → Staging → Production
+
+**Todo automático, todo con HA, todo monitoreado.**
+
+---
+
+**💪 ¡Hemos construido bases sólidas! El siguiente paso más lógico es completar el Backend para tener la API funcional.**
diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md
new file mode 100644
index 0000000..b6ba070
--- /dev/null
+++ b/TROUBLESHOOTING.md
@@ -0,0 +1,372 @@
+# 🔧 Troubleshooting Guide
+
+Guía rápida de solución de problemas comunes.
+
+---
+
+## 🚨 PROBLEMAS COMUNES
+
+### 1. No puedo acceder al cluster
+
+**Síntomas**: `kubectl` no conecta
+
+**Solución**:
+```bash
+# Verificar kubeconfig
+export KUBECONFIG=~/.kube/aiworker-config
+kubectl cluster-info
+
+# Si falla, re-descargar
+ssh root@108.165.47.233 "cat /etc/rancher/k3s/k3s.yaml" | \
+  sed 's/127.0.0.1/108.165.47.233/g' > ~/.kube/aiworker-config
+```
+
+### 2. Pod en CrashLoopBackOff
+
+**Síntomas**: Pod se reinicia constantemente
+
+**Diagnóstico**:
+```bash
+# Ver logs
+kubectl logs <pod> -n <namespace>
+
+# Ver logs del contenedor anterior
+kubectl logs <pod> -n <namespace> --previous
+
+# Describir pod
+kubectl describe pod <pod> -n <namespace>
+```
+
+**Causas comunes**:
+- Variable de entorno faltante
+- Secret no existe
+- No puede conectar a DB
+- Puerto ya en uso
+
+### 3. Ingress no resuelve (502/503/504)
+
+**Síntomas**: URL da error de gateway
+
+**Diagnóstico**:
+```bash
+# Verificar Ingress
+kubectl get ingress -n <namespace>
+kubectl describe ingress <nombre> -n <namespace>
+
+# Verificar Service
+kubectl get svc -n <namespace>
+kubectl get endpoints -n <namespace>
+
+# Logs de Nginx Ingress
+kubectl logs -n ingress-nginx deployment/ingress-nginx-controller --tail=100 | grep <dominio>
+```
+
+**Verificar**:
+- Service selector correcto
+- Pod está Running y Ready
+- Puerto correcto en Service
+
+### 4. 
TLS Certificate no se emite
+
+**Síntomas**: Certificado en estado `False`
+
+**Diagnóstico**:
+```bash
+# Ver certificado
+kubectl get certificate -n <namespace>
+kubectl describe certificate <nombre> -n <namespace>
+
+# Ver CertificateRequest
+kubectl get certificaterequest -n <namespace>
+
+# Ver Challenge (HTTP-01)
+kubectl get challenge -n <namespace>
+kubectl describe challenge <nombre> -n <namespace>
+
+# Logs de cert-manager
+kubectl logs -n cert-manager deployment/cert-manager --tail=50
+```
+
+**Causas comunes**:
+- DNS no apunta a los LBs
+- Firewall bloquea puerto 80
+- Ingress no tiene annotation de cert-manager
+
+**Fix**:
+```bash
+# Verificar DNS
+dig +short <dominio>
+# Debe mostrar: 108.165.47.221, 108.165.47.203
+
+# Delete y recreate certificate
+kubectl delete certificate <nombre> -n <namespace>
+kubectl delete secret <secret-tls> -n <namespace>
+# Recreate ingress
+```
+
+### 5. PVC en estado Pending
+
+**Síntomas**: PVC no se bindea
+
+**Diagnóstico**:
+```bash
+# Ver PVC
+kubectl get pvc -n <namespace>
+kubectl describe pvc <nombre> -n <namespace>
+
+# Ver PVs disponibles
+kubectl get pv
+
+# Ver Longhorn volumes
+kubectl get volumes.longhorn.io -n longhorn-system
+```
+
+**Fix**:
+```bash
+# Ver Longhorn UI
+open https://longhorn.fuq.tv
+
+# Logs de Longhorn
+kubectl logs -n longhorn-system daemonset/longhorn-manager --tail=50
+```
+
+### 6. Gitea Actions no ejecuta
+
+**Síntomas**: El workflow no se dispara
+
+**Diagnóstico**:
+```bash
+# Ver runner
+kubectl get pods -n gitea-actions
+kubectl logs -n gitea-actions deployment/gitea-runner -c runner --tail=100
+
+# Ver en Gitea UI
+open https://git.fuq.tv/admin/aiworker-backend/actions
+```
+
+**Fix**:
+```bash
+# Restart runner
+kubectl rollout restart deployment/gitea-runner -n gitea-actions
+
+# Verificar runner registrado
+kubectl logs -n gitea-actions deployment/gitea-runner -c runner | grep "registered"
+
+# Push de nuevo para disparar el workflow
+git commit --allow-empty -m "Trigger workflow"
+git push
+```
+
+### 7. 
MariaDB no conecta + +**Síntomas**: `Connection refused` o `Access denied` + +**Diagnóstico**: +```bash +# Verificar pod +kubectl get pods -n control-plane mariadb-0 + +# Ver logs +kubectl logs -n control-plane mariadb-0 + +# Test de conexión +kubectl exec -n control-plane mariadb-0 -- \ + mariadb -uaiworker -pAiWorker2026_UserPass! -e "SELECT 1" +``` + +**Credenciales correctas**: +``` +Host: mariadb.control-plane.svc.cluster.local +Port: 3306 +User: aiworker +Pass: AiWorker2026_UserPass! +DB: aiworker +``` + +### 8. Load Balancer no responde + +**Síntomas**: `curl https://` timeout + +**Diagnóstico**: +```bash +# Verificar HAProxy +ssh root@108.165.47.221 "systemctl status haproxy" +ssh root@108.165.47.203 "systemctl status haproxy" + +# Ver stats +open http://108.165.47.221:8404/stats +# Usuario: admin / aiworker2026 + +# Test directo a worker +curl http://108.165.47.225:32388 # NodePort de Ingress +``` + +**Fix**: +```bash +# Restart HAProxy +ssh root@108.165.47.221 "systemctl restart haproxy" +ssh root@108.165.47.203 "systemctl restart haproxy" + +# Verificar config +ssh root@108.165.47.221 "cat /etc/haproxy/haproxy.cfg" +``` + +--- + +## 🔍 COMANDOS DE DIAGNÓSTICO GENERAL + +### Estado del Cluster +```bash +# Nodos +kubectl get nodes -o wide + +# Recursos +kubectl top nodes +kubectl top pods -A + +# Eventos recientes +kubectl get events -A --sort-by='.lastTimestamp' | tail -30 + +# Pods con problemas +kubectl get pods -A | grep -v Running +``` + +### Verificar Conectividad + +```bash +# Desde un pod a otro servicio +kubectl run -it --rm debug --image=busybox --restart=Never -- sh +# Dentro del pod: +wget -O- http://mariadb.control-plane.svc.cluster.local:3306 +``` + +### Limpiar Recursos + +```bash +# Pods completados/fallidos +kubectl delete pods --field-selector=status.phase=Failed -A +kubectl delete pods --field-selector=status.phase=Succeeded -A + +# Preview namespaces viejos +kubectl get ns -l environment=preview +kubectl delete ns +``` + +--- + +## 📞 
CONTACTOS Y RECURSOS + +### Soporte +- **CubePath**: https://cubepath.com/support +- **K3s Issues**: https://github.com/k3s-io/k3s/issues +- **Gitea**: https://discourse.gitea.io + +### Logs Centrales +```bash +# Todos los errores recientes +kubectl get events -A --sort-by='.lastTimestamp' | grep -i error | tail -20 +``` + +### Backup Rápido +```bash +# Export toda la configuración +kubectl get all,ingress,certificate,pvc,secret -A -o yaml > cluster-backup.yaml + +# Backup MariaDB +kubectl exec -n control-plane mariadb-0 -- \ + mariadb-dump -uroot -pAiWorker2026_RootPass! --all-databases > backup-$(date +%Y%m%d).sql +``` + +--- + +## 🆘 EMERGENCY PROCEDURES + +### Cluster no responde +```bash +# SSH a control plane +ssh root@108.165.47.233 + +# Ver K3s +systemctl status k3s +journalctl -u k3s -n 100 + +# Restart K3s (último recurso) +systemctl restart k3s +``` + +### Nodo caído +```bash +# Cordon (evitar scheduling) +kubectl cordon + +# Drain (mover pods) +kubectl drain --ignore-daemonsets --delete-emptydir-data + +# Investigar en el nodo +ssh root@ +systemctl status k3s-agent +``` + +### Storage corruption +```bash +# Ver Longhorn UI +open https://longhorn.fuq.tv + +# Ver réplicas +kubectl get replicas.longhorn.io -n longhorn-system + +# Restore desde snapshot (si existe) +# Via Longhorn UI → Volume → Create from Snapshot +``` + +--- + +## 💡 TIPS + +### Desarrollo Rápido +```bash +# Auto-reload en backend +bun --watch src/index.ts + +# Ver logs en tiempo real +kubectl logs -f deployment/backend -n control-plane + +# Port-forward para testing +kubectl port-forward svc/backend 3000:3000 -n control-plane +``` + +### Debug de Networking +```bash +# Test desde fuera del cluster +curl -v https://api.fuq.tv + +# Test desde dentro del cluster +kubectl run curl --image=curlimages/curl -it --rm -- sh +curl http://backend.control-plane.svc.cluster.local:3000/api/health +``` + +### Performance +```bash +# Ver uso de recursos +kubectl top pods -n control-plane +kubectl top 
nodes + +# Ver pods que más consumen +kubectl top pods -A --sort-by=memory +kubectl top pods -A --sort-by=cpu +``` + +--- + +## 🔗 ENLACES RÁPIDOS + +- **Cluster Info**: `CLUSTER-READY.md` +- **Credenciales**: `CLUSTER-CREDENTIALS.md` +- **Roadmap**: `ROADMAP.md` +- **Próxima sesión**: `NEXT-SESSION.md` +- **Guía para agentes**: `AGENT-GUIDE.md` +- **Container Registry**: `docs/CONTAINER-REGISTRY.md` + +--- + +**Si nada de esto funciona, revisa los docs completos en `/docs` o contacta con el equipo.** diff --git a/backend b/backend new file mode 160000 index 0000000..ebf5d74 --- /dev/null +++ b/backend @@ -0,0 +1 @@ +Subproject commit ebf5d7493351382a2dd2f54664260cec378d4d80 diff --git a/docs/01-arquitectura/flujo-de-datos.md b/docs/01-arquitectura/flujo-de-datos.md new file mode 100644 index 0000000..f2ed197 --- /dev/null +++ b/docs/01-arquitectura/flujo-de-datos.md @@ -0,0 +1,316 @@ +# Flujo de Datos + +## Arquitectura de Comunicación + +``` +┌──────────┐ ┌──────────┐ ┌──────────┐ +│ Frontend │ ◄─────► │ Backend │ ◄─────► │ MySQL │ +└──────────┘ └──────────┘ └──────────┘ + │ │ │ + │ ├──────────────────────┤ + │ │ Redis │ + │ └──────────────────────┘ + │ │ + │ ┌─────┴─────┐ + │ │ │ + │ ┌────▼────┐ ┌───▼────┐ + │ │ Gitea │ │ K8s │ + │ └─────────┘ └───┬────┘ + │ │ + │ ┌────▼────────┐ + └────────────────────┤ Claude Code │ + WebSocket │ Agents │ + └─────────────┘ +``` + +## 1. Flujo Completo: Creación de Tarea + +### 1.1 Usuario Crea Tarea + +``` +Frontend Backend MySQL Redis + │ │ │ │ + │ POST /api/tasks │ │ │ + ├──────────────────────►│ │ │ + │ │ INSERT task │ │ + │ ├──────────────────►│ │ + │ │ │ │ + │ │ PUBLISH task.new │ │ + │ ├───────────────────┼────────────────►│ + │ │ │ │ + │ { taskId, status } │ │ │ + │◄──────────────────────┤ │ │ + │ │ │ │ + │ WS: task_created │ │ │ + │◄──────────────────────┤ │ │ +``` + +**Detalle**: +1. 
Frontend envía POST a `/api/tasks` con JSON: + ```json + { + "projectId": "uuid", + "title": "Implementar login", + "description": "Crear sistema de autenticación..." + } + ``` + +2. Backend: + - Valida datos + - Inserta en MySQL tabla `tasks` + - Publica evento en Redis: `task:new` + - Añade job a cola BullMQ: `task-queue` + - Responde con task creada + +3. WebSocket notifica a todos los clientes conectados + +### 1.2 Agente Toma Tarea + +``` +Agent (K8s) Backend (MCP) MySQL BullMQ + │ │ │ │ + │ MCP: get_next_task │ │ │ + ├──────────────────────►│ │ │ + │ │ SELECT task │ │ + │ ├──────────────────►│ │ + │ │ │ │ + │ │ UPDATE status │ │ + │ ├──────────────────►│ │ + │ │ │ │ + │ { task details } │ DEQUEUE job │ │ + │◄──────────────────────┤◄─────────────────┼─────────────────┤ + │ │ │ │ +``` + +**Detalle**: +1. Agente llama herramienta MCP `get_next_task` +2. Backend: + - Query: `SELECT * FROM tasks WHERE state='backlog' ORDER BY created_at LIMIT 1` + - Actualiza: `UPDATE tasks SET state='in_progress', assigned_agent_id=?` + - Elimina job de BullMQ +3. Responde con detalles completos de la tarea + +## 2. Flujo: Agente Pide Información + +``` +Agent Backend (MCP) MySQL Frontend (WS) + │ │ │ │ + │ ask_user_question │ │ │ + ├─────────────────────►│ │ │ + │ │ UPDATE task │ │ + │ ├──────────────────►│ │ + │ │ state=needs_input │ │ + │ │ │ │ + │ │ INSERT question │ │ + │ ├──────────────────►│ │ + │ │ │ │ + │ { success: true } │ WS: needs_input │ │ + │◄─────────────────────┤──────────────────┼───────────────────►│ + │ │ │ │ + │ │ │ [Usuario ve] │ + │ │ │ [notificación] │ + │ │ │ │ + │ │ POST /api/tasks/ │ │ + │ │ :id/respond │ │ + │ │◄──────────────────┼────────────────────┤ + │ │ │ │ + │ MCP: check_response │ UPDATE response │ │ + ├─────────────────────►├──────────────────►│ │ + │ │ state=in_progress │ │ + │ { response: "..." } │ │ │ + │◄─────────────────────┤ │ │ +``` + +**Detalle**: +1. Agente detecta necesita info (ej: "¿Qué librería usar para auth?") +2. 
Llama `ask_user_question(taskId, question)` +3. Backend: + - Actualiza `tasks.state = 'needs_input'` + - Inserta en tabla `task_questions` + - Emite WebSocket `task:needs_input` +4. Frontend muestra notificación y badge en kanban +5. Usuario responde vía UI +6. Backend guarda respuesta +7. Agente hace polling o recibe notificación vía MCP + +## 3. Flujo: Completar Tarea y Deploy Preview + +``` +Agent Backend(MCP) Gitea API MySQL K8s API Frontend + │ │ │ │ │ │ + │ create_branch │ │ │ │ │ + ├─────────────────►│ │ │ │ │ + │ │ POST /repos/│ │ │ │ + │ │ :owner/:repo│ │ │ │ + │ │ /branches │ │ │ │ + │ ├────────────►│ │ │ │ + │ { branch } │ │ │ │ │ + │◄─────────────────┤ │ │ │ │ + │ │ │ │ │ │ + │ [agent works] │ │ │ │ │ + │ [commits code] │ │ │ │ │ + │ │ │ │ │ │ + │ create_pr │ │ │ │ │ + ├─────────────────►│ │ │ │ │ + │ │ POST /pulls │ │ │ │ + │ ├────────────►│ │ │ │ + │ { pr_url } │ │ │ │ │ + │◄─────────────────┤ │ │ │ │ + │ │ │ │ │ │ + │ trigger_preview │ │ │ │ │ + ├─────────────────►│ │ │ │ │ + │ │ UPDATE task │ │ │ │ + │ ├────────────┼────────────►│ │ │ + │ │ │ │ │ │ + │ │ CREATE │ │ CREATE │ │ + │ │ namespace │ │ Deployment │ + │ ├────────────┼────────────┼──────────►│ │ + │ │ │ │ │ │ + │ { preview_url } │ │ WS:ready_to_test │ │ + │◄─────────────────┤─────────────┼───────────┼───────────┼───────────►│ +``` + +**Detalle**: +1. **create_branch**: Backend usa Gitea API para crear rama `task-{id}-feature` +2. **Agente trabaja**: Clone, cambios, commits, push +3. **create_pr**: Crea PR con descripción generada +4. **trigger_preview**: + - Backend crea namespace K8s: `preview-task-{id}` + - Aplica deployment con imagen del proyecto + - Configura ingress con URL: `task-{id}.preview.aiworker.dev` + - Actualiza `tasks.state = 'ready_to_test'` +5. Frontend muestra botón "Ver Preview" con URL + +## 4. 
Flujo: Merge a Staging + +``` +User (Frontend) Backend Gitea API K8s API ArgoCD + │ │ │ │ │ + │ POST /merge │ │ │ │ + │ taskIds[] │ │ │ │ + ├──────────────►│ │ │ │ + │ │ Validate │ │ │ + │ │ all approved │ │ │ + │ │ │ │ │ + │ │ POST /pulls │ │ │ + │ │ (merge PRs) │ │ │ + │ ├──────────────►│ │ │ + │ │ │ │ │ + │ │ POST /branches│ │ │ + │ │ staging │ │ │ + │ ├──────────────►│ │ │ + │ │ │ │ │ + │ │ Trigger │ Apply │ │ + │ │ ArgoCD sync │ manifests │ │ + │ ├───────────────┼──────────────┼────────────►│ + │ │ │ │ │ + │ { status } │ │ │ [Deploys] │ + │◄──────────────┤ │ │ │ +``` + +**Detalle**: +1. Usuario selecciona 2-3 tareas aprobadas +2. Click "Merge a Staging" +3. Backend: + - Valida todas están en estado `approved` + - Mergea cada PR a `staging` branch + - Actualiza estado a `staging` + - Triggerea ArgoCD sync +4. ArgoCD detecta cambios y deploya a namespace `staging` + +## 5. Comunicación Real-Time (WebSocket) + +### Eventos emitidos por Backend: + +```typescript +// Usuario se conecta +socket.on('connect', () => { + socket.emit('auth', { userId, token }) +}) + +// Backend emite eventos +socket.emit('task:created', { taskId, projectId }) +socket.emit('task:status_changed', { taskId, oldState, newState }) +socket.emit('task:needs_input', { taskId, question }) +socket.emit('task:ready_to_test', { taskId, previewUrl }) +socket.emit('agent:status', { agentId, status, currentTaskId }) +socket.emit('deploy:started', { environment, taskIds }) +socket.emit('deploy:completed', { environment, url }) +``` + +### Cliente subscribe: + +```typescript +socket.on('task:status_changed', (data) => { + // Actualiza UI del kanban + queryClient.invalidateQueries(['tasks']) +}) + +socket.on('task:needs_input', (data) => { + // Muestra notificación + toast.info('Un agente necesita tu ayuda') + // Mueve card a columna "Needs Input" +}) +``` + +## 6. 
Caching Strategy + +### Redis Cache Keys: + +``` +task:{id} → TTL 5min (task details) +task:list:{projectId} → TTL 2min (lista de tareas) +agent:{id}:status → TTL 30s (estado agente) +project:{id} → TTL 10min (config proyecto) +``` + +### Invalidación: + +```typescript +// Al actualizar tarea +await redis.del(`task:${taskId}`) +await redis.del(`task:list:${projectId}`) + +// Al cambiar estado agente +await redis.setex(`agent:${agentId}:status`, 30, status) +``` + +## 7. Queue System (BullMQ) + +### Colas: + +``` +task-queue → Tareas pendientes de asignar +deploy-queue → Deploys a ejecutar +merge-queue → Merges programados +cleanup-queue → Limpieza de preview envs antiguos +``` + +### Workers: + +```typescript +// task-worker.ts +taskQueue.process(async (job) => { + const { taskId } = job.data + // Notifica agentes disponibles vía MCP + await notifyAgents({ taskId }) +}) + +// deploy-worker.ts +deployQueue.process(async (job) => { + const { taskId, environment } = job.data + await k8sClient.createDeployment(...) 
+}) +``` + +## Resumen de Protocolos + +| Comunicación | Protocolo | Uso | +|--------------|-----------|-----| +| Frontend ↔ Backend | HTTP REST + WebSocket | CRUD + Real-time | +| Backend ↔ MySQL | TCP/MySQL Protocol | Persistencia | +| Backend ↔ Redis | RESP | Cache + PubSub | +| Backend ↔ Gitea | HTTP REST | Git operations | +| Backend ↔ K8s | HTTP + Kubernetes API | Orquestación | +| Backend ↔ Agents | MCP (stdio/HTTP) | Herramientas | +| Agents ↔ Gitea | Git Protocol (SSH) | Clone/Push | diff --git a/docs/01-arquitectura/modelo-datos.md b/docs/01-arquitectura/modelo-datos.md new file mode 100644 index 0000000..366e195 --- /dev/null +++ b/docs/01-arquitectura/modelo-datos.md @@ -0,0 +1,430 @@ +# Modelo de Datos (MySQL) + +## Diagrama ER + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Projects │───────│ Tasks │───────│ Agents │ +└─────────────┘ 1:N └─────────────┘ N:1 └─────────────┘ + │ 1:N + │ + ┌────▼────────┐ + │ Questions │ + └─────────────┘ + +┌─────────────┐ ┌─────────────┐ +│ TaskGroups │───────│ Deploys │ +└─────────────┘ 1:N └─────────────┘ +``` + +## Schema SQL + +### Tabla: projects + +```sql +CREATE TABLE projects ( + id VARCHAR(36) PRIMARY KEY, + name VARCHAR(255) NOT NULL, + description TEXT, + + -- Gitea integration + gitea_repo_id INT, + gitea_repo_url VARCHAR(512), + gitea_owner VARCHAR(100), + gitea_repo_name VARCHAR(100), + default_branch VARCHAR(100) DEFAULT 'main', + + -- Kubernetes + k8s_namespace VARCHAR(63) NOT NULL UNIQUE, + + -- Infrastructure config (JSON) + docker_image VARCHAR(512), + env_vars JSON, + replicas INT DEFAULT 1, + cpu_limit VARCHAR(20) DEFAULT '500m', + memory_limit VARCHAR(20) DEFAULT '512Mi', + + -- MCP config (JSON) + mcp_tools JSON, + mcp_permissions JSON, + + -- Status + status ENUM('active', 'paused', 'archived') DEFAULT 'active', + + -- Timestamps + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + + INDEX idx_status 
(status), + INDEX idx_k8s_namespace (k8s_namespace) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +``` + +### Tabla: tasks + +```sql +CREATE TABLE tasks ( + id VARCHAR(36) PRIMARY KEY, + project_id VARCHAR(36) NOT NULL, + + -- Task info + title VARCHAR(255) NOT NULL, + description TEXT, + priority ENUM('low', 'medium', 'high', 'urgent') DEFAULT 'medium', + + -- State machine + state ENUM( + 'backlog', + 'in_progress', + 'needs_input', + 'ready_to_test', + 'approved', + 'staging', + 'production', + 'cancelled' + ) DEFAULT 'backlog', + + -- Assignment + assigned_agent_id VARCHAR(36), + assigned_at TIMESTAMP NULL, + + -- Git info + branch_name VARCHAR(255), + pr_number INT, + pr_url VARCHAR(512), + + -- Preview deployment + preview_namespace VARCHAR(63), + preview_url VARCHAR(512), + preview_deployed_at TIMESTAMP NULL, + + -- Metadata + estimated_complexity ENUM('trivial', 'simple', 'medium', 'complex') DEFAULT 'medium', + actual_duration_minutes INT, + + -- Timestamps + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + started_at TIMESTAMP NULL, + completed_at TIMESTAMP NULL, + deployed_staging_at TIMESTAMP NULL, + deployed_production_at TIMESTAMP NULL, + + FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE, + FOREIGN KEY (assigned_agent_id) REFERENCES agents(id) ON DELETE SET NULL, + + INDEX idx_project_state (project_id, state), + INDEX idx_state (state), + INDEX idx_assigned_agent (assigned_agent_id), + INDEX idx_created_at (created_at) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +``` + +### Tabla: task_questions + +```sql +CREATE TABLE task_questions ( + id VARCHAR(36) PRIMARY KEY, + task_id VARCHAR(36) NOT NULL, + + -- Question + question TEXT NOT NULL, + context TEXT, + asked_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + + -- Response + response TEXT, + responded_at TIMESTAMP NULL, + responded_by VARCHAR(36), + + -- Status + 
status ENUM('pending', 'answered', 'skipped') DEFAULT 'pending', + + FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE, + + INDEX idx_task_status (task_id, status), + INDEX idx_status (status) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +``` + +### Tabla: agents + +```sql +CREATE TABLE agents ( + id VARCHAR(36) PRIMARY KEY, + + -- K8s info + pod_name VARCHAR(253) NOT NULL UNIQUE, + k8s_namespace VARCHAR(63) DEFAULT 'agents', + node_name VARCHAR(253), + + -- Status + status ENUM('idle', 'busy', 'error', 'offline', 'initializing') DEFAULT 'initializing', + current_task_id VARCHAR(36), + + -- Capabilities + capabilities JSON, -- ['javascript', 'python', 'react', ...] + max_concurrent_tasks INT DEFAULT 1, + + -- Health + last_heartbeat TIMESTAMP NULL, + error_message TEXT, + restarts_count INT DEFAULT 0, + + -- Metrics + tasks_completed INT DEFAULT 0, + total_runtime_minutes INT DEFAULT 0, + + -- Timestamps + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + + FOREIGN KEY (current_task_id) REFERENCES tasks(id) ON DELETE SET NULL, + + INDEX idx_status (status), + INDEX idx_pod_name (pod_name), + INDEX idx_last_heartbeat (last_heartbeat) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +``` + +### Tabla: task_groups + +```sql +CREATE TABLE task_groups ( + id VARCHAR(36) PRIMARY KEY, + project_id VARCHAR(36) NOT NULL, + + -- Grouping + task_ids JSON NOT NULL, -- ['task-id-1', 'task-id-2', ...] 
+ + -- Staging + staging_branch VARCHAR(255), + staging_pr_number INT, + staging_pr_url VARCHAR(512), + staging_deployed_at TIMESTAMP NULL, + + -- Production + production_deployed_at TIMESTAMP NULL, + production_rollback_available BOOLEAN DEFAULT TRUE, + + -- Status + status ENUM('pending', 'staging', 'production', 'rolled_back') DEFAULT 'pending', + + -- Metadata + created_by VARCHAR(36), + notes TEXT, + + -- Timestamps + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + + FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE, + + INDEX idx_project_status (project_id, status), + INDEX idx_status (status) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +``` + +### Tabla: deployments + +```sql +CREATE TABLE deployments ( + id VARCHAR(36) PRIMARY KEY, + project_id VARCHAR(36) NOT NULL, + task_group_id VARCHAR(36), + + -- Deployment info + environment ENUM('preview', 'staging', 'production') NOT NULL, + deployment_type ENUM('manual', 'automatic', 'rollback') DEFAULT 'manual', + + -- Git info + branch VARCHAR(255), + commit_hash VARCHAR(40), + + -- K8s info + k8s_namespace VARCHAR(63), + k8s_deployment_name VARCHAR(253), + image_tag VARCHAR(255), + + -- Status + status ENUM('pending', 'in_progress', 'completed', 'failed', 'rolled_back') DEFAULT 'pending', + + -- Results + url VARCHAR(512), + error_message TEXT, + logs TEXT, + + -- Timing + started_at TIMESTAMP NULL, + completed_at TIMESTAMP NULL, + duration_seconds INT, + + -- Metadata + triggered_by VARCHAR(36), + + -- Timestamps + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + + FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE, + FOREIGN KEY (task_group_id) REFERENCES task_groups(id) ON DELETE SET NULL, + + INDEX idx_project_env (project_id, environment), + INDEX idx_status (status), + INDEX idx_created_at (created_at) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; 
+```
+
+### Tabla: agent_logs
+
+```sql
+CREATE TABLE agent_logs (
+  id BIGINT AUTO_INCREMENT PRIMARY KEY,
+  agent_id VARCHAR(36) NOT NULL,
+  task_id VARCHAR(36),
+
+  -- Log entry
+  level ENUM('debug', 'info', 'warn', 'error') DEFAULT 'info',
+  message TEXT NOT NULL,
+  metadata JSON,
+
+  -- Timestamp
+  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+
+  FOREIGN KEY (agent_id) REFERENCES agents(id) ON DELETE CASCADE,
+  FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE SET NULL,
+
+  INDEX idx_agent_created (agent_id, created_at),
+  INDEX idx_task_created (task_id, created_at),
+  INDEX idx_level (level)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+```
+
+## Índices y Optimizaciones
+
+### Índices Compuestos Importantes
+
+```sql
+-- Búsqueda de tareas por proyecto y estado
+CREATE INDEX idx_tasks_project_state ON tasks(project_id, state, created_at);
+
+-- Búsqueda de agentes disponibles
+-- Nota: MySQL/MariaDB no soportan índices parciales (cláusula WHERE);
+-- se indexa la tabla completa y el filtro status = 'idle' va en la query.
+CREATE INDEX idx_agents_available ON agents(status, last_heartbeat);
+
+-- Logs recientes por agente
+CREATE INDEX idx_agent_logs_recent ON agent_logs(agent_id, created_at DESC)
+  USING BTREE;
+```
+
+### Particionamiento (para logs)
+
+```sql
+-- Particionar agent_logs por mes
+-- Nota: MySQL exige que la columna de particionado forme parte de toda
+-- clave única/primaria, e InnoDB no admite FOREIGN KEYs en tablas
+-- particionadas: habría que cambiar la PK a (id, created_at) y quitar
+-- las FKs de agent_logs antes de aplicar esto.
+ALTER TABLE agent_logs PARTITION BY RANGE (YEAR(created_at) * 100 + MONTH(created_at)) (
+  PARTITION p202601 VALUES LESS THAN (202602),
+  PARTITION p202602 VALUES LESS THAN (202603),
+  PARTITION p202603 VALUES LESS THAN (202604),
+  -- ... auto-crear con script
+  PARTITION p_future VALUES LESS THAN MAXVALUE
+);
+```
+
+## Queries Comunes
+
+### Obtener siguiente tarea disponible
+
+```sql
+SELECT * FROM tasks
+WHERE state = 'backlog'
+  AND project_id = ?
+ORDER BY + priority DESC, + created_at ASC +LIMIT 1 +FOR UPDATE SKIP LOCKED; +``` + +### Agentes disponibles + +```sql +SELECT * FROM agents +WHERE status = 'idle' + AND last_heartbeat > DATE_SUB(NOW(), INTERVAL 60 SECOND) +ORDER BY tasks_completed ASC +LIMIT 1; +``` + +### Dashboard: Métricas de proyecto + +```sql +SELECT + COUNT(*) as total_tasks, + SUM(CASE WHEN state = 'backlog' THEN 1 ELSE 0 END) as backlog, + SUM(CASE WHEN state = 'in_progress' THEN 1 ELSE 0 END) as in_progress, + SUM(CASE WHEN state = 'needs_input' THEN 1 ELSE 0 END) as needs_input, + SUM(CASE WHEN state = 'ready_to_test' THEN 1 ELSE 0 END) as ready_to_test, + SUM(CASE WHEN state = 'production' THEN 1 ELSE 0 END) as completed, + AVG(actual_duration_minutes) as avg_duration +FROM tasks +WHERE project_id = ?; +``` + +### Historial de deployments + +```sql +SELECT + d.*, + tg.task_ids, + COUNT(t.id) as tasks_count +FROM deployments d +LEFT JOIN task_groups tg ON d.task_group_id = tg.id +LEFT JOIN tasks t ON JSON_CONTAINS(tg.task_ids, CONCAT('"', t.id, '"')) +WHERE d.project_id = ? + AND d.environment = 'production' +GROUP BY d.id +ORDER BY d.created_at DESC +LIMIT 20; +``` + +## Migraciones con Drizzle + +```typescript +// drizzle/schema.ts +import { mysqlTable, varchar, text, timestamp, json, int, mysqlEnum } from 'drizzle-orm/mysql-core' + +export const projects = mysqlTable('projects', { + id: varchar('id', { length: 36 }).primaryKey(), + name: varchar('name', { length: 255 }).notNull(), + description: text('description'), + giteaRepoId: int('gitea_repo_id'), + giteaRepoUrl: varchar('gitea_repo_url', { length: 512 }), + // ... 
resto campos + createdAt: timestamp('created_at').defaultNow(), + updatedAt: timestamp('updated_at').defaultNow().onUpdateNow(), +}) + +export const tasks = mysqlTable('tasks', { + id: varchar('id', { length: 36 }).primaryKey(), + projectId: varchar('project_id', { length: 36 }).notNull().references(() => projects.id), + title: varchar('title', { length: 255 }).notNull(), + state: mysqlEnum('state', [ + 'backlog', 'in_progress', 'needs_input', + 'ready_to_test', 'approved', 'staging', 'production', 'cancelled' + ]).default('backlog'), + // ... resto campos +}) +``` + +## Backup Strategy + +```bash +# Daily backup +mysqldump -u root -p aiworker \ + --single-transaction \ + --quick \ + --lock-tables=false \ + > backup-$(date +%Y%m%d).sql + +# Restore +mysql -u root -p aiworker < backup-20260119.sql +``` diff --git a/docs/01-arquitectura/overview.md b/docs/01-arquitectura/overview.md new file mode 100644 index 0000000..61f175b --- /dev/null +++ b/docs/01-arquitectura/overview.md @@ -0,0 +1,140 @@ +# Overview General - AiWorker + +## Concepto + +AiWorker es un sistema de orquestación de agentes IA que automatiza el ciclo completo de desarrollo de software mediante: + +1. **Dashboard Web**: Interfaz central para gestionar proyectos y tareas +2. **Consolas Web Persistentes**: Terminales web conectadas a pods de Claude Code en K8s +3. **Kanban Board Inteligente**: Gestión visual de tareas con estados automáticos +4. **Agentes Autónomos**: Claude Code trabajando en tareas asignadas +5. 
**Deployments Automatizados**: Preview, staging y producción orquestados + +## Arquitectura de Alto Nivel + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Dashboard Web │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Kanban │ │ Consolas │ │ Project │ │ +│ │ Board │ │ Web │ │ Manager │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +└────────────────────────┬────────────────────────────────────────┘ + │ HTTP/WebSocket +┌────────────────────────▼────────────────────────────────────────┐ +│ Backend (Bun + Express) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ +│ │ API │ │ MCP │ │ Gitea │ │ K8s │ │ +│ │ Server │ │ Server │ │ Client │ │ Client │ │ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │ +└────────┬───────────────┬─────────────┬─────────────┬───────────┘ + │ │ │ │ + ┌────▼────┐ ┌───▼────┐ ┌───▼────┐ ┌────▼─────┐ + │ MySQL │ │ Redis │ │ Gitea │ │ K8s │ + └─────────┘ └────────┘ └────────┘ └──────────┘ + │ + ┌───────────────────────────────┘ + │ + ┌──────────▼──────────────────────────────────────┐ + │ Kubernetes Cluster │ + │ ┌──────────────┐ ┌─────────────────────────┐ │ + │ │ Agents │ │ Project Namespaces │ │ + │ │ Namespace │ │ ├── dev │ │ + │ │ │ │ ├── preview/ │ │ + │ │ Claude Code │ │ ├── staging │ │ + │ │ Pods │ │ └── production │ │ + │ └──────────────┘ └─────────────────────────┘ │ + └─────────────────────────────────────────────────┘ +``` + +## Componentes Principales + +### 1. Dashboard Web (Frontend) +- **Tecnología**: React 19.2 + TailwindCSS + Vite +- **Funciones**: + - Kanban board para gestión de tareas + - Consolas web interactivas (xterm.js) + - Gestión de proyectos + - Monitoring en tiempo real + +### 2. Backend API +- **Tecnología**: Bun 1.3.6 + Express + TypeScript +- **Funciones**: + - API REST para frontend + - MCP Server para agentes + - Orquestación de tareas + - Integración con Gitea y K8s + +### 3. 
Base de Datos +- **MySQL 8.0**: Almacenamiento persistente +- **Redis**: Colas, cache, pub/sub + +### 4. Gitea +- **Servidor Git auto-alojado** +- **API compatible con GitHub** +- **Gestión de repos, branches, PRs** + +### 5. Kubernetes Cluster +- **Orquestación de contenedores** +- **Namespaces por proyecto y entorno** +- **Auto-scaling de agentes** + +### 6. Claude Code Agents +- **Pods persistentes en K8s** +- **Conectados vía MCP Server** +- **Workspace aislado por agente** + +## Estados de Tareas + +``` +Backlog → En Progreso → Necesita Respuestas + ↓ + Usuario responde + ↓ + ┌───────────────┘ + ↓ + Listo para Probar + ↓ + (Preview deploy) + ↓ + Aprobado + ↓ + Staging (merge grupal) + ↓ + Producción +``` + +## Flujo de Trabajo Típico + +1. **Usuario crea proyecto** → Sistema crea repo en Gitea + namespace en K8s +2. **Usuario crea tareas** → Se añaden al backlog del kanban +3. **Agente disponible** → Toma siguiente tarea vía MCP +4. **Agente trabaja** → Clone, branch, código, commits +5. **¿Necesita info?** → Cambia estado a "Necesita Respuestas" +6. **Completa tarea** → Push + PR + deploy preview +7. **Usuario prueba** → En ambiente preview aislado +8. **Aprueba** → Marca para staging +9. **Merge grupal** → Agrega 2-3 tareas + merge a staging +10. **Deploy staging** → Tests automáticos +11. **Deploy producción** → Aprobación final + +## Ventajas del Sistema + +✅ **Automatización completa**: Desde tarea hasta producción +✅ **Aislamiento**: Cada tarea en su propio preview environment +✅ **Trazabilidad**: Todo cambio vinculado a tarea y PR +✅ **Escalabilidad**: Agentes auto-escalables en K8s +✅ **Flexibilidad**: Agentes pueden pedir ayuda al usuario +✅ **Control**: Usuario aprueba cada fase importante + +## Seguridad + +- Namespaces aislados en K8s +- RBAC por agente +- Secrets management +- Network policies +- Auditoría de acciones + +## Próximos Pasos + +Ver documentación específica de cada componente en las secciones correspondientes. 
diff --git a/docs/01-arquitectura/stack-tecnologico.md b/docs/01-arquitectura/stack-tecnologico.md new file mode 100644 index 0000000..086e92e --- /dev/null +++ b/docs/01-arquitectura/stack-tecnologico.md @@ -0,0 +1,208 @@ +# Stack Tecnológico + +## Frontend + +### Core +- **React 19.2**: Framework UI principal +- **Vite**: Build tool y dev server +- **TypeScript**: Type safety +- **TailwindCSS 4.x**: Styling utility-first + +### Librerías UI +- **@dnd-kit/core**: Drag and drop para kanban +- **xterm.js**: Emulador de terminal web +- **lucide-react**: Iconos modernos +- **react-hot-toast**: Notificaciones +- **recharts**: Gráficas y métricas + +### Estado y Data Fetching +- **@tanstack/react-query**: Server state management +- **zustand**: Client state management (ligero y simple) +- **socket.io-client**: WebSocket para real-time + +### Routing +- **react-router-dom**: Navegación SPA + +## Backend + +### Core +- **Bun 1.3.6**: Runtime JavaScript ultra-rápido +- **Express**: Framework HTTP +- **TypeScript**: Type safety + +### Database +- **MySQL 8.0**: Base de datos relacional principal +- **mysql2**: Driver MySQL para Node.js +- **Drizzle ORM**: ORM TypeScript-first moderno + - Type-safe + - Ligero + - Excelente DX con Bun + +### Cache y Colas +- **Redis 7.x**: Cache y message broker +- **BullMQ**: Sistema de colas robusto +- **ioredis**: Cliente Redis + +### Comunicación con Agentes +- **@modelcontextprotocol/sdk**: SDK oficial MCP +- **socket.io**: WebSocket server + +### Integraciones +- **@kubernetes/client-node**: Cliente oficial K8s +- **octokit** (adaptado): Cliente API Gitea +- **axios**: HTTP client + +### Desarrollo +- **tsx**: TypeScript execution +- **nodemon**: Hot reload +- **prettier**: Code formatting +- **eslint**: Linting + +## Infrastructure + +### Containerización +- **Docker 24.x**: Containerización +- **Docker Compose**: Orquestación local + +### Orchestration +- **Kubernetes 1.28+**: Orquestación de contenedores + - **kubectl**: CLI + - 
**helm**: Package manager + - **kustomize**: Configuration management + +### Git Server +- **Gitea latest**: Servidor Git auto-alojado + - Ligero (~100MB) + - API REST compatible GitHub + - Webhooks nativos + +### CI/CD y GitOps +- **ArgoCD**: GitOps continuous delivery +- **GitHub Actions** (o Gitea Actions): CI pipelines + +### Monitoring y Logging +- **Prometheus**: Métricas +- **Grafana**: Visualización +- **Loki**: Logs aggregation +- **Jaeger**: Distributed tracing (opcional) + +### Networking +- **Nginx Ingress Controller**: Routing +- **cert-manager**: TLS certificates + +## Agentes + +### Claude Code +- **Claude Code CLI**: Herramienta oficial de Anthropic +- **Model**: Claude Sonnet 4.5 +- **MCP Tools**: Comunicación con backend + +## Development Tools + +### Package Management +- **bun**: Package manager principal +- **npm**: Fallback para compatibilidad + +### Testing +- **Vitest**: Unit testing (compatible con Bun) +- **@testing-library/react**: React testing +- **Playwright**: E2E testing + +### Code Quality +- **TypeScript 5.x**: Type checking +- **ESLint**: Linting +- **Prettier**: Formatting +- **husky**: Git hooks + +## Versiones Específicas + +```json +{ + "frontend": { + "react": "19.2.0", + "vite": "^6.0.0", + "typescript": "^5.6.0", + "tailwindcss": "^4.0.0" + }, + "backend": { + "bun": "1.3.6", + "express": "^4.19.0", + "mysql2": "^3.11.0", + "drizzle-orm": "^0.36.0", + "bullmq": "^5.23.0", + "@modelcontextprotocol/sdk": "^1.0.0" + }, + "infrastructure": { + "kubernetes": "1.28+", + "docker": "24.0+", + "gitea": "1.22+", + "redis": "7.2+", + "mysql": "8.0+" + } +} +``` + +## Justificación de Tecnologías + +### ¿Por qué Bun? +- **Velocidad**: 3-4x más rápido que Node.js +- **TypeScript nativo**: Sin configuración adicional +- **APIs modernas**: Compatibilidad Web Standard +- **Tooling integrado**: Bundler, test runner, package manager + +### ¿Por qué MySQL? 
+- **Madurez**: Batalla-probado en producción +- **Rendimiento**: Excelente para lecturas/escrituras +- **Transacciones**: ACID compliance +- **Ecosistema**: Herramientas maduras (backup, replicación) + +### ¿Por qué Drizzle ORM? +- **Type-safety**: Inferencia total de tipos +- **Performance**: Query builder sin overhead +- **DX**: Migraciones automáticas +- **Bun compatible**: Primera clase + +### ¿Por qué Gitea? +- **Ligero**: Binario único, bajo consumo +- **Auto-alojado**: Control total +- **API familiar**: Compatible con GitHub +- **Simple**: Instalación en minutos + +### ¿Por qué React 19.2 sin Next.js? +- **Simplicidad**: SPA sin server-side complexity +- **Control total**: Sin abstracciones extra +- **Rendimiento**: Nuevo compilador React +- **Features**: Transitions, Server Actions cliente-side + +## Alternativas Consideradas + +| Necesidad | Elegido | Alternativas | Razón | +|-----------|---------|--------------|-------| +| Runtime | Bun | Node, Deno | Velocidad + DX | +| DB | MySQL | PostgreSQL, MongoDB | Familiaridad + Madurez | +| ORM | Drizzle | Prisma, TypeORM | Type-safety + Performance | +| Git | Gitea | GitLab, Gogs | Simplicidad + Features | +| Frontend | React | Vue, Svelte | Ecosistema + React 19 | +| Orchestration | K8s | Docker Swarm, Nomad | Industry standard | + +## Dependencias Críticas + +```bash +# Backend +bun add express mysql2 drizzle-orm ioredis bullmq +bun add @modelcontextprotocol/sdk socket.io +bun add @kubernetes/client-node axios + +# Frontend +bun add react@19.2.0 react-dom@19.2.0 +bun add @tanstack/react-query zustand +bun add socket.io-client xterm +bun add @dnd-kit/core react-router-dom +``` + +## Roadmap Tecnológico + +**Fase 1 (MVP)**: Stack actual +**Fase 2**: Añadir Prometheus + Grafana +**Fase 3**: Implementar tracing con Jaeger +**Fase 4**: Multi-tenancy y sharding de DB diff --git a/docs/02-backend/api-endpoints.md b/docs/02-backend/api-endpoints.md new file mode 100644 index 0000000..8287a6e --- /dev/null +++ 
b/docs/02-backend/api-endpoints.md @@ -0,0 +1,484 @@ +# API Endpoints + +## Base URL + +``` +http://localhost:3000/api +``` + +## Authentication + +Todos los endpoints (excepto `/health`) requieren autenticación JWT: + +``` +Authorization: Bearer <token> +``` + +--- + +## Projects + +### GET /projects + +Lista todos los proyectos. + +**Response**: +```json +{ + "projects": [ + { + "id": "uuid", + "name": "My Project", + "description": "Project description", + "giteaRepoUrl": "http://gitea/owner/repo", + "k8sNamespace": "my-project", + "status": "active", + "createdAt": "2026-01-19T10:00:00Z" + } + ] +} +``` + +### GET /projects/:id + +Obtiene detalles de un proyecto. + +### POST /projects + +Crea un nuevo proyecto. + +**Body**: +```json +{ + "name": "My New Project", + "description": "Project description", + "dockerImage": "node:20-alpine", + "envVars": { + "NODE_ENV": "production" + }, + "replicas": 2, + "cpuLimit": "1000m", + "memoryLimit": "1Gi" +} +``` + +**Response**: +```json +{ + "project": { + "id": "uuid", + "name": "My New Project", + "giteaRepoUrl": "http://gitea/owner/my-new-project", + "k8sNamespace": "my-new-project-abc123" + } +} +``` + +### PATCH /projects/:id + +Actualiza un proyecto. + +### DELETE /projects/:id + +Elimina un proyecto y todos sus recursos. + +--- + +## Tasks + +### GET /tasks + +Lista tareas con filtros opcionales. + +**Query params**: +- `projectId`: Filtrar por proyecto +- `state`: Filtrar por estado (`backlog`, `in_progress`, etc.) 
+- `assignedAgentId`: Filtrar por agente +- `limit`: Límite de resultados (default: 50) +- `offset`: Offset para paginación + +**Response**: +```json +{ + "tasks": [ + { + "id": "uuid", + "projectId": "uuid", + "title": "Implement login", + "description": "Create authentication system", + "state": "in_progress", + "priority": "high", + "assignedAgentId": "agent-123", + "branchName": "task-abc-implement-login", + "prNumber": 42, + "prUrl": "http://gitea/owner/repo/pulls/42", + "previewUrl": "https://task-abc.preview.aiworker.dev", + "createdAt": "2026-01-19T10:00:00Z" + } + ], + "total": 10, + "limit": 50, + "offset": 0 +} +``` + +### GET /tasks/:id + +Obtiene detalles completos de una tarea incluyendo preguntas. + +**Response**: +```json +{ + "task": { + "id": "uuid", + "title": "Implement login", + "state": "needs_input", + "questions": [ + { + "id": "q-uuid", + "question": "Which auth library should I use?", + "context": "Need to choose between JWT or session-based", + "askedAt": "2026-01-19T11:00:00Z", + "status": "pending" + } + ], + "project": { + "name": "My Project", + "giteaRepoUrl": "..." + } + } +} +``` + +### POST /tasks + +Crea una nueva tarea. + +**Body**: +```json +{ + "projectId": "uuid", + "title": "Implement feature X", + "description": "Detailed description...", + "priority": "high" +} +``` + +### PATCH /tasks/:id + +Actualiza una tarea. + +**Body**: +```json +{ + "state": "approved", + "notes": "Looks good!" +} +``` + +### POST /tasks/:id/respond + +Responde a una pregunta del agente. + +**Body**: +```json +{ + "questionId": "q-uuid", + "response": "Use JWT with jsonwebtoken library" +} +``` + +**Response**: +```json +{ + "success": true, + "question": { + "id": "q-uuid", + "status": "answered", + "respondedAt": "2026-01-19T11:05:00Z" + } +} +``` + +### POST /tasks/:id/approve + +Aprueba una tarea en estado `ready_to_test`. + +### POST /tasks/:id/reject + +Rechaza una tarea y la regresa a `in_progress`. 
+ +**Body**: +```json +{ + "reason": "Needs more tests" +} +``` + +--- + +## Task Groups (Merges) + +### POST /task-groups + +Crea un grupo de tareas para merge a staging/production. + +**Body**: +```json +{ + "projectId": "uuid", + "taskIds": ["task-1", "task-2", "task-3"], + "targetBranch": "staging", + "notes": "Sprint 1 features" +} +``` + +**Response**: +```json +{ + "taskGroup": { + "id": "uuid", + "taskIds": ["task-1", "task-2", "task-3"], + "status": "pending", + "stagingBranch": "release/sprint-1" + } +} +``` + +### GET /task-groups/:id + +Obtiene detalles de un task group. + +### POST /task-groups/:id/deploy-staging + +Despliega el task group a staging. + +### POST /task-groups/:id/deploy-production + +Despliega el task group a production. + +--- + +## Agents + +### GET /agents + +Lista todos los agentes. + +**Response**: +```json +{ + "agents": [ + { + "id": "agent-123", + "podName": "claude-agent-abc123", + "status": "busy", + "currentTaskId": "task-uuid", + "capabilities": ["javascript", "react", "node"], + "tasksCompleted": 42, + "lastHeartbeat": "2026-01-19T12:00:00Z" + } + ] +} +``` + +### GET /agents/:id + +Obtiene detalles de un agente incluyendo logs recientes. + +### GET /agents/:id/logs + +Obtiene logs del agente. + +**Query params**: +- `limit`: Número de logs (default: 100) +- `level`: Filtrar por nivel (`debug`, `info`, `warn`, `error`) + +--- + +## Deployments + +### GET /deployments + +Lista deployments con filtros. + +**Query params**: +- `projectId`: Filtrar por proyecto +- `environment`: Filtrar por entorno +- `status`: Filtrar por estado + +### GET /deployments/:id + +Obtiene detalles de un deployment. + +### POST /deployments/:id/rollback + +Hace rollback de un deployment. + +**Response**: +```json +{ + "success": true, + "rollbackDeploymentId": "new-uuid" +} +``` + +--- + +## Health & Status + +### GET /health + +Health check del backend. 
+ +**Response**: +```json +{ + "status": "ok", + "timestamp": "2026-01-19T12:00:00Z", + "services": { + "mysql": "connected", + "redis": "connected", + "gitea": "reachable", + "kubernetes": "connected" + }, + "version": "1.0.0" +} +``` + +### GET /metrics + +Métricas del sistema (Prometheus format). + +--- + +## WebSocket Events + +Conectar a: `ws://localhost:3000` + +### Client → Server + +```json +{ + "event": "auth", + "data": { + "token": "jwt-token" + } +} +``` + +```json +{ + "event": "subscribe", + "data": { + "projectId": "uuid" + } +} +``` + +### Server → Client + +```json +{ + "event": "task:created", + "data": { + "taskId": "uuid", + "projectId": "uuid", + "title": "New task" + } +} +``` + +```json +{ + "event": "task:status_changed", + "data": { + "taskId": "uuid", + "oldState": "in_progress", + "newState": "ready_to_test", + "previewUrl": "https://..." + } +} +``` + +```json +{ + "event": "task:needs_input", + "data": { + "taskId": "uuid", + "questionId": "q-uuid", + "question": "Which library?" 
+ } +} +``` + +```json +{ + "event": "agent:status", + "data": { + "agentId": "agent-123", + "status": "idle", + "lastTaskId": "task-uuid" + } +} +``` + +```json +{ + "event": "deploy:started", + "data": { + "deploymentId": "uuid", + "environment": "staging" + } +} +``` + +```json +{ + "event": "deploy:completed", + "data": { + "deploymentId": "uuid", + "environment": "staging", + "url": "https://staging-project.aiworker.dev" + } +} +``` + +--- + +## Error Responses + +Todos los endpoints pueden retornar estos errores: + +### 400 Bad Request +```json +{ + "error": "Validation error", + "details": { + "field": "projectId", + "message": "Required" + } +} +``` + +### 401 Unauthorized +```json +{ + "error": "Invalid or expired token" +} +``` + +### 404 Not Found +```json +{ + "error": "Resource not found" +} +``` + +### 500 Internal Server Error +```json +{ + "error": "Internal server error", + "requestId": "req-uuid" +} +``` diff --git a/docs/02-backend/database-schema.md b/docs/02-backend/database-schema.md new file mode 100644 index 0000000..1dd54cb --- /dev/null +++ b/docs/02-backend/database-schema.md @@ -0,0 +1,462 @@ +# Database Schema con Drizzle ORM + +## Schema Definitions + +```typescript +// db/schema.ts +import { relations } from 'drizzle-orm' +import { + mysqlTable, + varchar, + text, + timestamp, + json, + int, + mysqlEnum, + boolean, + bigint, + index, + unique, +} from 'drizzle-orm/mysql-core' + +// ============================================ +// PROJECTS TABLE +// ============================================ + +export const projects = mysqlTable('projects', { + id: varchar('id', { length: 36 }).primaryKey(), + name: varchar('name', { length: 255 }).notNull(), + description: text('description'), + + // Gitea + giteaRepoId: int('gitea_repo_id'), + giteaRepoUrl: varchar('gitea_repo_url', { length: 512 }), + giteaOwner: varchar('gitea_owner', { length: 100 }), + giteaRepoName: varchar('gitea_repo_name', { length: 100 }), + defaultBranch: 
varchar('default_branch', { length: 100 }).default('main'), + + // K8s + k8sNamespace: varchar('k8s_namespace', { length: 63 }).notNull().unique(), + + // Infrastructure + dockerImage: varchar('docker_image', { length: 512 }), + envVars: json('env_vars').$type<Record<string, string>>(), + replicas: int('replicas').default(1), + cpuLimit: varchar('cpu_limit', { length: 20 }).default('500m'), + memoryLimit: varchar('memory_limit', { length: 20 }).default('512Mi'), + + // MCP + mcpTools: json('mcp_tools').$type<string[]>(), + mcpPermissions: json('mcp_permissions').$type<Record<string, boolean>>(), + + // Status + status: mysqlEnum('status', ['active', 'paused', 'archived']).default('active'), + + // Timestamps + createdAt: timestamp('created_at').defaultNow(), + updatedAt: timestamp('updated_at').defaultNow().onUpdateNow(), +}, (table) => ({ + statusIdx: index('idx_status').on(table.status), + k8sNamespaceIdx: index('idx_k8s_namespace').on(table.k8sNamespace), +})) + +// ============================================ +// AGENTS TABLE +// ============================================ + +export const agents = mysqlTable('agents', { + id: varchar('id', { length: 36 }).primaryKey(), + + // K8s + podName: varchar('pod_name', { length: 253 }).notNull().unique(), + k8sNamespace: varchar('k8s_namespace', { length: 63 }).default('agents'), + nodeName: varchar('node_name', { length: 253 }), + + // Status + status: mysqlEnum('status', ['idle', 'busy', 'error', 'offline', 'initializing']).default('initializing'), + currentTaskId: varchar('current_task_id', { length: 36 }), + + // Capabilities + capabilities: json('capabilities').$type<string[]>(), + maxConcurrentTasks: int('max_concurrent_tasks').default(1), + + // Health + lastHeartbeat: timestamp('last_heartbeat'), + errorMessage: text('error_message'), + restartsCount: int('restarts_count').default(0), + + // Metrics + tasksCompleted: int('tasks_completed').default(0), + totalRuntimeMinutes: int('total_runtime_minutes').default(0), + + // Timestamps + createdAt: 
timestamp('created_at').defaultNow(), + updatedAt: timestamp('updated_at').defaultNow().onUpdateNow(), +}, (table) => ({ + statusIdx: index('idx_status').on(table.status), + podNameIdx: index('idx_pod_name').on(table.podName), + lastHeartbeatIdx: index('idx_last_heartbeat').on(table.lastHeartbeat), +})) + +// ============================================ +// TASKS TABLE +// ============================================ + +export const tasks = mysqlTable('tasks', { + id: varchar('id', { length: 36 }).primaryKey(), + projectId: varchar('project_id', { length: 36 }).notNull().references(() => projects.id, { onDelete: 'cascade' }), + + // Task info + title: varchar('title', { length: 255 }).notNull(), + description: text('description'), + priority: mysqlEnum('priority', ['low', 'medium', 'high', 'urgent']).default('medium'), + + // State + state: mysqlEnum('state', [ + 'backlog', + 'in_progress', + 'needs_input', + 'ready_to_test', + 'approved', + 'staging', + 'production', + 'cancelled' + ]).default('backlog'), + + // Assignment + assignedAgentId: varchar('assigned_agent_id', { length: 36 }).references(() => agents.id, { onDelete: 'set null' }), + assignedAt: timestamp('assigned_at'), + + // Git + branchName: varchar('branch_name', { length: 255 }), + prNumber: int('pr_number'), + prUrl: varchar('pr_url', { length: 512 }), + + // Preview + previewNamespace: varchar('preview_namespace', { length: 63 }), + previewUrl: varchar('preview_url', { length: 512 }), + previewDeployedAt: timestamp('preview_deployed_at'), + + // Metadata + estimatedComplexity: mysqlEnum('estimated_complexity', ['trivial', 'simple', 'medium', 'complex']).default('medium'), + actualDurationMinutes: int('actual_duration_minutes'), + + // Timestamps + createdAt: timestamp('created_at').defaultNow(), + updatedAt: timestamp('updated_at').defaultNow().onUpdateNow(), + startedAt: timestamp('started_at'), + completedAt: timestamp('completed_at'), + deployedStagingAt: timestamp('deployed_staging_at'), + 
deployedProductionAt: timestamp('deployed_production_at'), +}, (table) => ({ + projectStateIdx: index('idx_project_state').on(table.projectId, table.state, table.createdAt), + stateIdx: index('idx_state').on(table.state), + assignedAgentIdx: index('idx_assigned_agent').on(table.assignedAgentId), + createdAtIdx: index('idx_created_at').on(table.createdAt), +})) + +// ============================================ +// TASK QUESTIONS TABLE +// ============================================ + +export const taskQuestions = mysqlTable('task_questions', { + id: varchar('id', { length: 36 }).primaryKey(), + taskId: varchar('task_id', { length: 36 }).notNull().references(() => tasks.id, { onDelete: 'cascade' }), + + // Question + question: text('question').notNull(), + context: text('context'), + askedAt: timestamp('asked_at').defaultNow(), + + // Response + response: text('response'), + respondedAt: timestamp('responded_at'), + respondedBy: varchar('responded_by', { length: 36 }), + + // Status + status: mysqlEnum('status', ['pending', 'answered', 'skipped']).default('pending'), +}, (table) => ({ + taskStatusIdx: index('idx_task_status').on(table.taskId, table.status), + statusIdx: index('idx_status').on(table.status), +})) + +// ============================================ +// TASK GROUPS TABLE +// ============================================ + +export const taskGroups = mysqlTable('task_groups', { + id: varchar('id', { length: 36 }).primaryKey(), + projectId: varchar('project_id', { length: 36 }).notNull().references(() => projects.id, { onDelete: 'cascade' }), + + // Grouping + taskIds: json('task_ids').$type<string[]>().notNull(), + + // Staging + stagingBranch: varchar('staging_branch', { length: 255 }), + stagingPrNumber: int('staging_pr_number'), + stagingPrUrl: varchar('staging_pr_url', { length: 512 }), + stagingDeployedAt: timestamp('staging_deployed_at'), + + // Production + productionDeployedAt: timestamp('production_deployed_at'), + productionRollbackAvailable: 
boolean('production_rollback_available').default(true), + + // Status + status: mysqlEnum('status', ['pending', 'staging', 'production', 'rolled_back']).default('pending'), + + // Metadata + createdBy: varchar('created_by', { length: 36 }), + notes: text('notes'), + + // Timestamps + createdAt: timestamp('created_at').defaultNow(), + updatedAt: timestamp('updated_at').defaultNow().onUpdateNow(), +}, (table) => ({ + projectStatusIdx: index('idx_project_status').on(table.projectId, table.status), + statusIdx: index('idx_status').on(table.status), +})) + +// ============================================ +// DEPLOYMENTS TABLE +// ============================================ + +export const deployments = mysqlTable('deployments', { + id: varchar('id', { length: 36 }).primaryKey(), + projectId: varchar('project_id', { length: 36 }).notNull().references(() => projects.id, { onDelete: 'cascade' }), + taskGroupId: varchar('task_group_id', { length: 36 }).references(() => taskGroups.id, { onDelete: 'set null' }), + + // Deployment info + environment: mysqlEnum('environment', ['preview', 'staging', 'production']).notNull(), + deploymentType: mysqlEnum('deployment_type', ['manual', 'automatic', 'rollback']).default('manual'), + + // Git + branch: varchar('branch', { length: 255 }), + commitHash: varchar('commit_hash', { length: 40 }), + + // K8s + k8sNamespace: varchar('k8s_namespace', { length: 63 }), + k8sDeploymentName: varchar('k8s_deployment_name', { length: 253 }), + imageTag: varchar('image_tag', { length: 255 }), + + // Status + status: mysqlEnum('status', ['pending', 'in_progress', 'completed', 'failed', 'rolled_back']).default('pending'), + + // Results + url: varchar('url', { length: 512 }), + errorMessage: text('error_message'), + logs: text('logs'), + + // Timing + startedAt: timestamp('started_at'), + completedAt: timestamp('completed_at'), + durationSeconds: int('duration_seconds'), + + // Metadata + triggeredBy: varchar('triggered_by', { length: 36 }), + + // 
Timestamps + createdAt: timestamp('created_at').defaultNow(), +}, (table) => ({ + projectEnvIdx: index('idx_project_env').on(table.projectId, table.environment), + statusIdx: index('idx_status').on(table.status), + createdAtIdx: index('idx_created_at').on(table.createdAt), +})) + +// ============================================ +// AGENT LOGS TABLE +// ============================================ + +export const agentLogs = mysqlTable('agent_logs', { + id: bigint('id', { mode: 'number' }).autoincrement().primaryKey(), + agentId: varchar('agent_id', { length: 36 }).notNull().references(() => agents.id, { onDelete: 'cascade' }), + taskId: varchar('task_id', { length: 36 }).references(() => tasks.id, { onDelete: 'set null' }), + + // Log entry + level: mysqlEnum('level', ['debug', 'info', 'warn', 'error']).default('info'), + message: text('message').notNull(), + metadata: json('metadata').$type<Record<string, unknown>>(), + + // Timestamp + createdAt: timestamp('created_at').defaultNow(), +}, (table) => ({ + agentCreatedIdx: index('idx_agent_created').on(table.agentId, table.createdAt), + taskCreatedIdx: index('idx_task_created').on(table.taskId, table.createdAt), + levelIdx: index('idx_level').on(table.level), +})) + +// ============================================ +// RELATIONS +// ============================================ + +export const projectsRelations = relations(projects, ({ many }) => ({ + tasks: many(tasks), + taskGroups: many(taskGroups), + deployments: many(deployments), +})) + +export const tasksRelations = relations(tasks, ({ one, many }) => ({ + project: one(projects, { + fields: [tasks.projectId], + references: [projects.id], + }), + assignedAgent: one(agents, { + fields: [tasks.assignedAgentId], + references: [agents.id], + }), + questions: many(taskQuestions), +})) + +export const agentsRelations = relations(agents, ({ one, many }) => ({ + currentTask: one(tasks, { + fields: [agents.currentTaskId], + references: [tasks.id], + }), + logs: many(agentLogs), +})) + +export 
const taskQuestionsRelations = relations(taskQuestions, ({ one }) => ({ + task: one(tasks, { + fields: [taskQuestions.taskId], + references: [tasks.id], + }), +})) + +export const taskGroupsRelations = relations(taskGroups, ({ one, many }) => ({ + project: one(projects, { + fields: [taskGroups.projectId], + references: [projects.id], + }), + deployments: many(deployments), +})) + +export const deploymentsRelations = relations(deployments, ({ one }) => ({ + project: one(projects, { + fields: [deployments.projectId], + references: [projects.id], + }), + taskGroup: one(taskGroups, { + fields: [deployments.taskGroupId], + references: [taskGroups.id], + }), +})) + +export const agentLogsRelations = relations(agentLogs, ({ one }) => ({ + agent: one(agents, { + fields: [agentLogs.agentId], + references: [agents.id], + }), + task: one(tasks, { + fields: [agentLogs.taskId], + references: [tasks.id], + }), +})) +``` + +## Drizzle Configuration + +```typescript +// drizzle.config.ts +import type { Config } from 'drizzle-kit' + +export default { + schema: './src/db/schema.ts', + out: './drizzle/migrations', + driver: 'mysql2', + dbCredentials: { + host: process.env.DB_HOST || 'localhost', + port: parseInt(process.env.DB_PORT || '3306'), + user: process.env.DB_USER || 'root', + password: process.env.DB_PASSWORD || '', + database: process.env.DB_NAME || 'aiworker', + }, +} satisfies Config +``` + +## Database Client + +```typescript +// db/client.ts +import { drizzle } from 'drizzle-orm/mysql2' +import mysql from 'mysql2/promise' +import * as schema from './schema' + +const pool = mysql.createPool({ + host: process.env.DB_HOST, + port: parseInt(process.env.DB_PORT || '3306'), + user: process.env.DB_USER, + password: process.env.DB_PASSWORD, + database: process.env.DB_NAME, + waitForConnections: true, + connectionLimit: 10, + queueLimit: 0, +}) + +export const db = drizzle(pool, { schema, mode: 'default' }) +``` + +## Ejemplos de Queries + +```typescript +// Get all tasks for a 
project +const projectTasks = await db.query.tasks.findMany({ + where: eq(tasks.projectId, projectId), + with: { + assignedAgent: true, + questions: { + where: eq(taskQuestions.status, 'pending') + } + }, + orderBy: [desc(tasks.createdAt)] +}) + +// Get next available task +const nextTask = await db.query.tasks.findFirst({ + where: eq(tasks.state, 'backlog'), + orderBy: [desc(tasks.priority), asc(tasks.createdAt)] +}) + +// Get idle agents +const idleAgents = await db.query.agents.findMany({ + where: and( + eq(agents.status, 'idle'), + gt(agents.lastHeartbeat, new Date(Date.now() - 60000)) + ) +}) + +// Insert new task +const newTask = await db.insert(tasks).values({ + id: crypto.randomUUID(), + projectId: projectId, + title: 'New task', + description: 'Task description', + state: 'backlog', + priority: 'medium', +}) +``` + +## Migrations + +```bash +# Generate migration +bun run drizzle-kit generate:mysql + +# Push changes directly (dev only) +bun run drizzle-kit push:mysql + +# Run migrations +bun run scripts/migrate.ts +``` + +```typescript +// scripts/migrate.ts +import { migrate } from 'drizzle-orm/mysql2/migrator' +import { db } from '../src/db/client' + +async function runMigrations() { + await migrate(db, { migrationsFolder: './drizzle/migrations' }) + console.log('✓ Migrations completed') + process.exit(0) +} + +runMigrations().catch(console.error) +``` diff --git a/docs/02-backend/estructura.md b/docs/02-backend/estructura.md new file mode 100644 index 0000000..3736272 --- /dev/null +++ b/docs/02-backend/estructura.md @@ -0,0 +1,480 @@ +# Estructura del Backend + +## Árbol de Directorios + +``` +backend/ +├── src/ +│ ├── index.ts # Entry point +│ ├── config/ +│ │ ├── database.ts # MySQL connection +│ │ ├── redis.ts # Redis connection +│ │ └── env.ts # Environment variables +│ │ +│ ├── api/ +│ │ ├── app.ts # Express app setup +│ │ ├── routes/ +│ │ │ ├── index.ts +│ │ │ ├── projects.ts # /api/projects +│ │ │ ├── tasks.ts # /api/tasks +│ │ │ ├── agents.ts # 
/api/agents +│ │ │ ├── deployments.ts# /api/deployments +│ │ │ └── health.ts # /api/health +│ │ │ +│ │ ├── middleware/ +│ │ │ ├── auth.ts # JWT validation +│ │ │ ├── error.ts # Error handler +│ │ │ ├── logger.ts # Request logging +│ │ │ └── validate.ts # Schema validation +│ │ │ +│ │ └── websocket/ +│ │ ├── server.ts # Socket.io setup +│ │ └── handlers.ts # WS event handlers +│ │ +│ ├── db/ +│ │ ├── schema.ts # Drizzle schema +│ │ ├── migrations/ # SQL migrations +│ │ └── client.ts # DB client instance +│ │ +│ ├── services/ +│ │ ├── mcp/ +│ │ │ ├── server.ts # MCP server for agents +│ │ │ ├── tools.ts # MCP tool definitions +│ │ │ └── handlers.ts # Tool implementations +│ │ │ +│ │ ├── gitea/ +│ │ │ ├── client.ts # Gitea API client +│ │ │ ├── repos.ts # Repo operations +│ │ │ ├── pulls.ts # PR operations +│ │ │ └── webhooks.ts # Webhook handling +│ │ │ +│ │ ├── kubernetes/ +│ │ │ ├── client.ts # K8s API client +│ │ │ ├── namespaces.ts # Namespace management +│ │ │ ├── deployments.ts# Deployment management +│ │ │ ├── pods.ts # Pod operations +│ │ │ └── ingress.ts # Ingress management +│ │ │ +│ │ ├── queue/ +│ │ │ ├── task-queue.ts # Task queue +│ │ │ ├── deploy-queue.ts# Deploy queue +│ │ │ └── workers.ts # Queue workers +│ │ │ +│ │ └── cache/ +│ │ ├── redis.ts # Redis operations +│ │ └── strategies.ts # Caching strategies +│ │ +│ ├── models/ +│ │ ├── Project.ts # Project model +│ │ ├── Task.ts # Task model +│ │ ├── Agent.ts # Agent model +│ │ ├── TaskGroup.ts # TaskGroup model +│ │ └── Deployment.ts # Deployment model +│ │ +│ ├── types/ +│ │ ├── api.ts # API types +│ │ ├── mcp.ts # MCP types +│ │ ├── k8s.ts # K8s types +│ │ └── common.ts # Common types +│ │ +│ └── utils/ +│ ├── logger.ts # Winston logger +│ ├── errors.ts # Custom errors +│ ├── validators.ts # Validation helpers +│ └── helpers.ts # General helpers +│ +├── drizzle/ # Drizzle config +│ ├── drizzle.config.ts +│ └── migrations/ +│ +├── tests/ +│ ├── unit/ +│ ├── integration/ +│ └── e2e/ +│ +├── scripts/ 
+│ ├── seed.ts # Seed database +│ ├── migrate.ts # Run migrations +│ └── generate-types.ts # Generate types +│ +├── .env.example +├── .eslintrc.json +├── .prettierrc +├── tsconfig.json +├── package.json +└── README.md +``` + +## Entry Point (index.ts) + +```typescript +import { startServer } from './api/app' +import { connectDatabase } from './config/database' +import { connectRedis } from './config/redis' +import { startMCPServer } from './services/mcp/server' +import { startQueueWorkers } from './services/queue/workers' +import { logger } from './utils/logger' + +async function bootstrap() { + try { + // Connect to MySQL + await connectDatabase() + logger.info('✓ MySQL connected') + + // Connect to Redis + await connectRedis() + logger.info('✓ Redis connected') + + // Start MCP Server for agents + await startMCPServer() + logger.info('✓ MCP Server started') + + // Start BullMQ workers + await startQueueWorkers() + logger.info('✓ Queue workers started') + + // Start HTTP + WebSocket server + await startServer() + logger.info('✓ API Server started on port 3000') + + } catch (error) { + logger.error('Failed to start server:', error) + process.exit(1) + } +} + +bootstrap() +``` + +## Express App Setup (api/app.ts) + +```typescript +import express from 'express' +import cors from 'cors' +import { createServer } from 'http' +import { Server as SocketIOServer } from 'socket.io' +import routes from './routes' +import { errorHandler } from './middleware/error' +import { requestLogger } from './middleware/logger' +import { setupWebSocket } from './websocket/server' + +export async function startServer() { + const app = express() + const httpServer = createServer(app) + const io = new SocketIOServer(httpServer, { + cors: { origin: process.env.FRONTEND_URL } + }) + + // Middleware + app.use(cors()) + app.use(express.json()) + app.use(requestLogger) + + // Routes + app.use('/api', routes) + + // Error handling + app.use(errorHandler) + + // WebSocket + setupWebSocket(io) + + 
// Start + const port = process.env.PORT || 3000 + httpServer.listen(port) + + return { app, httpServer, io } +} +``` + +## Configuración de Base de Datos + +```typescript +// config/database.ts +import { drizzle } from 'drizzle-orm/mysql2' +import mysql from 'mysql2/promise' +import * as schema from '../db/schema' + +let connection: mysql.Connection +let db: ReturnType<typeof drizzle> + +export async function connectDatabase() { + connection = await mysql.createConnection({ + host: process.env.DB_HOST, + port: parseInt(process.env.DB_PORT || '3306'), + user: process.env.DB_USER, + password: process.env.DB_PASSWORD, + database: process.env.DB_NAME, + }) + + db = drizzle(connection, { schema, mode: 'default' }) + + return db +} + +export function getDatabase() { + if (!db) { + throw new Error('Database not initialized') + } + return db +} +``` + +## Configuración de Redis + +```typescript +// config/redis.ts +import Redis from 'ioredis' + +let redis: Redis + +export async function connectRedis() { + redis = new Redis({ + host: process.env.REDIS_HOST || 'localhost', + port: parseInt(process.env.REDIS_PORT || '6379'), + password: process.env.REDIS_PASSWORD, + retryStrategy: (times) => { + const delay = Math.min(times * 50, 2000) + return delay + } + }) + + await redis.ping() + return redis +} + +export function getRedis() { + if (!redis) { + throw new Error('Redis not initialized') + } + return redis +} +``` + +## Variables de Entorno + +```bash +# .env.example + +# Server +NODE_ENV=development +PORT=3000 +FRONTEND_URL=http://localhost:5173 + +# Database +DB_HOST=localhost +DB_PORT=3306 +DB_USER=root +DB_PASSWORD=password +DB_NAME=aiworker + +# Redis +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD= + +# Gitea +GITEA_URL=http://localhost:3001 +GITEA_TOKEN=your-gitea-token +GITEA_OWNER=aiworker + +# Kubernetes +K8S_IN_CLUSTER=false +K8S_CONFIG_PATH=~/.kube/config +K8S_DEFAULT_NAMESPACE=aiworker + +# MCP Server +MCP_SERVER_PORT=3100 +MCP_AUTH_TOKEN=your-mcp-token + +# JWT
+JWT_SECRET=your-secret-key +JWT_EXPIRES_IN=7d + +# Claude API +ANTHROPIC_API_KEY=your-api-key +``` + +## Scripts de Package.json + +```json +{ + "name": "aiworker-backend", + "version": "1.0.0", + "scripts": { + "dev": "bun --watch src/index.ts", + "build": "bun build src/index.ts --outdir dist --target node", + "start": "bun dist/index.js", + "db:generate": "drizzle-kit generate:mysql", + "db:push": "drizzle-kit push:mysql", + "db:migrate": "bun run scripts/migrate.ts", + "db:seed": "bun run scripts/seed.ts", + "test": "bun test", + "test:watch": "bun test --watch", + "lint": "eslint src/**/*.ts", + "format": "prettier --write src/**/*.ts" + }, + "dependencies": { + "express": "^4.19.0", + "mysql2": "^3.11.0", + "drizzle-orm": "^0.36.0", + "ioredis": "^5.4.1", + "bullmq": "^5.23.0", + "socket.io": "^4.8.1", + "@modelcontextprotocol/sdk": "^1.0.0", + "@kubernetes/client-node": "^0.22.0", + "axios": "^1.7.9", + "zod": "^3.24.1", + "winston": "^3.17.0", + "jsonwebtoken": "^9.0.2", + "cors": "^2.8.5", + "dotenv": "^16.4.7" + }, + "devDependencies": { + "@types/express": "^5.0.0", + "@types/node": "^22.10.2", + "drizzle-kit": "^0.31.0", + "typescript": "^5.7.2", + "prettier": "^3.4.2", + "eslint": "^9.18.0" + } +} +``` + +## Estructura de Rutas + +```typescript +// api/routes/index.ts +import { Router } from 'express' +import projectRoutes from './projects' +import taskRoutes from './tasks' +import agentRoutes from './agents' +import deploymentRoutes from './deployments' +import healthRoutes from './health' + +const router = Router() + +router.use('/projects', projectRoutes) +router.use('/tasks', taskRoutes) +router.use('/agents', agentRoutes) +router.use('/deployments', deploymentRoutes) +router.use('/health', healthRoutes) + +export default router +``` + +## Middleware de Validación + +```typescript +// middleware/validate.ts +import { Request, Response, NextFunction } from 'express' +import { ZodSchema } from 'zod' + +export function validate(schema: ZodSchema) { + 
return (req: Request, res: Response, next: NextFunction) => { + try { + schema.parse({ + body: req.body, + query: req.query, + params: req.params, + }) + next() + } catch (error) { + res.status(400).json({ + error: 'Validation error', + details: error + }) + } + } +} +``` + +## Logger Setup + +```typescript +// utils/logger.ts +import winston from 'winston' + +export const logger = winston.createLogger({ + level: process.env.LOG_LEVEL || 'info', + format: winston.format.combine( + winston.format.timestamp(), + winston.format.errors({ stack: true }), + winston.format.json() + ), + transports: [ + new winston.transports.Console({ + format: winston.format.combine( + winston.format.colorize(), + winston.format.simple() + ) + }), + new winston.transports.File({ filename: 'error.log', level: 'error' }), + new winston.transports.File({ filename: 'combined.log' }) + ] +}) +``` + +## Manejo de Errores + +```typescript +// middleware/error.ts +import { Request, Response, NextFunction } from 'express' +import { logger } from '../utils/logger' + +export class AppError extends Error { + statusCode: number + isOperational: boolean + + constructor(message: string, statusCode: number) { + super(message) + this.statusCode = statusCode + this.isOperational = true + Error.captureStackTrace(this, this.constructor) + } +} + +export function errorHandler( + err: Error | AppError, + req: Request, + res: Response, + next: NextFunction +) { + logger.error('Error:', err) + + if (err instanceof AppError) { + return res.status(err.statusCode).json({ + error: err.message + }) + } + + res.status(500).json({ + error: 'Internal server error' + }) +} +``` + +## Comandos Útiles + +```bash +# Desarrollo +bun run dev + +# Generar migraciones +bun run db:generate + +# Aplicar migraciones +bun run db:migrate + +# Seed inicial +bun run db:seed + +# Tests +bun test + +# Build para producción +bun run build + +# Producción +bun run start +``` diff --git a/docs/02-backend/gitea-integration.md 
b/docs/02-backend/gitea-integration.md new file mode 100644 index 0000000..1e4df6d --- /dev/null +++ b/docs/02-backend/gitea-integration.md @@ -0,0 +1,459 @@ +# Integración con Gitea + +## Cliente de Gitea + +```typescript +// services/gitea/client.ts +import axios, { AxiosInstance } from 'axios' +import { logger } from '../../utils/logger' + +export interface GiteaConfig { + url: string + token: string + owner: string +} + +export class GiteaClient { + private client: AxiosInstance + private owner: string + + constructor(config?: GiteaConfig) { + const url = config?.url || process.env.GITEA_URL! + const token = config?.token || process.env.GITEA_TOKEN! + this.owner = config?.owner || process.env.GITEA_OWNER! + + this.client = axios.create({ + baseURL: `${url}/api/v1`, + headers: { + 'Authorization': `token ${token}`, + 'Content-Type': 'application/json' + }, + timeout: 30000 + }) + + // Log requests + this.client.interceptors.request.use((config) => { + logger.debug(`Gitea API: ${config.method?.toUpperCase()} ${config.url}`) + return config + }) + + // Handle errors + this.client.interceptors.response.use( + (response) => response, + (error) => { + logger.error('Gitea API Error:', { + url: error.config?.url, + status: error.response?.status, + data: error.response?.data + }) + throw error + } + ) + } + + // ============================================ + // REPOSITORIES + // ============================================ + + async createRepo(name: string, options: { + description?: string + private?: boolean + autoInit?: boolean + defaultBranch?: string + } = {}) { + const response = await this.client.post('/user/repos', { + name, + description: options.description || '', + private: options.private !== false, + auto_init: options.autoInit !== false, + default_branch: options.defaultBranch || 'main', + trust_model: 'default' + }) + + logger.info(`Gitea: Created repo ${name}`) + return response.data + } + + async getRepo(owner: string, repo: string) { + const response 
= await this.client.get(`/repos/${owner}/${repo}`) + return response.data + } + + async deleteRepo(owner: string, repo: string) { + await this.client.delete(`/repos/${owner}/${repo}`) + logger.info(`Gitea: Deleted repo ${owner}/${repo}`) + } + + async listRepos(owner?: string) { + const targetOwner = owner || this.owner + const response = await this.client.get(`/users/${targetOwner}/repos`) + return response.data + } + + // ============================================ + // BRANCHES + // ============================================ + + async createBranch(owner: string, repo: string, branchName: string, fromBranch: string = 'main') { + // Get reference commit + const refResponse = await this.client.get( + `/repos/${owner}/${repo}/git/refs/heads/${fromBranch}` + ) + const sha = refResponse.data.object.sha + + // Create new branch + const response = await this.client.post( + `/repos/${owner}/${repo}/git/refs`, + { + ref: `refs/heads/${branchName}`, + sha + } + ) + + logger.info(`Gitea: Created branch ${branchName} from ${fromBranch}`) + return response.data + } + + async getBranch(owner: string, repo: string, branch: string) { + const response = await this.client.get( + `/repos/${owner}/${repo}/branches/${branch}` + ) + return response.data + } + + async listBranches(owner: string, repo: string) { + const response = await this.client.get( + `/repos/${owner}/${repo}/branches` + ) + return response.data + } + + async deleteBranch(owner: string, repo: string, branch: string) { + await this.client.delete( + `/repos/${owner}/${repo}/branches/${branch}` + ) + logger.info(`Gitea: Deleted branch ${branch}`) + } + + // ============================================ + // PULL REQUESTS + // ============================================ + + async createPullRequest(owner: string, repo: string, data: { + title: string + body: string + head: string + base: string + }) { + const response = await this.client.post( + `/repos/${owner}/${repo}/pulls`, + { + title: data.title, + body: 
data.body, + head: data.head, + base: data.base + } + ) + + logger.info(`Gitea: Created PR #${response.data.number}`) + return response.data + } + + async getPullRequest(owner: string, repo: string, index: number) { + const response = await this.client.get( + `/repos/${owner}/${repo}/pulls/${index}` + ) + return response.data + } + + async listPullRequests(owner: string, repo: string, state: 'open' | 'closed' | 'all' = 'open') { + const response = await this.client.get( + `/repos/${owner}/${repo}/pulls`, + { params: { state } } + ) + return response.data + } + + async mergePullRequest(owner: string, repo: string, index: number, method: 'merge' | 'rebase' | 'squash' = 'merge') { + const response = await this.client.post( + `/repos/${owner}/${repo}/pulls/${index}/merge`, + { + Do: method, + MergeMessageField: '', + MergeTitleField: '' + } + ) + + logger.info(`Gitea: Merged PR #${index}`) + return response.data + } + + async closePullRequest(owner: string, repo: string, index: number) { + const response = await this.client.patch( + `/repos/${owner}/${repo}/pulls/${index}`, + { state: 'closed' } + ) + + logger.info(`Gitea: Closed PR #${index}`) + return response.data + } + + // ============================================ + // COMMITS + // ============================================ + + async getCommit(owner: string, repo: string, sha: string) { + const response = await this.client.get( + `/repos/${owner}/${repo}/git/commits/${sha}` + ) + return response.data + } + + async listCommits(owner: string, repo: string, options: { + sha?: string + path?: string + page?: number + limit?: number + } = {}) { + const response = await this.client.get( + `/repos/${owner}/${repo}/commits`, + { params: options } + ) + return response.data + } + + // ============================================ + // WEBHOOKS + // ============================================ + + async createWebhook(owner: string, repo: string, config: { + url: string + contentType?: 'json' | 'form' + secret?: string + 
events?: string[] + }) { + const response = await this.client.post( + `/repos/${owner}/${repo}/hooks`, + { + type: 'gitea', + config: { + url: config.url, + content_type: config.contentType || 'json', + secret: config.secret || '' + }, + events: config.events || ['push', 'pull_request'], + active: true + } + ) + + logger.info(`Gitea: Created webhook for ${owner}/${repo}`) + return response.data + } + + async listWebhooks(owner: string, repo: string) { + const response = await this.client.get( + `/repos/${owner}/${repo}/hooks` + ) + return response.data + } + + async deleteWebhook(owner: string, repo: string, hookId: number) { + await this.client.delete( + `/repos/${owner}/${repo}/hooks/${hookId}` + ) + logger.info(`Gitea: Deleted webhook ${hookId}`) + } + + // ============================================ + // FILES + // ============================================ + + async getFileContents(owner: string, repo: string, filepath: string, ref: string = 'main') { + const response = await this.client.get( + `/repos/${owner}/${repo}/contents/${filepath}`, + { params: { ref } } + ) + return response.data + } + + async createOrUpdateFile(owner: string, repo: string, filepath: string, data: { + content: string // base64 encoded + message: string + branch?: string + sha?: string // for updates + }) { + const response = await this.client.post( + `/repos/${owner}/${repo}/contents/${filepath}`, + { + content: data.content, + message: data.message, + branch: data.branch || 'main', + sha: data.sha + } + ) + + logger.info(`Gitea: Updated file ${filepath}`) + return response.data + } + + // ============================================ + // USERS + // ============================================ + + async getCurrentUser() { + const response = await this.client.get('/user') + return response.data + } + + async getUser(username: string) { + const response = await this.client.get(`/users/${username}`) + return response.data + } + + // ============================================ + // 
ORGANIZATIONS (if needed) + // ============================================ + + async createOrg(name: string, options: { + fullName?: string + description?: string + } = {}) { + const response = await this.client.post('/orgs', { + username: name, + full_name: options.fullName || name, + description: options.description || '' + }) + + logger.info(`Gitea: Created org ${name}`) + return response.data + } +} + +// Export singleton instance +export const giteaClient = new GiteaClient() +``` + +## Webhook Handler + +```typescript +// services/gitea/webhooks.ts +import { Request, Response } from 'express' +import crypto from 'crypto' +import { logger } from '../../utils/logger' +import { db } from '../../db/client' +import { tasks } from '../../db/schema' +import { eq } from 'drizzle-orm' +import { emitWebSocketEvent } from '../../api/websocket/server' + +export async function handleGiteaWebhook(req: Request, res: Response) { + const signature = req.headers['x-gitea-signature'] as string + const event = req.headers['x-gitea-event'] as string + const payload = req.body + + // Verify signature + const secret = process.env.GITEA_WEBHOOK_SECRET || '' + if (secret && signature) { + const hmac = crypto.createHmac('sha256', secret) + hmac.update(JSON.stringify(payload)) + const calculatedSignature = hmac.digest('hex') + + if (signature !== calculatedSignature) { + logger.warn('Gitea webhook: Invalid signature') + return res.status(401).json({ error: 'Invalid signature' }) + } + } + + logger.info(`Gitea webhook: ${event}`, { + repo: payload.repository?.full_name, + ref: payload.ref + }) + + try { + switch (event) { + case 'push': + await handlePushEvent(payload) + break + + case 'pull_request': + await handlePullRequestEvent(payload) + break + + default: + logger.debug(`Unhandled webhook event: ${event}`) + } + + res.status(200).json({ success: true }) + } catch (error) { + logger.error('Webhook handler error:', error) + res.status(500).json({ error: 'Internal error' }) + } +} + 
+async function handlePushEvent(payload: any) { + const branch = payload.ref.replace('refs/heads/', '') + const commits = payload.commits || [] + + logger.info(`Push to ${branch}: ${commits.length} commits`) + + // Find task by branch name + const task = await db.query.tasks.findFirst({ + where: eq(tasks.branchName, branch) + }) + + if (task) { + emitWebSocketEvent('task:push', { + taskId: task.id, + branch, + commitsCount: commits.length + }) + } +} + +async function handlePullRequestEvent(payload: any) { + const action = payload.action // opened, closed, reopened, edited, synchronized + const prNumber = payload.pull_request.number + const state = payload.pull_request.state + + logger.info(`PR #${prNumber}: ${action}`) + + // Find task by PR number + const task = await db.query.tasks.findFirst({ + where: eq(tasks.prNumber, prNumber) + }) + + if (task) { + if (action === 'closed' && payload.pull_request.merged) { + // PR was merged + await db.update(tasks) + .set({ state: 'staging' }) + .where(eq(tasks.id, task.id)) + + emitWebSocketEvent('task:merged', { + taskId: task.id, + prNumber + }) + } + + emitWebSocketEvent('task:pr_updated', { + taskId: task.id, + prNumber, + action, + state + }) + } +} +``` + +## Router para Webhooks + +```typescript +// api/routes/webhooks.ts +import { Router } from 'express' +import { handleGiteaWebhook } from '../../services/gitea/webhooks' + +const router = Router() + +router.post('/gitea', handleGiteaWebhook) + +export default router +``` diff --git a/docs/02-backend/mcp-server.md b/docs/02-backend/mcp-server.md new file mode 100644 index 0000000..82e7e95 --- /dev/null +++ b/docs/02-backend/mcp-server.md @@ -0,0 +1,788 @@ +# MCP Server para Agentes + +El MCP (Model Context Protocol) Server es la interfaz que permite a los agentes Claude Code comunicarse con el backend y ejecutar operaciones. 
+ +## Arquitectura MCP + +``` +┌─────────────────┐ ┌─────────────────┐ +│ Claude Code │ MCP Protocol │ MCP Server │ +│ (Agent Pod) │◄──────────────────►│ (Backend) │ +└─────────────────┘ └─────────────────┘ + │ + ┌─────────────────────┼─────────────────────┐ + │ │ │ + ┌────▼────┐ ┌────▼────┐ ┌────▼────┐ + │ MySQL │ │ Gitea │ │ K8s │ + └─────────┘ └─────────┘ └─────────┘ +``` + +## Setup del MCP Server + +```typescript +// services/mcp/server.ts +import { Server } from '@modelcontextprotocol/sdk/server/index.js' +import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js' +import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js' +import { tools } from './tools' +import { handleToolCall } from './handlers' +import { logger } from '../../utils/logger' + +export class AgentMCPServer { + private server: Server + + constructor() { + this.server = new Server( + { + name: 'aiworker-orchestrator', + version: '1.0.0', + }, + { + capabilities: { + tools: {}, + }, + } + ) + + this.setupHandlers() + } + + private setupHandlers() { + // List available tools + this.server.setRequestHandler(ListToolsRequestSchema, async () => { + return { + tools: tools.map(tool => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + } + }) + + // Handle tool calls + this.server.setRequestHandler(CallToolRequestSchema, async (request) => { + const { name, arguments: args } = request.params + + logger.info(`MCP: Tool called: ${name}`, { args }) + + try { + const result = await handleToolCall(name, args) + return result + } catch (error) { + logger.error(`MCP: Tool error: ${name}`, error) + return { + content: [{ + type: 'text', + text: `Error: ${error.message}` + }], + isError: true + } + } + }) + } + + async start() { + const transport = new StdioServerTransport() + await this.server.connect(transport) + logger.info('MCP Server started') + } +} + +// Start MCP server +let mcpServer: 
AgentMCPServer + +export async function startMCPServer() { + mcpServer = new AgentMCPServer() + await mcpServer.start() + return mcpServer +} + +export function getMCPServer() { + return mcpServer +} +``` + +## Definición de Herramientas + +```typescript +// services/mcp/tools.ts +import { z } from 'zod' + +export const tools = [ + { + name: 'get_next_task', + description: 'Obtiene la siguiente tarea disponible de la cola', + inputSchema: { + type: 'object', + properties: { + agentId: { + type: 'string', + description: 'ID del agente solicitante' + }, + capabilities: { + type: 'array', + items: { type: 'string' }, + description: 'Capacidades del agente (ej: ["javascript", "react"])' + } + }, + required: ['agentId'] + } + }, + + { + name: 'update_task_status', + description: 'Actualiza el estado de una tarea', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + }, + status: { + type: 'string', + enum: ['in_progress', 'needs_input', 'ready_to_test', 'completed'], + description: 'Nuevo estado' + }, + metadata: { + type: 'object', + description: 'Metadata adicional (duración, errores, etc.)' + } + }, + required: ['taskId', 'status'] + } + }, + + { + name: 'ask_user_question', + description: 'Solicita información al usuario', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + }, + question: { + type: 'string', + description: 'Pregunta para el usuario' + }, + context: { + type: 'string', + description: 'Contexto adicional' + } + }, + required: ['taskId', 'question'] + } + }, + + { + name: 'check_question_response', + description: 'Verifica si el usuario ha respondido una pregunta', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + } + }, + required: ['taskId'] + } + }, + + { + name: 'create_branch', + description: 'Crea una nueva rama en Gitea', + inputSchema: { + type: 'object', + 
properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + }, + branchName: { + type: 'string', + description: 'Nombre de la rama (opcional, se genera automático)' + } + }, + required: ['taskId'] + } + }, + + { + name: 'create_pull_request', + description: 'Crea un Pull Request en Gitea', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + }, + title: { + type: 'string', + description: 'Título del PR' + }, + description: { + type: 'string', + description: 'Descripción del PR' + } + }, + required: ['taskId', 'title', 'description'] + } + }, + + { + name: 'trigger_preview_deploy', + description: 'Despliega un preview environment en K8s', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + } + }, + required: ['taskId'] + } + }, + + { + name: 'get_task_details', + description: 'Obtiene detalles completos de una tarea', + inputSchema: { + type: 'object', + properties: { + taskId: { + type: 'string', + description: 'ID de la tarea' + } + }, + required: ['taskId'] + } + }, + + { + name: 'log_activity', + description: 'Registra actividad del agente', + inputSchema: { + type: 'object', + properties: { + agentId: { + type: 'string', + description: 'ID del agente' + }, + level: { + type: 'string', + enum: ['debug', 'info', 'warn', 'error'], + description: 'Nivel de log' + }, + message: { + type: 'string', + description: 'Mensaje' + }, + metadata: { + type: 'object', + description: 'Metadata adicional' + } + }, + required: ['agentId', 'message'] + } + }, + + { + name: 'heartbeat', + description: 'Envía heartbeat para indicar que el agente está activo', + inputSchema: { + type: 'object', + properties: { + agentId: { + type: 'string', + description: 'ID del agente' + }, + status: { + type: 'string', + enum: ['idle', 'busy', 'error'], + description: 'Estado actual' + } + }, + required: ['agentId', 'status'] + } + } +] +``` + +## 
Implementación de Handlers + +```typescript +// services/mcp/handlers.ts +import { db } from '../../db/client' +import { tasks, agents, taskQuestions, agentLogs } from '../../db/schema' +import { eq, and, desc, asc, sql } from 'drizzle-orm' +import { GiteaClient } from '../gitea/client' +import { K8sClient } from '../kubernetes/client' +import { getRedis } from '../../config/redis' +import { emitWebSocketEvent } from '../../api/websocket/server' +import crypto from 'crypto' + +const giteaClient = new GiteaClient() +const k8sClient = new K8sClient() +const redis = getRedis() + +export async function handleToolCall(name: string, args: any) { + switch (name) { + case 'get_next_task': + return await getNextTask(args) + + case 'update_task_status': + return await updateTaskStatus(args) + + case 'ask_user_question': + return await askUserQuestion(args) + + case 'check_question_response': + return await checkQuestionResponse(args) + + case 'create_branch': + return await createBranch(args) + + case 'create_pull_request': + return await createPullRequest(args) + + case 'trigger_preview_deploy': + return await triggerPreviewDeploy(args) + + case 'get_task_details': + return await getTaskDetails(args) + + case 'log_activity': + return await logActivity(args) + + case 'heartbeat': + return await heartbeat(args) + + default: + throw new Error(`Unknown tool: ${name}`) + } +} + +// ============================================ +// TOOL IMPLEMENTATIONS +// ============================================ + +async function getNextTask(args: { agentId: string; capabilities?: string[] }) { + const { agentId } = args + + // Get next task from backlog + const task = await db.query.tasks.findFirst({ + where: eq(tasks.state, 'backlog'), + with: { + project: true + }, + orderBy: [desc(tasks.priority), asc(tasks.createdAt)] + }) + + if (!task) { + return { + content: [{ + type: 'text', + text: JSON.stringify({ message: 'No tasks available' }) + }] + } + } + + // Assign task to agent + await
db.update(tasks) + .set({ + state: 'in_progress', + assignedAgentId: agentId, + assignedAt: new Date(), + startedAt: new Date() + }) + .where(eq(tasks.id, task.id)) + + await db.update(agents) + .set({ + status: 'busy', + currentTaskId: task.id + }) + .where(eq(agents.id, agentId)) + + // Emit WebSocket event + emitWebSocketEvent('task:status_changed', { + taskId: task.id, + oldState: 'backlog', + newState: 'in_progress', + agentId + }) + + // Cache invalidation + await redis.del(`task:${task.id}`) + await redis.del(`task:list:${task.projectId}`) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + task: { + id: task.id, + title: task.title, + description: task.description, + priority: task.priority, + project: task.project + } + }) + }] + } +} + +async function updateTaskStatus(args: { taskId: string; status: string; metadata?: any }) { + const { taskId, status, metadata } = args + + const updates: any = { state: status } + + if (status === 'completed') { + updates.completedAt = new Date() + } + + if (metadata?.durationMinutes) { + updates.actualDurationMinutes = metadata.durationMinutes + } + + await db.update(tasks) + .set(updates) + .where(eq(tasks.id, taskId)) + + // If task completed, free up agent + if (status === 'completed' || status === 'ready_to_test') { + const task = await db.query.tasks.findFirst({ + where: eq(tasks.id, taskId) + }) + + if (task?.assignedAgentId) { + await db.update(agents) + .set({ + status: 'idle', + currentTaskId: null, + tasksCompleted: db.$sql`tasks_completed + 1` + }) + .where(eq(agents.id, task.assignedAgentId)) + } + } + + emitWebSocketEvent('task:status_changed', { + taskId, + newState: status, + metadata + }) + + await redis.del(`task:${taskId}`) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ success: true }) + }] + } +} + +async function askUserQuestion(args: { taskId: string; question: string; context?: string }) { + const { taskId, question, context } = args + + // Update task state + 
await db.update(tasks) + .set({ state: 'needs_input' }) + .where(eq(tasks.id, taskId)) + + // Insert question + const questionId = crypto.randomUUID() + await db.insert(taskQuestions).values({ + id: questionId, + taskId, + question, + context, + status: 'pending' + }) + + // Notify frontend + emitWebSocketEvent('task:needs_input', { + taskId, + questionId, + question, + context + }) + + await redis.del(`task:${taskId}`) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + success: true, + message: 'Question sent to user', + questionId + }) + }] + } +} + +async function checkQuestionResponse(args: { taskId: string }) { + const { taskId } = args + + const question = await db.query.taskQuestions.findFirst({ + where: and( + eq(taskQuestions.taskId, taskId), + eq(taskQuestions.status, 'answered') + ), + orderBy: [desc(taskQuestions.respondedAt)] + }) + + if (!question || !question.response) { + return { + content: [{ + type: 'text', + text: JSON.stringify({ + hasResponse: false, + message: 'No response yet' + }) + }] + } + } + + // Update task back to in_progress + await db.update(tasks) + .set({ state: 'in_progress' }) + .where(eq(tasks.id, taskId)) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + hasResponse: true, + response: question.response, + question: question.question + }) + }] + } +} + +async function createBranch(args: { taskId: string; branchName?: string }) { + const { taskId, branchName } = args + + const task = await db.query.tasks.findFirst({ + where: eq(tasks.id, taskId), + with: { project: true } + }) + + if (!task) { + throw new Error('Task not found') + } + + const branch = branchName || `task-${taskId.slice(0, 8)}-${task.title.toLowerCase().replace(/\s+/g, '-').slice(0, 30)}` + + // Create branch in Gitea + await giteaClient.createBranch( + task.project.giteaOwner!, + task.project.giteaRepoName!, + branch, + task.project.defaultBranch! 
+ ) + + // Update task + await db.update(tasks) + .set({ branchName: branch }) + .where(eq(tasks.id, taskId)) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + success: true, + branchName: branch, + repoUrl: task.project.giteaRepoUrl + }) + }] + } +} + +async function createPullRequest(args: { taskId: string; title: string; description: string }) { + const { taskId, title, description } = args + + const task = await db.query.tasks.findFirst({ + where: eq(tasks.id, taskId), + with: { project: true } + }) + + if (!task || !task.branchName) { + throw new Error('Task not found or branch not created') + } + + const pr = await giteaClient.createPullRequest( + task.project.giteaOwner!, + task.project.giteaRepoName!, + { + title, + body: description, + head: task.branchName, + base: task.project.defaultBranch! + } + ) + + await db.update(tasks) + .set({ + prNumber: pr.number, + prUrl: pr.html_url + }) + .where(eq(tasks.id, taskId)) + + emitWebSocketEvent('task:pr_created', { + taskId, + prUrl: pr.html_url, + prNumber: pr.number + }) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + success: true, + prUrl: pr.html_url, + prNumber: pr.number + }) + }] + } +} + +async function triggerPreviewDeploy(args: { taskId: string }) { + const { taskId } = args + + const task = await db.query.tasks.findFirst({ + where: eq(tasks.id, taskId), + with: { project: true } + }) + + if (!task) { + throw new Error('Task not found') + } + + const previewNamespace = `preview-task-${taskId.slice(0, 8)}` + const previewUrl = `https://${previewNamespace}.preview.aiworker.dev` + + // Deploy to K8s + await k8sClient.createPreviewDeployment({ + namespace: previewNamespace, + taskId, + projectId: task.projectId, + image: task.project.dockerImage!, + branch: task.branchName!, + envVars: task.project.envVars as Record + }) + + await db.update(tasks) + .set({ + state: 'ready_to_test', + previewNamespace, + previewUrl, + previewDeployedAt: new Date() + }) + 
.where(eq(tasks.id, taskId)) + + emitWebSocketEvent('task:ready_to_test', { + taskId, + previewUrl + }) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ + success: true, + previewUrl, + namespace: previewNamespace + }) + }] + } +} + +async function getTaskDetails(args: { taskId: string }) { + const { taskId } = args + + const task = await db.query.tasks.findFirst({ + where: eq(tasks.id, taskId), + with: { + project: true, + questions: true + } + }) + + if (!task) { + throw new Error('Task not found') + } + + return { + content: [{ + type: 'text', + text: JSON.stringify({ task }) + }] + } +} + +async function logActivity(args: { agentId: string; level?: string; message: string; metadata?: any }) { + const { agentId, level = 'info', message, metadata } = args + + await db.insert(agentLogs).values({ + agentId, + level: level as any, + message, + metadata + }) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ success: true }) + }] + } +} + +async function heartbeat(args: { agentId: string; status: string }) { + const { agentId, status } = args + + await db.update(agents) + .set({ + lastHeartbeat: new Date(), + status: status as any + }) + .where(eq(agents.id, agentId)) + + return { + content: [{ + type: 'text', + text: JSON.stringify({ success: true }) + }] + } +} +``` + +## Uso desde Claude Code Agent + +Desde el pod del agente, Claude Code usaría las herramientas así: + +```bash +# En el pod del agente, configurar MCP +# claude-code config add-mcp-server aiworker stdio \ +# "bun run /app/mcp-client.js" + +# Ejemplo de uso en conversación con Claude Code: +# User: "Toma la siguiente tarea y trabaja en ella" +# Claude Code internamente llama: +# - get_next_task({ agentId: "agent-xyz" }) +# - Si necesita info: ask_user_question({ taskId: "...", question: "..." }) +# - Trabaja en el código +# - create_branch({ taskId: "..." }) +# - (commits and pushes) +# - create_pull_request({ taskId: "...", title: "...", description: "..." 
}) +# - trigger_preview_deploy({ taskId: "..." }) +# - update_task_status({ taskId: "...", status: "ready_to_test" }) +``` diff --git a/docs/02-backend/queue-system.md b/docs/02-backend/queue-system.md new file mode 100644 index 0000000..d4688fb --- /dev/null +++ b/docs/02-backend/queue-system.md @@ -0,0 +1,520 @@ +# Sistema de Colas con BullMQ + +## Setup de BullMQ + +```typescript +// services/queue/config.ts +import { Queue, Worker, QueueScheduler } from 'bullmq' +import { getRedis } from '../../config/redis' +import { logger } from '../../utils/logger' + +const connection = getRedis() + +export const queues = { + tasks: new Queue('tasks', { connection }), + deploys: new Queue('deploys', { connection }), + merges: new Queue('merges', { connection }), + cleanup: new Queue('cleanup', { connection }), +} + +// Queue options +export const defaultJobOptions = { + attempts: 3, + backoff: { + type: 'exponential', + delay: 2000, + }, + removeOnComplete: { + age: 3600, // 1 hour + count: 1000, + }, + removeOnFail: { + age: 86400, // 24 hours + }, +} +``` + +## Task Queue + +```typescript +// services/queue/task-queue.ts +import { queues, defaultJobOptions } from './config' +import { logger } from '../../utils/logger' + +export interface TaskJob { + taskId: string + projectId: string + priority: 'low' | 'medium' | 'high' | 'urgent' +} + +export async function enqueueTask(data: TaskJob) { + const priorityMap = { + urgent: 1, + high: 2, + medium: 3, + low: 4, + } + + await queues.tasks.add('process-task', data, { + ...defaultJobOptions, + priority: priorityMap[data.priority], + jobId: data.taskId, + }) + + logger.info(`Task queued: ${data.taskId}`) +} + +export async function dequeueTask(taskId: string) { + const job = await queues.tasks.getJob(taskId) + if (job) { + await job.remove() + logger.info(`Task dequeued: ${taskId}`) + } +} + +export async function getQueuedTasks() { + const jobs = await queues.tasks.getJobs(['waiting', 'active']) + return jobs.map(job => ({ + id: 
job.id, + data: job.data, + state: await job.getState(), + progress: job.progress, + attemptsMade: job.attemptsMade, + })) +} +``` + +## Deploy Queue + +```typescript +// services/queue/deploy-queue.ts +import { queues, defaultJobOptions } from './config' +import { logger } from '../../utils/logger' + +export interface DeployJob { + deploymentId: string + projectId: string + taskId?: string + environment: 'preview' | 'staging' | 'production' + branch: string + commitHash: string +} + +export async function enqueueDeploy(data: DeployJob) { + await queues.deploys.add('deploy', data, { + ...defaultJobOptions, + priority: data.environment === 'production' ? 1 : 2, + jobId: data.deploymentId, + }) + + logger.info(`Deploy queued: ${data.environment} - ${data.deploymentId}`) +} + +export async function getDeployStatus(deploymentId: string) { + const job = await queues.deploys.getJob(deploymentId) + if (!job) return null + + return { + id: job.id, + state: await job.getState(), + progress: job.progress, + result: job.returnvalue, + failedReason: job.failedReason, + } +} +``` + +## Merge Queue + +```typescript +// services/queue/merge-queue.ts +import { queues, defaultJobOptions } from './config' +import { logger } from '../../utils/logger' + +export interface MergeJob { + taskGroupId: string + projectId: string + taskIds: string[] + targetBranch: 'staging' | 'main' +} + +export async function enqueueMerge(data: MergeJob) { + await queues.merges.add('merge-tasks', data, { + ...defaultJobOptions, + priority: data.targetBranch === 'main' ? 
1 : 2, + jobId: data.taskGroupId, + }) + + logger.info(`Merge queued: ${data.taskGroupId}`) +} +``` + +## Cleanup Queue + +```typescript +// services/queue/cleanup-queue.ts +import { queues, defaultJobOptions } from './config' +import { logger } from '../../utils/logger' + +export interface CleanupJob { + type: 'preview-namespace' | 'old-logs' | 'completed-jobs' + namespaceOrResource: string + ageHours: number +} + +export async function enqueueCleanup(data: CleanupJob) { + await queues.cleanup.add('cleanup', data, { + ...defaultJobOptions, + attempts: 1, + }) + + logger.info(`Cleanup queued: ${data.type}`) +} + +// Schedule recurring cleanup +export async function scheduleRecurringCleanup() { + // Clean preview namespaces older than 7 days + await queues.cleanup.add( + 'cleanup-preview-namespaces', + { + type: 'preview-namespace', + ageHours: 168, // 7 days + }, + { + repeat: { + pattern: '0 2 * * *', // Daily at 2 AM + }, + } + ) + + // Clean old logs + await queues.cleanup.add( + 'cleanup-old-logs', + { + type: 'old-logs', + ageHours: 720, // 30 days + }, + { + repeat: { + pattern: '0 3 * * 0', // Weekly on Sunday at 3 AM + }, + } + ) + + logger.info('Recurring cleanup jobs scheduled') +} +``` + +## Workers Implementation + +```typescript +// services/queue/workers.ts +import { Worker, Job } from 'bullmq' +import { getRedis } from '../../config/redis' +import { logger } from '../../utils/logger' +import { db } from '../../db/client' +import { tasks, agents, deployments } from '../../db/schema' +import { eq } from 'drizzle-orm' +import { K8sClient } from '../kubernetes/client' +import { GiteaClient } from '../gitea/client' +import { TaskJob, DeployJob, MergeJob, CleanupJob } from './types' + +const connection = getRedis() +const k8sClient = new K8sClient() +const giteaClient = new GiteaClient() + +// ============================================ +// TASK WORKER +// ============================================ + +const taskWorker = new Worker( + 'tasks', + async 
(job: Job) => { + logger.info(`Processing task job: ${job.id}`) + + // Check if there's an available agent + const availableAgent = await db.query.agents.findFirst({ + where: eq(agents.status, 'idle'), + }) + + if (!availableAgent) { + logger.info('No available agents, task will be retried') + throw new Error('No available agents') + } + + // Task will be picked up by agent via MCP get_next_task + logger.info(`Task ${job.data.taskId} ready for agent pickup`) + + return { success: true, readyForPickup: true } + }, + { + connection, + concurrency: 5, + } +) + +taskWorker.on('completed', (job) => { + logger.info(`Task job completed: ${job.id}`) +}) + +taskWorker.on('failed', (job, err) => { + logger.error(`Task job failed: ${job?.id}`, err) +}) + +// ============================================ +// DEPLOY WORKER +// ============================================ + +const deployWorker = new Worker( + 'deploys', + async (job: Job) => { + const { deploymentId, projectId, environment, branch, commitHash } = job.data + + logger.info(`Deploying: ${environment} - ${deploymentId}`) + + // Update deployment status + await db.update(deployments) + .set({ + status: 'in_progress', + startedAt: new Date(), + }) + .where(eq(deployments.id, deploymentId)) + + job.updateProgress(10) + + try { + // Get project config + const project = await db.query.projects.findFirst({ + where: eq(deployments.projectId, projectId), + }) + + if (!project) { + throw new Error('Project not found') + } + + job.updateProgress(20) + + // Prepare deployment + const namespace = environment === 'production' + ? `${project.k8sNamespace}-prod` + : environment === 'staging' + ? `${project.k8sNamespace}-staging` + : job.data.taskId + ? 
`preview-task-${job.data.taskId.slice(0, 8)}` + : project.k8sNamespace + + job.updateProgress(40) + + // Deploy to K8s + await k8sClient.createOrUpdateDeployment({ + namespace, + name: `${project.name}-${environment}`, + image: `${project.dockerImage}:${commitHash.slice(0, 7)}`, + envVars: project.envVars as Record, + replicas: project.replicas || 1, + resources: { + cpu: project.cpuLimit || '500m', + memory: project.memoryLimit || '512Mi', + }, + }) + + job.updateProgress(70) + + // Create/update ingress + const url = await k8sClient.createOrUpdateIngress({ + namespace, + name: `${project.name}-${environment}`, + host: environment === 'production' + ? `${project.name}.aiworker.dev` + : `${environment}-${project.name}.aiworker.dev`, + serviceName: `${project.name}-${environment}`, + servicePort: 3000, + }) + + job.updateProgress(90) + + // Update deployment record + await db.update(deployments) + .set({ + status: 'completed', + completedAt: new Date(), + url, + durationSeconds: Math.floor( + (new Date().getTime() - job.processedOn!) 
/ 1000 + ), + }) + .where(eq(deployments.id, deploymentId)) + + job.updateProgress(100) + + logger.info(`Deploy completed: ${environment} - ${url}`) + + return { success: true, url } + } catch (error) { + // Update deployment as failed + await db.update(deployments) + .set({ + status: 'failed', + errorMessage: error.message, + completedAt: new Date(), + }) + .where(eq(deployments.id, deploymentId)) + + throw error + } + }, + { + connection, + concurrency: 3, + } +) + +// ============================================ +// MERGE WORKER +// ============================================ + +const mergeWorker = new Worker( + 'merges', + async (job: Job) => { + const { taskGroupId, projectId, taskIds, targetBranch } = job.data + + logger.info(`Merging tasks: ${taskIds.join(', ')} to ${targetBranch}`) + + // Get project and tasks + const project = await db.query.projects.findFirst({ + where: eq(deployments.projectId, projectId), + }) + + if (!project) { + throw new Error('Project not found') + } + + const tasksList = await db.query.tasks.findMany({ + where: (tasks, { inArray }) => inArray(tasks.id, taskIds), + }) + + job.updateProgress(20) + + // Merge each PR + for (const task of tasksList) { + if (task.prNumber) { + await giteaClient.mergePullRequest( + project.giteaOwner!, + project.giteaRepoName!, + task.prNumber, + 'squash' + ) + + job.updateProgress(20 + (40 / tasksList.length)) + } + } + + job.updateProgress(60) + + // Create staging/production branch if needed + // Then trigger deploy + // ... 
implementation + + job.updateProgress(100) + + logger.info(`Merge completed: ${taskGroupId}`) + + return { success: true } + }, + { + connection, + concurrency: 2, + } +) + +// ============================================ +// CLEANUP WORKER +// ============================================ + +const cleanupWorker = new Worker( + 'cleanup', + async (job: Job) => { + const { type, ageHours } = job.data + + logger.info(`Cleanup: ${type}`) + + switch (type) { + case 'preview-namespace': + await k8sClient.cleanupOldPreviewNamespaces(ageHours) + break + + case 'old-logs': + const cutoffDate = new Date(Date.now() - ageHours * 60 * 60 * 1000) + await db.delete(agentLogs) + .where(lt(agentLogs.createdAt, cutoffDate)) + break + } + + logger.info(`Cleanup completed: ${type}`) + + return { success: true } + }, + { + connection, + concurrency: 1, + } +) + +// ============================================ +// START ALL WORKERS +// ============================================ + +export async function startQueueWorkers() { + logger.info('Starting BullMQ workers...') + + // Workers are already instantiated above + // Just schedule recurring jobs + await scheduleRecurringCleanup() + + logger.info('✓ All workers started') + + return { + taskWorker, + deployWorker, + mergeWorker, + cleanupWorker, + } +} + +// Graceful shutdown +process.on('SIGTERM', async () => { + logger.info('Shutting down workers...') + await taskWorker.close() + await deployWorker.close() + await mergeWorker.close() + await cleanupWorker.close() + logger.info('Workers shut down') + process.exit(0) +}) +``` + +## Monitorización de Colas + +```typescript +// api/routes/queues.ts +import { Router } from 'express' +import { queues } from '../../services/queue/config' + +const router = Router() + +router.get('/status', async (req, res) => { + const status = await Promise.all( + Object.entries(queues).map(async ([name, queue]) => ({ + name, + waiting: await queue.getWaitingCount(), + active: await queue.getActiveCount(), + 
completed: await queue.getCompletedCount(), + failed: await queue.getFailedCount(), + })) + ) + + res.json({ queues: status }) +}) + +export default router +``` diff --git a/docs/03-frontend/componentes.md b/docs/03-frontend/componentes.md new file mode 100644 index 0000000..f01b5e9 --- /dev/null +++ b/docs/03-frontend/componentes.md @@ -0,0 +1,498 @@ +# Componentes Principales + +## KanbanBoard + +```typescript +// components/kanban/KanbanBoard.tsx +import { useMemo } from 'react' +import { DndContext, DragEndEvent, PointerSensor, useSensor, useSensors } from '@dnd-kit/core' +import { useTasks, useUpdateTask } from '@/hooks/useTasks' +import KanbanColumn from './KanbanColumn' +import { Task, TaskState } from '@/types/task' + +const COLUMNS: { id: TaskState; title: string; color: string }[] = [ + { id: 'backlog', title: 'Backlog', color: 'gray' }, + { id: 'in_progress', title: 'En Progreso', color: 'blue' }, + { id: 'needs_input', title: 'Necesita Respuestas', color: 'yellow' }, + { id: 'ready_to_test', title: 'Listo para Probar', color: 'purple' }, + { id: 'approved', title: 'Aprobado', color: 'green' }, + { id: 'staging', title: 'Staging', color: 'indigo' }, + { id: 'production', title: 'Producción', color: 'emerald' }, +] + +interface KanbanBoardProps { + projectId: string +} + +export function KanbanBoard({ projectId }: KanbanBoardProps) { + const { data: tasks = [], isLoading } = useTasks({ projectId }) + const updateTask = useUpdateTask() + + const sensors = useSensors( + useSensor(PointerSensor, { + activationConstraint: { + distance: 8, + }, + }) + ) + + const tasksByState = useMemo(() => { + return COLUMNS.reduce((acc, column) => { + acc[column.id] = tasks.filter((task) => task.state === column.id) + return acc + }, {} as Record) + }, [tasks]) + + const handleDragEnd = (event: DragEndEvent) => { + const { active, over } = event + + if (!over || active.id === over.id) return + + const taskId = active.id as string + const newState = over.id as TaskState + + 
updateTask.mutate({ + taskId, + updates: { state: newState }, + }) + } + + if (isLoading) { + return
<div>Loading...</div>
+  }
+
+  return (
+    <DndContext sensors={sensors} onDragEnd={handleDragEnd}>
+      <div className="flex gap-4 overflow-x-auto">
+        {COLUMNS.map((column) => (
+          <KanbanColumn
+            key={column.id}
+            id={column.id}
+            title={column.title}
+            color={column.color}
+            tasks={tasksByState[column.id]}
+          />
+        ))}
+      </div>
+    </DndContext>
+ ) +} +``` + +## KanbanColumn + +```typescript +// components/kanban/KanbanColumn.tsx +import { useDroppable } from '@dnd-kit/core' +import { SortableContext, verticalListSortingStrategy } from '@dnd-kit/sortable' +import TaskCard from './TaskCard' +import { Task, TaskState } from '@/types/task' + +interface KanbanColumnProps { + id: TaskState + title: string + color: string + tasks: Task[] +} + +export default function KanbanColumn({ id, title, color, tasks }: KanbanColumnProps) { + const { setNodeRef } = useDroppable({ id }) + + return ( +
+
+

+ {title} + ({tasks.length}) +

+
+ +
+      <SortableContext items={tasks.map((t) => t.id)} strategy={verticalListSortingStrategy}>
+        {tasks.map((task) => (
+          <TaskCard key={task.id} task={task} />
+        ))}
+
+ + {tasks.length === 0 && ( +
+ Sin tareas +
+ )} +
+
+ ) +} +``` + +## TaskCard + +```typescript +// components/kanban/TaskCard.tsx +import { useSortable } from '@dnd-kit/sortable' +import { CSS } from '@dnd-kit/utilities' +import { Clock, User, GitBranch, AlertCircle } from 'lucide-react' +import { Task } from '@/types/task' +import { useNavigate } from 'react-router-dom' + +interface TaskCardProps { + task: Task +} + +const PRIORITY_COLORS = { + low: 'bg-gray-100 text-gray-800', + medium: 'bg-blue-100 text-blue-800', + high: 'bg-orange-100 text-orange-800', + urgent: 'bg-red-100 text-red-800', +} + +export default function TaskCard({ task }: TaskCardProps) { + const navigate = useNavigate() + const { attributes, listeners, setNodeRef, transform, transition, isDragging } = useSortable({ + id: task.id, + }) + + const style = { + transform: CSS.Transform.toString(transform), + transition, + opacity: isDragging ? 0.5 : 1, + } + + return ( +
navigate(`/tasks/${task.id}`)} + > +
+

{task.title}

+ + {task.priority} + +
+ + {task.description && ( +

{task.description}

+ )} + +
+ {task.assignedAgent && ( +
+ + Agent {task.assignedAgent.podName.slice(0, 8)} +
+ )} + + {task.branchName && ( +
+ + {task.branchName} +
+ )} + + {task.state === 'needs_input' && ( +
+ + Pregunta pendiente +
+ )} +
+ + {task.actualDurationMinutes && ( +
+ + {task.actualDurationMinutes}min +
+ )} + + {task.previewUrl && ( + e.stopPropagation()} + > + Ver Preview → + + )} +
+ ) +} +``` + +## WebTerminal + +```typescript +// components/terminal/WebTerminal.tsx +import { useEffect, useRef } from 'react' +import { Terminal } from 'xterm' +import { FitAddon } from 'xterm-addon-fit' +import { WebLinksAddon } from 'xterm-addon-web-links' +import 'xterm/css/xterm.css' + +interface WebTerminalProps { + agentId: string + podName: string +} + +export function WebTerminal({ agentId, podName }: WebTerminalProps) { + const terminalRef = useRef(null) + const xtermRef = useRef() + const fitAddonRef = useRef() + + useEffect(() => { + if (!terminalRef.current) return + + // Create terminal + const term = new Terminal({ + cursorBlink: true, + fontSize: 14, + fontFamily: 'Menlo, Monaco, "Courier New", monospace', + theme: { + background: '#1e1e1e', + foreground: '#d4d4d4', + }, + }) + + const fitAddon = new FitAddon() + const webLinksAddon = new WebLinksAddon() + + term.loadAddon(fitAddon) + term.loadAddon(webLinksAddon) + term.open(terminalRef.current) + fitAddon.fit() + + xtermRef.current = term + fitAddonRef.current = fitAddon + + // Connect to backend WebSocket for terminal + const ws = new WebSocket(`ws://localhost:3000/terminal/${agentId}`) + + ws.onopen = () => { + term.writeln(`Connected to ${podName}`) + term.writeln('') + } + + ws.onmessage = (event) => { + term.write(event.data) + } + + term.onData((data) => { + ws.send(data) + }) + + // Handle resize + const handleResize = () => { + fitAddon.fit() + } + window.addEventListener('resize', handleResize) + + return () => { + term.dispose() + ws.close() + window.removeEventListener('resize', handleResize) + } + }, [agentId, podName]) + + return ( +
+    <div ref={terminalRef} className="h-full w-full" />
+ ) +} +``` + +## TaskForm + +```typescript +// components/tasks/TaskForm.tsx +import { useState } from 'react' +import { useCreateTask } from '@/hooks/useTasks' +import { Button } from '@/components/ui/Button' +import { Input } from '@/components/ui/Input' +import { Select } from '@/components/ui/Select' +import { toast } from 'react-hot-toast' + +interface TaskFormProps { + projectId: string + onSuccess?: () => void +} + +export function TaskForm({ projectId, onSuccess }: TaskFormProps) { + const [title, setTitle] = useState('') + const [description, setDescription] = useState('') + const [priority, setPriority] = useState<'low' | 'medium' | 'high' | 'urgent'>('medium') + + const createTask = useCreateTask() + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + + if (!title.trim()) { + toast.error('El título es requerido') + return + } + + try { + await createTask.mutateAsync({ + projectId, + title, + description, + priority, + }) + + toast.success('Tarea creada') + setTitle('') + setDescription('') + setPriority('medium') + onSuccess?.() + } catch (error) { + toast.error('Error al crear tarea') + } + } + + return ( +
+ setTitle(e.target.value)} + placeholder="Ej: Implementar autenticación" + required + /> + +
+ +