# Server configuration
PORT=3000
HOST=0.0.0.0
LOG_LEVEL=info
CORS_ORIGIN=*

# Database
DATABASE_URL=postgresql://postgres:postgres@localhost:5432/dexorder

# LLM Provider API Keys (configure at least one)
# Anthropic Claude
ANTHROPIC_API_KEY=sk-ant-xxxxx
# OpenAI GPT
OPENAI_API_KEY=sk-xxxxx
# Google Gemini
GOOGLE_API_KEY=xxxxx
# OpenRouter (access to 300+ models with one key)
OPENROUTER_API_KEY=sk-or-xxxxx

# Default model (if user has no preference)
DEFAULT_MODEL_PROVIDER=anthropic
DEFAULT_MODEL=claude-sonnet-4-6

# Telegram (optional)
TELEGRAM_BOT_TOKEN=

# Kubernetes configuration
KUBERNETES_NAMESPACE=sandbox
KUBERNETES_IN_CLUSTER=false
KUBERNETES_CONTEXT=minikube
SANDBOX_IMAGE=ghcr.io/dexorder/sandbox:latest
SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest
SANDBOX_STORAGE_CLASS=standard

# Redis (for hot storage and session management)
REDIS_URL=redis://localhost:6379

# Iceberg (for durable storage via REST catalog)
ICEBERG_CATALOG_URI=http://iceberg-catalog:8181
ICEBERG_NAMESPACE=gateway
S3_ENDPOINT=http://minio:9000
S3_ACCESS_KEY=minioadmin
S3_SECRET_KEY=minioadmin

# Event router (ZeroMQ)
EVENT_ROUTER_BIND=tcp://*:5571

# Embeddings (for RAG vector search)
# Recommended: ollama with all-minilm (90MB model, CPU-friendly, ~100MB RAM)
EMBEDDING_PROVIDER=ollama
EMBEDDING_MODEL=all-minilm
OLLAMA_URL=http://localhost:11434
# Alternative models: nomic-embed-text (8K context), mxbai-embed-large (higher accuracy)
# For OpenAI embeddings, set: EMBEDDING_PROVIDER=openai, EMBEDDING_MODEL=text-embedding-3-small