# ai/gateway/.env.example
# Example environment configuration for the AI gateway.
# Copy to .env and fill in real values; never commit actual secrets.

# Server configuration
PORT=3000
HOST=0.0.0.0
LOG_LEVEL=info
CORS_ORIGIN=*
# Database
DATABASE_URL=postgresql://postgres:postgres@localhost:5432/dexorder
# LLM Provider API Keys (configure at least one)
# Anthropic Claude
ANTHROPIC_API_KEY=sk-ant-xxxxx
# OpenAI GPT
OPENAI_API_KEY=sk-xxxxx
# Google Gemini
GOOGLE_API_KEY=xxxxx
# OpenRouter (access to 300+ models with one key)
OPENROUTER_API_KEY=sk-or-xxxxx
# Default model (if user has no preference)
DEFAULT_MODEL_PROVIDER=anthropic
DEFAULT_MODEL=claude-3-5-sonnet-20241022
# Telegram (optional)
TELEGRAM_BOT_TOKEN=
# Kubernetes configuration
KUBERNETES_NAMESPACE=dexorder-agents
KUBERNETES_IN_CLUSTER=false
KUBERNETES_CONTEXT=minikube
AGENT_IMAGE=ghcr.io/dexorder/agent:latest
SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest
AGENT_STORAGE_CLASS=standard
# Redis (for hot storage and session management)
REDIS_URL=redis://localhost:6379
# Qdrant (for RAG vector search)
QDRANT_URL=http://localhost:6333
# QDRANT_API_KEY is optional; leave empty for local dev
QDRANT_API_KEY=
# Iceberg (for durable storage via REST catalog)
ICEBERG_CATALOG_URI=http://iceberg-catalog:8181
ICEBERG_NAMESPACE=gateway
S3_ENDPOINT=http://minio:9000
S3_ACCESS_KEY=minioadmin
S3_SECRET_KEY=minioadmin
# Event router (ZeroMQ)
EVENT_ROUTER_BIND=tcp://*:5571
# Embeddings (for RAG vector search)
# Recommended: ollama with all-minilm (90MB model, CPU-friendly, ~100MB RAM)
EMBEDDING_PROVIDER=ollama
EMBEDDING_MODEL=all-minilm
OLLAMA_URL=http://localhost:11434
# Alternative models: nomic-embed-text (8K context), mxbai-embed-large (higher accuracy)
# For OpenAI embeddings, set: EMBEDDING_PROVIDER=openai, EMBEDDING_MODEL=text-embedding-3-small