---
apiVersion: v1
kind: ConfigMap
metadata:
  name: gateway-config
data:
  # Application configuration mounted into the gateway container.
  # The value is itself a YAML document, embedded as a literal block scalar.
  config.yaml: |
    # Gateway Configuration

    # Server configuration
    server:
      port: 3000
      host: "0.0.0.0"  # quoted defensively; bind-all address, not a number
      log_level: debug
      cors_origin: "*"  # quoted: bare * is a YAML alias sigil
      base_url: http://dexorder.local
      trusted_origins:
        - http://dexorder.local
        - http://localhost:5173
        - ws://dexorder.local

    # Database
    # NOTE(review): plaintext credentials in a ConfigMap are visible to anyone
    # with read access to the namespace — consider moving this URL (or at least
    # the password) into a Kubernetes Secret and injecting it via env/secretKeyRef.
    database:
      url: postgresql://postgres:password@postgres:5432/iceberg

    # Default model (if user has no preference)
    defaults:
      model_provider: deepinfra
      model: zai-org/GLM-5

    # License tier model configuration (null = fall back to defaults.model)
    license_models:
      free:
        default: null
        cost_optimized: null
        complex: null
      pro:
        default: null
        cost_optimized: null
        complex: null
      enterprise:
        default: null
        cost_optimized: null
        complex: null

    # Kubernetes configuration
    kubernetes:
      namespace: sandbox
      service_namespace: default
      in_cluster: true
      # Image placeholders substituted at deploy time — TODO confirm the
      # templating step that replaces these tags.
      sandbox_image: SANDBOX_IMAGE_TAG
      sidecar_image: SIDECAR_IMAGE_TAG
      storage_class: standard
      image_pull_policy: Never  # For minikube dev - use local images

    # Agent configuration
    agent:
      # Number of prior conversation turns loaded as LLM context and flushed
      # to Iceberg at session end
      conversation_history_limit: 20

    # DragonflyDB (Redis-compatible, for hot storage and session management)
    redis:
      url: redis://dragonfly:6379

    # Iceberg (for durable storage via REST catalog)
    iceberg:
      catalog_uri: http://iceberg-catalog:8181
      namespace: gateway
      ohlc_catalog_uri: http://iceberg-catalog:8181
      ohlc_namespace: trading
      s3_endpoint: http://minio:9000
      conversations_bucket: warehouse  # S3 bucket for conversation Parquet cold storage

    # Event router (ZeroMQ)
    events:
      router_bind: tcp://*:5571

    # Embeddings (for RAG vector search)
    # Ollama runs in the same container as the gateway (see gateway/Dockerfile)
    embedding:
      provider: ollama
      model: all-minilm
      ollama_url: http://localhost:11434

    # Email service configuration
    email:
      from_address: noreply@dexorder.com