---
# ConfigMap carrying the gateway's production configuration.
# The entire application config is embedded under data."config.yaml"
# as a literal block scalar (|) so the gateway can mount/read it as a file.
apiVersion: v1
kind: ConfigMap
metadata:
  name: gateway-config
data:
  config.yaml: |
    # Gateway Configuration (production)

    # Server configuration
    server:
      port: 3000
      # Quoted defensively so tooling never retypes the bind address.
      host: "0.0.0.0"
      log_level: info
      cors_origin: "https://dexorder.ai"
      base_url: https://dexorder.ai
      trusted_origins:
        - https://dexorder.ai

    # Default model (if user has no preference)
    defaults:
      model_provider: deepinfra
      model: zai-org/GLM-5

    # License tier model configuration
    license_models:
      # Free tier models
      free:
        default: zai-org/GLM-5
        cost_optimized: zai-org/GLM-5
        complex: zai-org/GLM-5
        allowed_models:
          - zai-org/GLM-5
      # Pro tier models
      pro:
        default: zai-org/GLM-5
        cost_optimized: zai-org/GLM-5
        complex: zai-org/GLM-5
        blocked_models:
          - Qwen/Qwen3-235B-A22B-Instruct-2507
      # Enterprise tier models
      enterprise:
        default: zai-org/GLM-5
        cost_optimized: zai-org/GLM-5
        complex: Qwen/Qwen3-235B-A22B-Instruct-2507

    # Kubernetes configuration
    kubernetes:
      namespace: sandbox
      service_namespace: ai
      in_cluster: true
      sandbox_image: git.dxod.org/dexorder/dexorder/ai-sandbox:bbddd58
      sidecar_image: git.dxod.org/dexorder/dexorder/ai-lifecycle-sidecar:latest
      image_pull_policy: Always
      storage_class: ceph-block

    # DragonflyDB (Redis-compatible, for hot storage and session management)
    redis:
      url: redis://dragonfly:6379

    # Qdrant (for RAG vector search)
    qdrant:
      url: http://qdrant:6333
      collection: gateway_memory

    # Agent configuration
    agent:
      # Number of prior conversation turns loaded as LLM context and
      # flushed to Iceberg at session end
      conversation_history_limit: 20

    # Iceberg (for durable storage via REST catalog)
    iceberg:
      catalog_uri: http://iceberg-catalog:8181
      namespace: gateway
      ohlc_catalog_uri: http://iceberg-catalog:8181
      ohlc_namespace: trading
      s3_endpoint: http://minio:9000
      conversations_bucket: warehouse

    # Event router (ZeroMQ)
    events:
      router_bind: tcp://*:5571

    # Embeddings (for RAG vector search)
    embedding:
      provider: ollama
      model: all-minilm
      ollama_url: http://ollama:11434

    # Email service configuration
    email:
      from_address: noreply@dexorder.ai