#!/usr/bin/env bash
#
# dev — manage the minikube-based development environment for this repo:
# start/stop the cluster, rebuild custom images into minikube's docker daemon,
# deploy via kustomize, manage StatefulSet PVCs, and bootstrap a dev user.
#
# NOTE(review): this copy was recovered from a whitespace-collapsed source.
# Two regions marked "NOTE(review): extraction loss" below have visibly lost
# content (heredoc bodies for kustomization.yaml, the `kubectl apply`/`wait`
# that followed, and the head of the main `case` dispatch including the
# `start)` arm). Restore those regions from version control before relying on
# this file; the surrounding code is transcribed verbatim.
set -e

# Absolute directory of this script, and the repository root one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

# Colors (ANSI escapes, used with `echo -e` throughout)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Print the command reference and exit non-zero.
# Also serves as the fallback for unknown commands.
usage() {
  echo "Usage: $0 [COMMAND]"
  echo ""
  echo "Manage the minikube development environment"
  echo ""
  echo "Commands:"
  echo " start Start minikube and deploy all services"
  echo " stop [--keep-data] Stop minikube (deletes PVCs by default)"
  echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web|client-py)"
  echo " deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)"
  echo " rebuild [svc] Rebuild all custom images, or just one"
  echo " deploy [svc] Deploy/update all services, or just one"
  echo " delete-pvcs [svc] Delete PVCs for specific service or all (kafka|postgres|minio|qdrant|all)"
  echo " status Show status of all services"
  echo " logs Tail logs for a service"
  echo " shell Open a shell in a service pod"
  echo " clean Delete all resources and volumes"
  echo " tunnel Start minikube tunnel (for LoadBalancer access)"
  echo ""
  echo "Examples:"
  echo " $0 start # Start minikube and deploy everything"
  echo " $0 stop # Stop minikube and delete PVCs"
  echo " $0 stop --keep-data # Stop minikube but keep PVCs"
  echo " $0 deep-restart postgres # Restart postgres with fresh storage"
  echo " $0 delete-pvcs kafka # Delete kafka PVCs only"
  echo " $0 rebuild # Rebuild all custom images"
  echo " $0 logs relay # Tail logs for relay service"
  echo " $0 shell ingestor # Open shell in ingestor pod"
  exit 1
}

# First positional argument selects the subcommand; default is "start".
COMMAND="${1:-start}"

# Abort with install instructions if the minikube binary is not on PATH.
check_minikube() {
  if ! command -v minikube &> /dev/null; then
    echo -e "${RED}Error: minikube not found. Please install minikube first.${NC}"
    echo "https://minikube.sigs.k8s.io/docs/start/"
    exit 1
  fi
}

# Abort if the kubectl binary is not on PATH.
check_kubectl() {
  if ! command -v kubectl &> /dev/null; then
    echo -e "${RED}Error: kubectl not found. Please install kubectl first.${NC}"
    exit 1
  fi
}

# Start (or reuse) the minikube cluster, enable the ingress addon, point the
# current shell's docker env at minikube, and ensure a dexorder.local hosts entry.
start_minikube() {
  echo -e "${BLUE}Starting minikube...${NC}"
  if minikube status &> /dev/null; then
    echo -e "${GREEN}✓ Minikube already running${NC}"
  else
    minikube start --cpus=6 --memory=12g --driver=docker
    echo -e "${GREEN}✓ Minikube started${NC}"
  fi

  # Enable ingress addon
  echo -e "${BLUE}Enabling ingress addon...${NC}"
  minikube addons enable ingress

  # Wait for ingress webhook to be ready
  echo -e "${BLUE}Waiting for ingress webhook to be ready...${NC}"
  kubectl wait --namespace ingress-nginx \
    --for=condition=ready pod \
    --selector=app.kubernetes.io/component=controller \
    --timeout=120s 2>/dev/null || echo -e "${YELLOW}⚠️ Ingress controller not ready yet${NC}"

  # Give webhook endpoint a moment to start listening
  sleep 5
  echo -e "${GREEN}✓ Ingress enabled${NC}"

  # Set docker environment
  # NOTE(review): this only affects the current shell process, not the caller's
  # shell — rebuild_images re-evals it itself, which is why this still works.
  echo -e "${YELLOW}Setting docker environment to minikube...${NC}"
  eval $(minikube docker-env)
  echo -e "${GREEN}✓ Docker environment set${NC}"

  # Add /etc/hosts entry
  MINIKUBE_IP=$(minikube ip)
  if ! grep -q "dexorder.local" /etc/hosts; then
    echo -e "${YELLOW}Adding dexorder.local to /etc/hosts (requires sudo)...${NC}"
    echo "$MINIKUBE_IP dexorder.local" | sudo tee -a /etc/hosts
  else
    echo -e "${GREEN}✓ /etc/hosts entry exists${NC}"
  fi
}

# Build custom images ($1 = service name or "all") inside minikube's docker
# daemon via bin/build, then persist the resulting dev tags to .dev-image-tag
# so deploy_services/deploy_service can reference them.
rebuild_images() {
  local service="${1:-all}"
  echo -e "${BLUE}Building custom images...${NC}"

  # Use minikube's docker daemon
  eval $(minikube docker-env)

  # Build images using the standard bin/build script with dev flag
  cd "$ROOT_DIR"

  # Load existing tags so we preserve whichever services we're not rebuilding
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
  fi

  # Helper: run build, show output, and return just the dev tag via stdout
  # bin/build now outputs the tag on its last line to stderr
  build_and_get_tag() {
    local svc="$1"
    local output
    local tag
    # Capture stderr (which contains both output and the tag)
    output=$("$SCRIPT_DIR/build" "$svc" dev 2>&1) || { echo "$output" >&2; return 1; }
    # Show the build output (excluding the final tag line)
    # NOTE(review): `head -n -1` is a GNU coreutils extension; fails on
    # BSD/macOS head — confirm target platforms.
    echo "$output" | head -n -1 >&2
    # Return just the tag (last line)
    tag=$(echo "$output" | tail -n 1)
    echo "$tag"
  }

  if [ "$service" == "all" ] || [ "$service" == "relay" ]; then
    echo -e "${GREEN}→${NC} Building relay..."
    RELAY_TAG=$(build_and_get_tag relay) || exit 1
    docker tag "dexorder/ai-relay:$RELAY_TAG" "dexorder/relay:$RELAY_TAG"
  fi

  if [ "$service" == "all" ] || [ "$service" == "ingestor" ]; then
    echo -e "${GREEN}→${NC} Building ingestor..."
    INGEST_TAG=$(build_and_get_tag ingestor) || exit 1
    docker tag "dexorder/ai-ingestor:$INGEST_TAG" "dexorder/ingestor:$INGEST_TAG"
  fi

  if [ "$service" == "all" ] || [ "$service" == "flink" ]; then
    echo -e "${GREEN}→${NC} Building flink..."
    FLINK_TAG=$(build_and_get_tag flink) || exit 1
    docker tag "dexorder/ai-flink:$FLINK_TAG" "dexorder/flink:$FLINK_TAG"
  fi

  # Build gateway (Node.js application)
  if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
    echo -e "${GREEN}→${NC} Building gateway..."
    GATEWAY_TAG=$(build_and_get_tag gateway) || exit 1
    docker tag "dexorder/ai-gateway:$GATEWAY_TAG" "dexorder/gateway:$GATEWAY_TAG"
  fi

  # Build lifecycle-sidecar (Go binary)
  if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
    echo -e "${GREEN}→${NC} Building lifecycle-sidecar..."
    SIDECAR_TAG=$(build_and_get_tag lifecycle-sidecar) || exit 1
    # NOTE(review): target tag lacks the "dexorder/" prefix used by every
    # other service — confirm whether this is intentional or a typo.
    docker tag "dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG" "lifecycle-sidecar:$SIDECAR_TAG"
  fi

  # Build web (Vue.js application)
  if [ "$service" == "all" ] || [ "$service" == "web" ]; then
    echo -e "${GREEN}→${NC} Building web..."
    WEB_TAG=$(build_and_get_tag web) || exit 1
  fi

  # Build client-py (Python client library)
  if [ "$service" == "all" ] || [ "$service" == "client-py" ]; then
    echo -e "${GREEN}→${NC} Building client-py..."
    CLIENT_PY_TAG=$(build_and_get_tag client-py) || exit 1
  fi

  # Save the tags for deployment (all services, preserving any we didn't rebuild)
  echo "RELAY_TAG=$RELAY_TAG" > "$ROOT_DIR/.dev-image-tag"
  echo "INGEST_TAG=$INGEST_TAG" >> "$ROOT_DIR/.dev-image-tag"
  echo "FLINK_TAG=$FLINK_TAG" >> "$ROOT_DIR/.dev-image-tag"
  echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag"
  echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag"
  echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag"
  echo "CLIENT_PY_TAG=$CLIENT_PY_TAG" >> "$ROOT_DIR/.dev-image-tag"

  echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG, client-py=$CLIENT_PY_TAG${NC}"
}

# Deploy every service to minikube from deploy/k8s/dev: sync secrets/configs,
# apply kustomize with the saved dev image tags, wait for postgres, apply the
# gateway schema if absent, and bootstrap the dev user.
deploy_services() {
  echo -e "${BLUE}Deploying services to minikube...${NC}"
  cd "$ROOT_DIR/deploy/k8s/dev"

  # Get the dev image tags
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
    echo -e "${BLUE}Using image tags:${NC}"
    echo -e " Relay: $RELAY_TAG"
    echo -e " Ingestor: $INGEST_TAG"
    echo -e " Flink: $FLINK_TAG"
    echo -e " Gateway: $GATEWAY_TAG"
    echo -e " Web: $WEB_TAG"
  else
    echo -e "${YELLOW}⚠️ No dev tags found. Using 'latest'. Run rebuild first.${NC}"
    RELAY_TAG="latest"
    INGEST_TAG="latest"
    FLINK_TAG="latest"
    GATEWAY_TAG="latest"
    WEB_TAG="latest"
  fi

  # Create secrets first (if they exist)
  echo -e "${GREEN}→${NC} Checking secrets..."
  if ls secrets/*.yaml &> /dev/null; then
    "$SCRIPT_DIR/secret-update" dev || echo -e "${YELLOW} (Some secrets missing - copy from .example files)${NC}"
  else
    echo -e "${YELLOW}⚠️ No secrets found. Copy from .example files:${NC}"
    echo -e "${YELLOW} cd deploy/k8s/dev/secrets${NC}"
    echo -e "${YELLOW} cp ai-secrets.yaml.example ai-secrets.yaml${NC}"
    echo -e "${YELLOW} # Edit with actual values, then run: bin/secret-update dev${NC}"
  fi

  # Update configs
  echo -e "${GREEN}→${NC} Updating configs..."
  "$SCRIPT_DIR/config-update" dev

  # Create a temporary kustomization overlay with image tags
  echo -e "${GREEN}→${NC} Setting image tags in kustomization..."
  # NOTE(review): extraction loss — the heredoc body that was appended to
  # kustomization.yaml (presumably an `images:` tag override list) and the
  # `kubectl apply -k` / `kubectl wait` that followed are missing; only this
  # residue line survives. Restore from VCS. Also note `>>` appends on every
  # run rather than writing a fresh overlay — verify against the original.
  cat >> kustomization.yaml </dev/null || echo -e "${YELLOW}(Some deployments not ready yet)${NC}"

  # Initialize gateway database schema
  echo -e "${BLUE}Initializing gateway database schema...${NC}"
  echo -e "${GREEN}→${NC} Waiting for postgres..."
  kubectl wait --for=condition=ready --timeout=120s pod -l app=postgres 2>/dev/null || {
    echo -e "${YELLOW}⚠️ Postgres not ready yet${NC}"
  }
  pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
  if [ -n "$pg_pod" ]; then
    # Wait for postgres to actually be ready to accept connections
    echo -e "${GREEN}→${NC} Verifying postgres is ready to accept connections..."
    for i in {1..30}; do
      if kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "SELECT 1;" > /dev/null 2>&1; then
        echo -e "${GREEN}✓ Postgres ready${NC}"
        break
      fi
      if [ $i -eq 30 ]; then
        echo -e "${RED}✗ Postgres not ready after 30 seconds${NC}"
        exit 1
      fi
      sleep 1
    done

    # Presence of the "user" table marks the schema as already applied.
    table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
    if [ "$table_count" = "1" ]; then
      echo -e "${GREEN}✓ Gateway schema already exists${NC}"
    else
      echo -e "${GREEN}→${NC} Applying gateway schema..."
      if kubectl exec -i "$pg_pod" -- psql -U postgres -d iceberg < "$ROOT_DIR/gateway/schema.sql" > /dev/null 2>&1; then
        # Verify schema was actually created
        sleep 1
        table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
        if [ "$table_count" = "1" ]; then
          echo -e "${GREEN}✓ Gateway schema initialized${NC}"
        else
          echo -e "${RED}✗ Failed to verify schema creation${NC}"
          exit 1
        fi
      else
        echo -e "${RED}✗ Failed to initialize gateway schema${NC}"
        exit 1
      fi
    fi

    # Create dev user (refactored into reusable function)
    create_dev_user
  fi

  echo ""
  echo -e "${GREEN}✓ Dev environment ready!${NC}"
  echo ""
  echo -e "${BLUE}Access the application:${NC}"
  echo -e " Web UI: http://dexorder.local/"
  echo -e " Backend WS: ws://dexorder.local/ws"
  echo ""
  echo -e "${BLUE}Admin UIs (use port-forward):${NC}"
  echo -e " Flink UI: kubectl port-forward svc/flink-jobmanager 8081:8081"
  echo -e " Then open http://localhost:8081"
  echo -e " MinIO Console: kubectl port-forward svc/minio 9001:9001"
  echo -e " Then open http://localhost:9001"
  echo ""
  echo -e "${YELLOW}Note: Run 'minikube tunnel' in another terminal for dexorder.local ingress to work${NC}"
}

# Ensure the hard-coded dev account exists (created via the gateway's auth
# register endpoint) and upsert its "pro" license row directly in postgres.
# Returns 1 if postgres is unreachable or the user could not be created.
create_dev_user() {
  # Dev user configuration (single source of truth)
  local DEV_EMAIL="tim@dexorder.ai"
  local DEV_PASSWORD="test1234"
  local DEV_NAME="Tim"
  local LICENSE_TYPE="pro"

  echo -e "${BLUE}Initializing dev user...${NC}"

  # Find postgres pod
  local pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
  if [ -z "$pg_pod" ]; then
    echo -e "${YELLOW}⚠️ Postgres pod not found${NC}"
    return 1
  fi

  # Check if user already exists
  echo -e "${GREEN}→${NC} Checking for dev user..."
  local user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')
  if [ -n "$user_id" ]; then
    echo -e "${GREEN}✓ Dev user already exists ($DEV_EMAIL)${NC}"
  else
    echo -e "${GREEN}→${NC} Creating dev user via Better Auth API..."
    echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
    kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
      echo -e "${YELLOW}⚠️ Gateway not ready after 120s${NC}"
    }
    # Give gateway a few seconds to start accepting requests
    sleep 5

    # Create user via custom auth endpoint
    # (-w appends the HTTP status code on its own line after the body)
    local response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
      -H "Content-Type: application/json" \
      -d '{ "email": "'"$DEV_EMAIL"'", "password": "'"$DEV_PASSWORD"'", "name": "'"$DEV_NAME"'" }' 2>&1)
    local http_code=$(echo "$response" | tail -n1)
    if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
      echo -e "${GREEN}✓ User created via auth API${NC}"
    elif [ "$http_code" = "400" ]; then
      echo -e "${YELLOW}⚠️ User may already exist (status 400)${NC}"
    else
      echo -e "${YELLOW}⚠️ API call returned status $http_code${NC}"
      local body=$(echo "$response" | head -n -1)
      echo -e "${YELLOW}Response: $body${NC}"
    fi

    # Wait a moment for database to be updated
    sleep 2
    # Check again if user exists now
    user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')
    if [ -n "$user_id" ]; then
      echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
    fi
  fi

  if [ -n "$user_id" ]; then
    # Create/update license for the user
    echo -e "${GREEN}→${NC} Creating $LICENSE_TYPE license for dev user..."
    kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c " INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model) VALUES ( '$user_id', '$DEV_EMAIL', '$LICENSE_TYPE', 'http://localhost:8080/mcp', '{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}', '{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}', '{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}' ) ON CONFLICT (user_id) DO UPDATE SET license_type = EXCLUDED.license_type, features = EXCLUDED.features, resource_limits = EXCLUDED.resource_limits, preferred_model = EXCLUDED.preferred_model, updated_at = NOW(); " > /dev/null 2>&1
    echo -e "${GREEN}✓ Dev user ready ($DEV_EMAIL / $DEV_PASSWORD)${NC}"
  else
    echo -e "${YELLOW}⚠️ Could not create dev user (gateway may not be ready)${NC}"
    return 1
  fi
}

# Print pods, services, and ingresses in the current namespace.
show_status() {
  echo -e "${BLUE}Kubernetes Resources:${NC}"
  echo ""
  kubectl get pods,svc,ingress
}

# Tail logs for the pod matching $1 (by app= label first, then by name grep).
show_logs() {
  local service="$1"
  if [ -z "$service" ]; then
    echo -e "${RED}Error: Please specify a service name${NC}"
    echo "Available services: relay, ingestor, flink-jobmanager, flink-taskmanager, kafka, postgres, minio, iceberg-catalog"
    exit 1
  fi

  # Try to find pod by label or name
  local pod=$(kubectl get pods -l app="$service" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
  if [ -z "$pod" ]; then
    pod=$(kubectl get pods | grep "$service" | head -n1 | awk '{print $1}')
  fi
  if [ -z "$pod" ]; then
    echo -e "${RED}Error: No pod found for service '$service'${NC}"
    exit 1
  fi

  echo -e "${BLUE}Tailing logs for $pod...${NC}"
  kubectl logs -f "$pod"
}

# Open an interactive shell (sh, falling back to bash) in the pod matching $1.
open_shell() {
  local service="$1"
  if [ -z "$service" ]; then
    echo -e "${RED}Error: Please specify a service name${NC}"
    exit 1
  fi

  local pod=$(kubectl get pods -l app="$service" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
  if [ -z "$pod" ]; then
    pod=$(kubectl get pods | grep "$service" | head -n1 | awk '{print $1}')
  fi
  if [ -z "$pod" ]; then
    echo -e "${RED}Error: No pod found for service '$service'${NC}"
    exit 1
  fi

  echo -e "${BLUE}Opening shell in $pod...${NC}"
  kubectl exec -it "$pod" -- /bin/sh || kubectl exec -it "$pod" -- /bin/bash
}

# Delete the PersistentVolumeClaims for one stateful service, or all of them.
# $1: kafka | postgres | minio | qdrant | all (default: all)
delete_pvcs() {
  local service="${1:-all}"
  echo -e "${BLUE}Deleting PVCs for: $service${NC}"
  case "$service" in
    kafka)
      kubectl delete pvc -l app=kafka || true
      ;;
    postgres)
      kubectl delete pvc -l app=postgres || true
      ;;
    minio)
      kubectl delete pvc -l app=minio || true
      ;;
    qdrant)
      kubectl delete pvc -l app=qdrant || true
      ;;
    all)
      echo -e "${YELLOW}Deleting all StatefulSet PVCs...${NC}"
      kubectl delete pvc -l app=kafka 2>/dev/null || true
      kubectl delete pvc -l app=postgres 2>/dev/null || true
      kubectl delete pvc -l app=minio 2>/dev/null || true
      kubectl delete pvc -l app=qdrant 2>/dev/null || true
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac
  echo -e "${GREEN}✓ PVCs deleted${NC}"
}

# Destructive restart of StatefulSet(s): delete the StatefulSet and its PVCs
# (fresh storage), bounce dependents (iceberg-catalog), then redeploy everything.
# $1: kafka | postgres | minio | qdrant | all (default: all)
deep_restart() {
  local service="${1:-all}"
  echo -e "${BLUE}Deep restart for: $service${NC}"
  echo -e "${YELLOW}This will delete the StatefulSet(s) and their PVCs, then redeploy.${NC}"
  case "$service" in
    kafka)
      echo -e "${GREEN}→${NC} Deleting kafka StatefulSet..."
      kubectl delete statefulset kafka || true
      sleep 2
      delete_pvcs kafka
      ;;
    postgres)
      echo -e "${GREEN}→${NC} Deleting postgres StatefulSet..."
      kubectl delete statefulset postgres || true
      sleep 2
      delete_pvcs postgres
      # Force restart iceberg-catalog since it depends on postgres
      echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on postgres)..."
      kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
      ;;
    minio)
      echo -e "${GREEN}→${NC} Deleting minio StatefulSet..."
      kubectl delete statefulset minio || true
      sleep 2
      delete_pvcs minio
      # Force restart iceberg-catalog since it depends on minio
      echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on minio)..."
      kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
      ;;
    qdrant)
      echo -e "${GREEN}→${NC} Deleting qdrant StatefulSet..."
      kubectl delete statefulset qdrant || true
      sleep 2
      delete_pvcs qdrant
      ;;
    all)
      echo -e "${GREEN}→${NC} Deleting all StatefulSets..."
      kubectl delete statefulset kafka postgres minio qdrant || true
      sleep 2
      delete_pvcs all
      # Force restart iceberg-catalog since it depends on postgres and minio
      echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on postgres/minio)..."
      kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac

  echo -e "${GREEN}→${NC} Redeploying services..."
  deploy_services
  # Note: deploy_services already calls create_dev_user, so no need to call it again here
  echo -e "${GREEN}✓ Deep restart complete${NC}"
}

# Delete every deployed resource and all PVCs, after interactive confirmation.
clean_all() {
  echo -e "${RED}⚠️ WARNING: This will delete all resources and volumes!${NC}"
  read -p "Are you sure? (yes/no): " confirm
  if [[ "$confirm" != "yes" ]]; then
    echo "Aborted."
    exit 0
  fi

  echo -e "${BLUE}Deleting all resources...${NC}"
  kubectl delete -k deploy/k8s/dev/ || true
  kubectl delete pvc --all || true
  echo -e "${GREEN}✓ Resources deleted${NC}"
}

# Run `minikube tunnel` in the foreground (needed for LoadBalancer/ingress IPs).
start_tunnel() {
  echo -e "${BLUE}Starting minikube tunnel...${NC}"
  echo -e "${YELLOW}This requires sudo and will run in the foreground.${NC}"
  echo -e "${YELLOW}Press Ctrl+C to stop.${NC}"
  echo ""
  minikube tunnel
}

# Deploy a single service by re-applying full kustomize (ensures patches are applied)
deploy_service() {
  local service="$1"
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
  fi
  echo -e "${GREEN}→${NC} Deploying $service (via kustomize)..."
  # Re-apply full kustomize with image tags properly set
  # This ensures all patches (including imagePullPolicy) are properly applied
  cd "$ROOT_DIR/deploy/k8s/dev"
  # Create a temporary kustomization overlay with image tags
  # NOTE(review): extraction loss — everything between this heredoc and the
  # middle of the main dispatch's `stop)` arm is missing: the heredoc body,
  # the kubectl apply, this function's closing brace, the check_minikube /
  # check_kubectl calls, `case "$COMMAND" in`, the whole `start)` arm, and
  # the head of `stop)` (its --keep-data branch and StatefulSet deletion).
  # Restore this region from version control.
  cat >> kustomization.yaml </dev/null || true
# Wait for pods to terminate
echo -e "${GREEN}→${NC} Waiting for pods to terminate..."
kubectl wait --for=delete pod -l app=kafka --timeout=60s 2>/dev/null || true
kubectl wait --for=delete pod -l app=postgres --timeout=60s 2>/dev/null || true
kubectl wait --for=delete pod -l app=minio --timeout=60s 2>/dev/null || true
kubectl wait --for=delete pod -l app=qdrant --timeout=60s 2>/dev/null || true
# Now delete PVCs
delete_pvcs all
minikube stop
echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"
fi
;;
restart)
  shift # Remove 'restart' from args
  if [ $# -eq 0 ]; then
    # No services specified, restart all
    rebuild_images
    deploy_services
  else
    # Multiple services specified
    for service in "$@"; do
      rebuild_images "$service"
      deploy_service "$service"
    done
  fi
  ;;
rebuild)
  rebuild_images "${2:-}"
  ;;
deploy)
  if [ -n "$2" ]; then
    deploy_service "$2"
  else
    deploy_services
  fi
  ;;
status)
  show_status
  ;;
logs)
  show_logs "$2"
  ;;
shell)
  open_shell "$2"
  ;;
clean)
  clean_all
  ;;
deep-restart)
  deep_restart "${2:-all}"
  ;;
delete-pvcs)
  delete_pvcs "${2:-all}"
  ;;
tunnel)
  start_tunnel
  ;;
*)
  usage
  ;;
esac