#!/usr/bin/env bash
# Dev-environment helper: manages a local minikube cluster and the services
# deployed into it. See usage() for the full command list.
set -e

# Absolute path of this script's directory, and the repo root one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Print the command reference to stdout and exit with a usage error.
usage() {
    cat <<EOF
Usage: $0 [COMMAND]

Manage the minikube development environment

Commands:
  start              Start minikube and deploy all services
  stop [--keep-data] Stop minikube (deletes PVCs by default)
  restart [svc]      Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web|sandbox)
  deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|all)
  rebuild [svc]      Rebuild all custom images, or just one
  deploy  [svc]      Deploy/update all services, or just one
  delete-pvcs [svc]  Delete PVCs for specific service or all (kafka|postgres|minio|all)
  status             Show status of all services
  logs               Tail logs for a service
  shell              Open a shell in a service pod
  clean              Delete all resources and volumes
  tunnel             Start minikube tunnel (for LoadBalancer access)

Examples:
  $0 start                    # Start minikube and deploy everything
  $0 stop                     # Stop minikube and delete PVCs
  $0 stop --keep-data         # Stop minikube but keep PVCs
  $0 deep-restart postgres    # Restart postgres with fresh storage
  $0 delete-pvcs kafka        # Delete kafka PVCs only
  $0 rebuild                  # Rebuild all custom images
  $0 logs relay               # Tail logs for relay service
  $0 shell ingestor           # Open shell in ingestor pod
EOF
    exit 1
}

# Default to "start" when invoked with no arguments.
COMMAND="${1:-start}"

# Abort with an install hint unless the `minikube` binary is on PATH.
check_minikube() {
    if command -v minikube &> /dev/null; then
        return 0
    fi
    echo -e "${RED}Error: minikube not found. Please install minikube first.${NC}"
    echo "https://minikube.sigs.k8s.io/docs/start/"
    exit 1
}

# Abort with an install hint unless the `kubectl` binary is on PATH.
check_kubectl() {
    if command -v kubectl &> /dev/null; then
        return 0
    fi
    echo -e "${RED}Error: kubectl not found. Please install kubectl first.${NC}"
    exit 1
}

# Boot minikube (if not already running), enable the ingress addon, point the
# docker CLI at minikube's daemon, and make dexorder.local resolve to the
# cluster IP via /etc/hosts.
start_minikube() {
    echo -e "${BLUE}Starting minikube...${NC}"

    if minikube status &> /dev/null; then
        echo -e "${GREEN}✓ Minikube already running${NC}"
    else
        minikube start --cpus=6 --memory=12g --driver=docker
        echo -e "${GREEN}✓ Minikube started${NC}"
    fi

    # Enable ingress addon
    echo -e "${BLUE}Enabling ingress addon...${NC}"
    minikube addons enable ingress

    # Wait for ingress webhook to be ready (non-fatal: warn and continue)
    echo -e "${BLUE}Waiting for ingress webhook to be ready...${NC}"
    kubectl wait --namespace ingress-nginx \
      --for=condition=ready pod \
      --selector=app.kubernetes.io/component=controller \
      --timeout=120s 2>/dev/null || echo -e "${YELLOW}⚠️  Ingress controller not ready yet${NC}"

    # Give webhook endpoint a moment to start listening
    sleep 5
    echo -e "${GREEN}✓ Ingress enabled${NC}"

    # Set docker environment. Quoted so the emitted export statements are not
    # word-split/globbed before eval sees them.
    echo -e "${YELLOW}Setting docker environment to minikube...${NC}"
    eval "$(minikube docker-env)"
    echo -e "${GREEN}✓ Docker environment set${NC}"

    # Add /etc/hosts entry. -F matches the hostname literally (a bare '.' in
    # the pattern would otherwise be a regex wildcard).
    MINIKUBE_IP=$(minikube ip)
    if ! grep -qF "dexorder.local" /etc/hosts; then
        echo -e "${YELLOW}Adding dexorder.local to /etc/hosts (requires sudo)...${NC}"
        echo "$MINIKUBE_IP  dexorder.local" | sudo tee -a /etc/hosts
    else
        echo -e "${GREEN}✓ /etc/hosts entry exists${NC}"
    fi
}

# Render the dev gateway config from its template, substituting the current
# sandbox and lifecycle-sidecar image tags (from $SANDBOX_TAG / $SIDECAR_TAG).
generate_gateway_config_dev() {
    local tpl="$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml.tpl"
    local out="$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
    sed -e "s|SANDBOX_IMAGE_TAG|dexorder/ai-sandbox:$SANDBOX_TAG|g" \
        -e "s|SIDECAR_IMAGE_TAG|dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG|g" \
        "$tpl" > "$out"
}

# Rebuild dev images for one service (or "all", the default) inside minikube's
# docker daemon via bin/build, then persist the resulting tags to
# .dev-image-tag so later deploys can reference them.
rebuild_images() {
    local service="${1:-all}"
    echo -e "${BLUE}Building custom images...${NC}"

    # Use minikube's docker daemon
    eval $(minikube docker-env)

    # Build images using the standard bin/build script with dev flag
    cd "$ROOT_DIR"

    # Load existing tags so we preserve whichever services we're not rebuilding
    if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
        source "$ROOT_DIR/.dev-image-tag"
    fi

    # Helper: run build, show output, and return just the dev tag via stdout
    # bin/build now outputs the tag on its last line to stderr
    build_and_get_tag() {
        local svc="$1"
        local output
        local tag
        # Capture stderr (which contains both output and the tag)
        output=$("$SCRIPT_DIR/build" "$svc" dev 2>&1) || { echo "$output" >&2; return 1; }
        # Show the build output (excluding the final tag line)
        # NOTE(review): `head -n -1` is a GNU coreutils extension — not portable
        # to BSD/macOS head; confirm the target dev hosts are Linux.
        echo "$output" | head -n -1 >&2
        # Return just the tag (last line)
        tag=$(echo "$output" | tail -n 1)
        echo "$tag"
    }

    if [ "$service" == "all" ] || [ "$service" == "relay" ]; then
        echo -e "${GREEN}→${NC} Building relay..."
        RELAY_TAG=$(build_and_get_tag relay) || exit 1
    fi

    if [ "$service" == "all" ] || [ "$service" == "ingestor" ]; then
        echo -e "${GREEN}→${NC} Building ingestor..."
        INGEST_TAG=$(build_and_get_tag ingestor) || exit 1
    fi

    if [ "$service" == "all" ] || [ "$service" == "flink" ]; then
        echo -e "${GREEN}→${NC} Building flink..."
        FLINK_TAG=$(build_and_get_tag flink) || exit 1
    fi

    # Build gateway (Node.js application)
    if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
        echo -e "${GREEN}→${NC} Building gateway..."
        GATEWAY_TAG=$(build_and_get_tag gateway) || exit 1
    fi

    # Build lifecycle-sidecar (Go binary)
    # Also aliased to the stable :dev tag in addition to the unique build tag.
    if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
        echo -e "${GREEN}→${NC} Building lifecycle-sidecar..."
        _SIDECAR_BUILD_TAG=$(build_and_get_tag lifecycle-sidecar) || exit 1
        docker tag dexorder/ai-lifecycle-sidecar:$_SIDECAR_BUILD_TAG dexorder/ai-lifecycle-sidecar:dev
        SIDECAR_TAG=$_SIDECAR_BUILD_TAG
    fi

    # Build web (Vue.js application)
    if [ "$service" == "all" ] || [ "$service" == "web" ]; then
        echo -e "${GREEN}→${NC} Building web..."
        WEB_TAG=$(build_and_get_tag web) || exit 1
    fi

    # Build sandbox (Python client library)
    # Also aliased to the stable :dev tag in addition to the unique build tag.
    if [ "$service" == "all" ] || [ "$service" == "sandbox" ]; then
        echo -e "${GREEN}→${NC} Building sandbox..."
        _SANDBOX_BUILD_TAG=$(build_and_get_tag sandbox) || exit 1
        docker tag dexorder/ai-sandbox:$_SANDBOX_BUILD_TAG dexorder/ai-sandbox:dev
        SANDBOX_TAG=$_SANDBOX_BUILD_TAG
    fi

    # Save the tags for deployment (all services, preserving any we didn't rebuild)
    echo "RELAY_TAG=$RELAY_TAG" > "$ROOT_DIR/.dev-image-tag"
    echo "INGEST_TAG=$INGEST_TAG" >> "$ROOT_DIR/.dev-image-tag"
    echo "FLINK_TAG=$FLINK_TAG" >> "$ROOT_DIR/.dev-image-tag"
    echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag"
    echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag"
    echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag"
    echo "SANDBOX_TAG=$SANDBOX_TAG" >> "$ROOT_DIR/.dev-image-tag"

    echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG, sandbox=$SANDBOX_TAG${NC}"
}

# Deploy/update every service into minikube via kustomize, using the image
# tags recorded by rebuild_images (falling back to "latest"), then wait for
# the core deployments and run the init script.
deploy_services() {
    echo -e "${BLUE}Deploying services to minikube...${NC}"

    cd "$ROOT_DIR/deploy/k8s/dev"

    # Get the dev image tags
    if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
        source "$ROOT_DIR/.dev-image-tag"
        echo -e "${BLUE}Using image tags:${NC}"
        echo -e "  Relay: $RELAY_TAG"
        echo -e "  Ingestor: $INGEST_TAG"
        echo -e "  Flink: $FLINK_TAG"
        echo -e "  Gateway: $GATEWAY_TAG"
        echo -e "  Web: $WEB_TAG"
        echo -e "  Sidecar: $SIDECAR_TAG"
        echo -e "  Sandbox: $SANDBOX_TAG"
    else
        echo -e "${YELLOW}⚠️  No dev tags found. Using 'latest'. Run rebuild first.${NC}"
        RELAY_TAG="latest"
        INGEST_TAG="latest"
        FLINK_TAG="latest"
        GATEWAY_TAG="latest"
        WEB_TAG="latest"
        # Fix: these two were previously left unset in this branch, which
        # produced empty `newTag:` entries in the kustomization stanza below.
        SIDECAR_TAG="latest"
        SANDBOX_TAG="latest"
    fi

    # Create secrets first (if they exist)
    echo -e "${GREEN}→${NC} Checking secrets..."
    if ls secrets/*.yaml &> /dev/null; then
        "$SCRIPT_DIR/secret-update" dev || echo -e "${YELLOW}  (Some secrets missing - copy from .example files)${NC}"
    else
        echo -e "${YELLOW}⚠️  No secrets found. Copy from .example files:${NC}"
        echo -e "${YELLOW}  cd deploy/k8s/dev/secrets${NC}"
        echo -e "${YELLOW}  cp ai-secrets.yaml.example ai-secrets.yaml${NC}"
        echo -e "${YELLOW}  # Edit with actual values, then run: bin/secret-update dev${NC}"
    fi

    # Update configs
    echo -e "${GREEN}→${NC} Updating configs..."

    generate_gateway_config_dev

    "$SCRIPT_DIR/config-update" dev

    # Create a temporary kustomization overlay with image tags
    echo -e "${GREEN}→${NC} Setting image tags in kustomization..."
    cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev)
images:
  - name: dexorder/ai-relay
    newTag: $RELAY_TAG
  - name: dexorder/ai-ingestor
    newTag: $INGEST_TAG
  - name: dexorder/ai-flink
    newTag: $FLINK_TAG
  - name: dexorder/ai-gateway
    newTag: $GATEWAY_TAG
  - name: dexorder/ai-web
    newTag: $WEB_TAG
  - name: dexorder/ai-sandbox
    newTag: $SANDBOX_TAG
  - name: dexorder/ai-lifecycle-sidecar
    newTag: $SIDECAR_TAG
EOF

    # Apply kustomize
    echo -e "${GREEN}→${NC} Applying Kubernetes manifests..."
    if ! kubectl apply -k .; then
        # Strip the appended stanza before bailing out so kustomization.yaml is
        # not left dirty (set -e would otherwise abort before the cleanup below).
        sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
        echo -e "${RED}Error: kubectl apply failed${NC}"
        exit 1
    fi

    # Apply sandbox-namespace secrets (must be after kustomize creates the sandbox namespace)
    echo -e "${GREEN}→${NC} Applying sandbox secrets..."
    if [ -f "$ROOT_DIR/deploy/k8s/dev/secrets/sandbox-secrets.yaml" ]; then
        kubectl apply -f "$ROOT_DIR/deploy/k8s/dev/secrets/sandbox-secrets.yaml"
    fi

    # Clean up the appended image tags from kustomization.yaml
    sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml

    echo -e "${GREEN}✓ Services deployed${NC}"

    echo ""
    echo -e "${BLUE}Waiting for deployments to be ready...${NC}"
    kubectl wait --for=condition=available --timeout=300s \
        deployment/relay \
        deployment/ingestor \
        deployment/iceberg-catalog \
        deployment/flink-jobmanager \
        deployment/flink-taskmanager \
        2>/dev/null || echo -e "${YELLOW}(Some deployments not ready yet)${NC}"

    # Initialize schema and dev user
    "$SCRIPT_DIR/init" dev

    echo ""
    echo -e "${GREEN}✓ Dev environment ready!${NC}"
    echo ""
    echo -e "${BLUE}Access the application:${NC}"
    echo -e "  Web UI:        http://dexorder.local/"
    echo -e "  Backend WS:    ws://dexorder.local/ws"
    echo ""
    echo -e "${BLUE}Admin UIs (use port-forward):${NC}"
    echo -e "  Flink UI:      kubectl port-forward svc/flink-jobmanager 8081:8081"
    echo -e "                 Then open http://localhost:8081"
    echo -e "  MinIO Console: kubectl port-forward svc/minio 9001:9001"
    echo -e "                 Then open http://localhost:9001"
    echo ""
    echo -e "${YELLOW}Note: Run 'minikube tunnel' in another terminal for dexorder.local ingress to work${NC}"
}

# Ensure the dev login exists: create the user through the gateway's auth API
# if it is missing, then upsert a "pro" license row for it directly in postgres.
# Returns 1 if postgres is unreachable or the user could not be created.
create_dev_user() {
    # Dev user configuration (single source of truth)
    local DEV_EMAIL="tim@dexorder.ai"
    local DEV_PASSWORD="test1234"
    local DEV_NAME="Tim"
    local LICENSE_TYPE="pro"

    echo -e "${BLUE}Initializing dev user...${NC}"

    # Find postgres pod
    local pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
    if [ -z "$pg_pod" ]; then
        echo -e "${YELLOW}⚠️  Postgres pod not found${NC}"
        return 1
    fi

    # Check if user already exists
    echo -e "${GREEN}→${NC} Checking for dev user..."
    local user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')

    if [ -n "$user_id" ]; then
        echo -e "${GREEN}✓ Dev user already exists ($DEV_EMAIL)${NC}"
    else
        echo -e "${GREEN}→${NC} Creating dev user via Better Auth API..."
        echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
        kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
            echo -e "${YELLOW}⚠️  Gateway not ready after 120s${NC}"
        }

        # Give gateway a few seconds to start accepting requests
        sleep 5

        # Create user via custom auth endpoint.
        # NOTE(review): relies on dexorder.local resolving to the cluster (the
        # /etc/hosts entry added by start_minikube) and on ingress routing.
        local response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
            -H "Content-Type: application/json" \
            -d '{
              "email": "'"$DEV_EMAIL"'",
              "password": "'"$DEV_PASSWORD"'",
              "name": "'"$DEV_NAME"'"
            }' 2>&1)

        # The -w format appends the HTTP status on its own final line.
        local http_code=$(echo "$response" | tail -n1)
        if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
            echo -e "${GREEN}✓ User created via auth API${NC}"
        elif [ "$http_code" = "400" ]; then
            echo -e "${YELLOW}⚠️  User may already exist (status 400)${NC}"
        else
            echo -e "${YELLOW}⚠️  API call returned status $http_code${NC}"
            local body=$(echo "$response" | head -n -1)
            echo -e "${YELLOW}Response: $body${NC}"
        fi

        # Wait a moment for database to be updated
        sleep 2

        # Check again if user exists now
        user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')

        if [ -n "$user_id" ]; then
            echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
        fi
    fi

    if [ -n "$user_id" ]; then
        # Create/update license for the user.
        # NOTE(review): values are interpolated into the SQL directly; they are
        # trusted constants defined above, not user input.
        echo -e "${GREEN}→${NC} Creating $LICENSE_TYPE license for dev user..."
        kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
            INSERT INTO user_licenses (user_id, email, license, mcp_server_url)
            VALUES (
                '$user_id',
                '$DEV_EMAIL',
                '{\"licenseType\":\"$LICENSE_TYPE\",\"features\":{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true},\"resourceLimits\":{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60},\"k8sResources\":{\"memoryRequest\":\"512Mi\",\"memoryLimit\":\"2Gi\",\"cpuRequest\":\"250m\",\"cpuLimit\":\"2000m\",\"storage\":\"10Gi\",\"tmpSizeLimit\":\"256Mi\",\"enableIdleShutdown\":true,\"idleTimeoutMinutes\":60},\"preferredModel\":{\"provider\":\"anthropic\",\"model\":\"claude-sonnet-4-6\",\"temperature\":0.7}}',
                'http://localhost:8080/mcp'
            )
            ON CONFLICT (user_id) DO UPDATE SET
                license = EXCLUDED.license,
                updated_at = NOW();
        " > /dev/null 2>&1
        echo -e "${GREEN}✓ Dev user ready ($DEV_EMAIL / $DEV_PASSWORD)${NC}"
    else
        echo -e "${YELLOW}⚠️  Could not create dev user (gateway may not be ready)${NC}"
        return 1
    fi
}

# One-shot overview of the cluster's pods, services, and ingresses.
show_status() {
    printf '%b\n\n' "${BLUE}Kubernetes Resources:${NC}"
    kubectl get pods,svc,ingress
}

# Tail (follow) logs of the first pod matching a service name.
# Lookup order: app=<name> label, then pod-name substring.
show_logs() {
    local wanted="$1"
    if [[ -z "$wanted" ]]; then
        echo -e "${RED}Error: Please specify a service name${NC}"
        echo "Available services: relay, ingestor, flink-jobmanager, flink-taskmanager, kafka, postgres, minio, iceberg-catalog"
        exit 1
    fi

    # Prefer the app=<name> label; fall back to a name substring match.
    local pod=$(kubectl get pods -l app="$wanted" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
    [[ -n "$pod" ]] || pod=$(kubectl get pods | grep "$wanted" | head -n1 | awk '{print $1}')

    if [[ -z "$pod" ]]; then
        echo -e "${RED}Error: No pod found for service '$wanted'${NC}"
        exit 1
    fi

    echo -e "${BLUE}Tailing logs for $pod...${NC}"
    kubectl logs -f "$pod"
}

# Open an interactive shell (sh, falling back to bash) in a service's pod.
open_shell() {
    local wanted="$1"
    if [[ -z "$wanted" ]]; then
        echo -e "${RED}Error: Please specify a service name${NC}"
        exit 1
    fi

    # Prefer the app=<name> label; fall back to a name substring match.
    local pod=$(kubectl get pods -l app="$wanted" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
    [[ -n "$pod" ]] || pod=$(kubectl get pods | grep "$wanted" | head -n1 | awk '{print $1}')

    if [[ -z "$pod" ]]; then
        echo -e "${RED}Error: No pod found for service '$wanted'${NC}"
        exit 1
    fi

    echo -e "${BLUE}Opening shell in $pod...${NC}"
    kubectl exec -it "$pod" -- /bin/sh || kubectl exec -it "$pod" -- /bin/bash
}

# Delete the PersistentVolumeClaims of one StatefulSet-backed service
# (kafka|postgres|minio) or of all of them (default).
delete_pvcs() {
    local target="${1:-all}"

    echo -e "${BLUE}Deleting PVCs for: $target${NC}"

    case "$target" in
        kafka|postgres|minio)
            kubectl delete pvc -l app="$target" || true
            ;;
        all)
            echo -e "${YELLOW}Deleting all StatefulSet PVCs...${NC}"
            local app
            for app in kafka postgres minio; do
                kubectl delete pvc -l app="$app" 2>/dev/null || true
            done
            ;;
        *)
            echo -e "${RED}Error: Unknown service '$target'${NC}"
            echo "Valid services: kafka, postgres, minio, all"
            exit 1
            ;;
    esac

    echo -e "${GREEN}✓ PVCs deleted${NC}"
}

# Tear down StatefulSet(s) plus their PVCs for a target (kafka|postgres|minio|
# all, default all), then rebuild all images and redeploy everything.
deep_restart() {
    local target="${1:-all}"
    local dep

    echo -e "${BLUE}Deep restart for: $target${NC}"
    echo -e "${YELLOW}This will delete the StatefulSet(s) and their PVCs, then redeploy.${NC}"

    # Validate the target before touching anything.
    case "$target" in
        kafka|postgres|minio|all) ;;
        *)
            echo -e "${RED}Error: Unknown service '$target'${NC}"
            echo "Valid services: kafka, postgres, minio, all"
            exit 1
            ;;
    esac

    # Delete the StatefulSet(s), give the controller a moment, then drop PVCs.
    if [ "$target" == "all" ]; then
        echo -e "${GREEN}→${NC} Deleting all StatefulSets..."
        kubectl delete statefulset kafka postgres minio || true
    else
        echo -e "${GREEN}→${NC} Deleting $target StatefulSet..."
        kubectl delete statefulset "$target" || true
    fi
    sleep 2
    delete_pvcs "$target"

    # iceberg-catalog depends on postgres and minio; bounce it when either is reset.
    case "$target" in
        postgres) dep="postgres" ;;
        minio)    dep="minio" ;;
        all)      dep="postgres/minio" ;;
        *)        dep="" ;;
    esac
    if [ -n "$dep" ]; then
        echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on $dep)..."
        kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
    fi

    # Full reset also wipes the sandbox namespace's user state.
    if [ "$target" == "all" ]; then
        echo -e "${GREEN}→${NC} Removing all sandbox deployments, services, and PVCs..."
        kubectl delete deployments,services,pvc --all -n sandbox 2>/dev/null || true
    fi

    echo -e "${GREEN}→${NC} Rebuilding application images..."
    rebuild_images

    echo -e "${GREEN}→${NC} Redeploying services..."
    deploy_services

    # Note: deploy_services already calls create_dev_user, so no need to call it again here

    echo -e "${GREEN}✓ Deep restart complete${NC}"
}

# Interactively confirm, then delete every deployed resource and all PVCs.
clean_all() {
    echo -e "${RED}⚠️  WARNING: This will delete all resources and volumes!${NC}"
    # -r so a stray backslash cannot mangle the typed confirmation
    read -r -p "Are you sure? (yes/no): " confirm
    if [[ "$confirm" != "yes" ]]; then
        echo "Aborted."
        exit 0
    fi

    echo -e "${BLUE}Deleting all resources...${NC}"
    # Use the absolute manifest path; the previous relative path only worked
    # when the script happened to be invoked from the repo root.
    kubectl delete -k "$ROOT_DIR/deploy/k8s/dev/" || true
    kubectl delete pvc --all || true
    echo -e "${GREEN}✓ Resources deleted${NC}"
}

# Run `minikube tunnel` in the foreground so LoadBalancer/ingress IPs are reachable.
start_tunnel() {
    printf '%b\n' \
        "${BLUE}Starting minikube tunnel...${NC}" \
        "${YELLOW}This requires sudo and will run in the foreground.${NC}" \
        "${YELLOW}Press Ctrl+C to stop.${NC}" \
        ""
    minikube tunnel
}

# Deploy a single service by re-applying full kustomize (ensures patches are applied).
# Returns non-zero for unknown services or a failed apply.
deploy_service() {
    local service="$1"

    # Pull in the per-service image tags recorded by rebuild_images.
    if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
        source "$ROOT_DIR/.dev-image-tag"
    fi

    echo -e "${GREEN}→${NC} Deploying $service (via kustomize)..."

    # Re-apply full kustomize with image tags properly set
    # This ensures all patches (including imagePullPolicy) are properly applied
    cd "$ROOT_DIR/deploy/k8s/dev" || return 1

    # Map service names to image names and tags
    local image_name=""
    local image_tag=""

    case "$service" in
        relay)
            image_name="dexorder/ai-relay"
            image_tag="$RELAY_TAG"
            ;;
        ingestor)
            image_name="dexorder/ai-ingestor"
            image_tag="$INGEST_TAG"
            ;;
        flink)
            image_name="dexorder/ai-flink"
            image_tag="$FLINK_TAG"
            ;;
        gateway)
            image_name="dexorder/ai-gateway"
            image_tag="$GATEWAY_TAG"
            # Gateway config embeds the sandbox/sidecar tags, so regenerate it
            generate_gateway_config_dev
            "$SCRIPT_DIR/config-update" dev
            ;;
        web)
            image_name="dexorder/ai-web"
            image_tag="$WEB_TAG"
            ;;
        lifecycle-sidecar|sidecar)
            image_name="dexorder/ai-lifecycle-sidecar"
            image_tag="$SIDECAR_TAG"
            ;;
        *)
            echo -e "${RED}Error: Unknown service '$service'${NC}"
            return 1
            ;;
    esac

    # Create a temporary kustomization overlay with ONLY this service's image tag
    cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev)
images:
  - name: $image_name
    newTag: $image_tag
EOF

    # Apply, but always strip the appended stanza afterwards — previously a
    # failed apply aborted the script via `set -e` and left kustomization.yaml dirty.
    local apply_rv=0
    kubectl apply -k . || apply_rv=$?

    # Clean up the appended image tags from kustomization.yaml
    sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml

    if [ "$apply_rv" -ne 0 ]; then
        echo -e "${RED}Error: kubectl apply failed for $service${NC}"
        return "$apply_rv"
    fi

    echo -e "${GREEN}✓ $service deployed${NC}"
}

# Main command routing
# Both binaries are needed by nearly every command, so verify them up front.
check_minikube
check_kubectl

case "$COMMAND" in
    start)
        # Full bring-up: cluster + addons, image builds, deploy.
        start_minikube
        rebuild_images
        deploy_services
        ;;
    stop)
        # Check for --keep-data flag
        if [[ "$2" == "--keep-data" ]]; then
            echo -e "${BLUE}Stopping minikube (keeping data)...${NC}"
            minikube stop
            echo -e "${GREEN}✓ Minikube stopped (PVCs preserved)${NC}"
        else
            echo -e "${BLUE}Stopping minikube and deleting PVCs...${NC}"
            # Scale down StatefulSets first to release PVCs
            echo -e "${GREEN}→${NC} Scaling down StatefulSets..."
            kubectl scale statefulset kafka postgres minio --replicas=0 2>/dev/null || true
            # Wait for pods to terminate
            echo -e "${GREEN}→${NC} Waiting for pods to terminate..."
            kubectl wait --for=delete pod -l app=kafka --timeout=60s 2>/dev/null || true
            kubectl wait --for=delete pod -l app=postgres --timeout=60s 2>/dev/null || true
            kubectl wait --for=delete pod -l app=minio --timeout=60s 2>/dev/null || true
            # Now delete PVCs
            delete_pvcs all
            # Delete sandbox namespace
            echo -e "${GREEN}→${NC} Deleting sandbox namespace..."
            kubectl delete namespace sandbox 2>/dev/null || true
            minikube stop
            echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
            echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"
        fi
        ;;
    restart)
        shift  # Remove 'restart' from args
        if [ $# -eq 0 ]; then
            # No services specified, restart all
            rebuild_images
            deploy_services
        else
            # Multiple services specified: rebuild ALL first, then deploy ALL together.
            # Deploying one-at-a-time causes each deploy to revert the previous service's
            # image tag override (each kubectl apply -k . only carries one tag at a time).
            sandbox_requested=0
            deploy_services_list=()

            for service in "$@"; do
                rebuild_images "$service"

                if [ "$service" == "sandbox" ]; then
                    sandbox_requested=1
                else
                    deploy_services_list+=("$service")
                fi
            done

            # Sandbox restart requires gateway to redeploy with the new sandbox image tag.
            # If gateway wasn't explicitly listed, rebuild and deploy it automatically.
            if [ "$sandbox_requested" == "1" ]; then
                gateway_in_list=0
                for svc in "${deploy_services_list[@]}"; do
                    [ "$svc" == "gateway" ] && gateway_in_list=1 && break
                done
                if [ "$gateway_in_list" == "0" ]; then
                    rebuild_images "gateway"
                    deploy_services_list+=("gateway")
                fi
            fi

            # Deploy all non-sandbox services together in one kustomize apply
            if [ ${#deploy_services_list[@]} -gt 0 ]; then
                # Load the tags just written by the rebuilds above.
                if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
                    source "$ROOT_DIR/.dev-image-tag"
                fi

                cd "$ROOT_DIR/deploy/k8s/dev"

                # Regenerate gateway-config if gateway is in the list
                for svc in "${deploy_services_list[@]}"; do
                    if [ "$svc" == "gateway" ]; then
                        generate_gateway_config_dev
                        break
                    fi
                done

                # Build the images stanza for all services at once
                echo "# Image tags (added by bin/dev)" >> kustomization.yaml
                echo "images:" >> kustomization.yaml
                for svc in "${deploy_services_list[@]}"; do
                    case "$svc" in
                        relay)           echo "  - name: dexorder/ai-relay" >> kustomization.yaml; echo "    newTag: $RELAY_TAG" >> kustomization.yaml ;;
                        ingestor)        echo "  - name: dexorder/ai-ingestor" >> kustomization.yaml; echo "    newTag: $INGEST_TAG" >> kustomization.yaml ;;
                        flink)           echo "  - name: dexorder/ai-flink" >> kustomization.yaml; echo "    newTag: $FLINK_TAG" >> kustomization.yaml ;;
                        gateway)         echo "  - name: dexorder/ai-gateway" >> kustomization.yaml; echo "    newTag: $GATEWAY_TAG" >> kustomization.yaml ;;
                        web)             echo "  - name: dexorder/ai-web" >> kustomization.yaml; echo "    newTag: $WEB_TAG" >> kustomization.yaml ;;
                        lifecycle-sidecar|sidecar) echo "  - name: dexorder/ai-lifecycle-sidecar" >> kustomization.yaml; echo "    newTag: $SIDECAR_TAG" >> kustomization.yaml ;;
                    esac
                done

                kubectl apply -k .

                # Strip the appended stanza again to keep kustomization.yaml clean.
                sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml

            fi

            # Handle sandbox separately
            if [ "$sandbox_requested" == "1" ]; then
                echo -e "${GREEN}→${NC} Deleting user container deployments in sandbox namespace..."
                kubectl delete deployments --all -n sandbox 2>/dev/null || true
                echo -e "${GREEN}✓ User containers will be recreated by gateway on next login${NC}"
            fi
        fi
        ;;
    rebuild)
        rebuild_images "${2:-}"
        ;;
    deploy)
        # With a service argument, deploy just that service; otherwise everything.
        if [ -n "$2" ]; then
            deploy_service "$2"
        else
            deploy_services
        fi
        ;;
    status)
        show_status
        ;;
    logs)
        show_logs "$2"
        ;;
    shell)
        open_shell "$2"
        ;;
    clean)
        clean_all
        ;;
    deep-restart)
        deep_restart "${2:-all}"
        ;;
    delete-pvcs)
        delete_pvcs "${2:-all}"
        ;;
    tunnel)
        start_tunnel
        ;;
    *)
        # Unknown command: print help and exit non-zero.
        usage
        ;;
esac
