Files
ai/bin/dev

646 lines
23 KiB
Bash
Executable File

#!/usr/bin/env bash
# ai/bin/dev — manage the minikube-based development environment:
# build service images, deploy kustomize manifests, manage PVCs,
# tail logs, open shells, and run the LoadBalancer tunnel.
set -e
# Directory containing this script, and the repo root one level above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
# ANSI escape sequences for terminal output (interpreted by `echo -e`).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
usage() {
  # Print the command reference to stdout, then exit non-zero so callers
  # invoking an unknown sub-command get a failure status.
  cat <<HELP
Usage: $0 [COMMAND]

Manage the minikube development environment

Commands:
 start Start minikube and deploy all services
 stop [--keep-data] Stop minikube (deletes PVCs by default)
 restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web)
 deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)
 rebuild [svc] Rebuild all custom images, or just one
 deploy [svc] Deploy/update all services, or just one
 delete-pvcs [svc] Delete PVCs for specific service or all (kafka|postgres|minio|qdrant|all)
 status Show status of all services
 logs Tail logs for a service
 shell Open a shell in a service pod
 clean Delete all resources and volumes
 tunnel Start minikube tunnel (for LoadBalancer access)

Examples:
 $0 start # Start minikube and deploy everything
 $0 stop # Stop minikube and delete PVCs
 $0 stop --keep-data # Stop minikube but keep PVCs
 $0 deep-restart postgres # Restart postgres with fresh storage
 $0 delete-pvcs kafka # Delete kafka PVCs only
 $0 rebuild # Rebuild all custom images
 $0 logs relay # Tail logs for relay service
 $0 shell ingestor # Open shell in ingestor pod
HELP
  exit 1
}
COMMAND="${1:-start}"
check_minikube() {
  # Guard clause: succeed immediately when the minikube CLI is on PATH;
  # otherwise print install pointers and abort.
  command -v minikube &> /dev/null && return 0
  echo -e "${RED}Error: minikube not found. Please install minikube first.${NC}"
  echo "https://minikube.sigs.k8s.io/docs/start/"
  exit 1
}
check_kubectl() {
  # Guard clause: succeed when the kubectl CLI is available; abort otherwise.
  command -v kubectl &> /dev/null && return 0
  echo -e "${RED}Error: kubectl not found. Please install kubectl first.${NC}"
  exit 1
}
start_minikube() {
  # Start (or reuse) the minikube cluster, enable the nginx ingress addon,
  # point the local docker CLI at minikube's daemon, and make sure
  # dexorder.local resolves via /etc/hosts.
  echo -e "${BLUE}Starting minikube...${NC}"
  if minikube status &> /dev/null; then
    echo -e "${GREEN}✓ Minikube already running${NC}"
  else
    minikube start --cpus=6 --memory=12g --driver=docker
    echo -e "${GREEN}✓ Minikube started${NC}"
  fi
  # Enable ingress addon
  echo -e "${BLUE}Enabling ingress addon...${NC}"
  minikube addons enable ingress
  # Wait for ingress webhook to be ready (best-effort; deploy retries later)
  echo -e "${BLUE}Waiting for ingress webhook to be ready...${NC}"
  kubectl wait --namespace ingress-nginx \
    --for=condition=ready pod \
    --selector=app.kubernetes.io/component=controller \
    --timeout=120s 2>/dev/null || echo -e "${YELLOW}⚠️ Ingress controller not ready yet${NC}"
  # Give webhook endpoint a moment to start listening
  sleep 5
  echo -e "${GREEN}✓ Ingress enabled${NC}"
  # Set docker environment. The substitution is quoted (SC2046) so the
  # emitted `export` statements are evaluated without word-splitting.
  echo -e "${YELLOW}Setting docker environment to minikube...${NC}"
  eval "$(minikube docker-env)"
  echo -e "${GREEN}✓ Docker environment set${NC}"
  # Add /etc/hosts entry. The dot is escaped so grep matches the literal
  # hostname instead of treating `.` as a wildcard.
  MINIKUBE_IP=$(minikube ip)
  if ! grep -q "dexorder\.local" /etc/hosts; then
    echo -e "${YELLOW}Adding dexorder.local to /etc/hosts (requires sudo)...${NC}"
    echo "$MINIKUBE_IP dexorder.local" | sudo tee -a /etc/hosts
  else
    echo -e "${GREEN}✓ /etc/hosts entry exists${NC}"
  fi
}
rebuild_images() {
  # Build the custom service images inside minikube's docker daemon and
  # record the resulting dev tags in $ROOT_DIR/.dev-image-tag.
  # $1: service to build (relay|ingestor|flink|gateway|sidecar|web) or "all" (default).
  local service="${1:-all}"
  echo -e "${BLUE}Building custom images...${NC}"
  # Use minikube's docker daemon. Quoted (SC2046) so the emitted exports
  # are not word-split before evaluation.
  eval "$(minikube docker-env)"
  # Build images using the standard bin/build script with dev flag
  cd "$ROOT_DIR"
  # Load existing tags so we preserve whichever services we're not rebuilding
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
  fi
  # Helper: run build, show output, and return just the dev tag via stdout.
  # Build output goes to stderr so the caller can capture only the tag via $().
  build_and_get_tag() {
    local svc="$1"
    local output
    output=$("$SCRIPT_DIR/build" "$svc" dev 2>&1) || { echo "$output" >&2; return 1; }
    echo "$output" >&2
    # Extract tag from "built <remote>/ai-<svc>:<tag>" line
    echo "$output" | grep -oE "ai-${svc}:dev[0-9]+" | tail -1 | cut -d: -f2
  }
  if [ "$service" == "all" ] || [ "$service" == "relay" ]; then
    echo -e "${GREEN}${NC} Building relay..."
    RELAY_TAG=$(build_and_get_tag relay) || exit 1
    docker tag "dexorder/ai-relay:$RELAY_TAG" "dexorder/relay:$RELAY_TAG"
  fi
  if [ "$service" == "all" ] || [ "$service" == "ingestor" ]; then
    echo -e "${GREEN}${NC} Building ingestor..."
    INGEST_TAG=$(build_and_get_tag ingestor) || exit 1
    docker tag "dexorder/ai-ingestor:$INGEST_TAG" "dexorder/ingestor:$INGEST_TAG"
  fi
  if [ "$service" == "all" ] || [ "$service" == "flink" ]; then
    echo -e "${GREEN}${NC} Building flink..."
    FLINK_TAG=$(build_and_get_tag flink) || exit 1
    docker tag "dexorder/ai-flink:$FLINK_TAG" "dexorder/flink:$FLINK_TAG"
  fi
  # Build gateway (Node.js application). Tag expansions are quoted (SC2086).
  if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
    echo -e "${GREEN}${NC} Building gateway..."
    cd "$ROOT_DIR/gateway"
    GATEWAY_TAG="dev$(date +%Y%m%d%H%M%S)"
    docker build -t dexorder/gateway:latest -t "dexorder/gateway:$GATEWAY_TAG" . || exit 1
    echo -e "${GREEN}✓ Built dexorder/gateway:$GATEWAY_TAG${NC}"
    cd "$ROOT_DIR"
  fi
  # Build lifecycle-sidecar (Go binary)
  if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
    echo -e "${GREEN}${NC} Building lifecycle-sidecar..."
    cd "$ROOT_DIR/lifecycle-sidecar"
    SIDECAR_TAG="dev$(date +%Y%m%d%H%M%S)"
    docker build -t lifecycle-sidecar:latest -t "lifecycle-sidecar:$SIDECAR_TAG" . || exit 1
    echo -e "${GREEN}✓ Built lifecycle-sidecar:$SIDECAR_TAG${NC}"
    cd "$ROOT_DIR"
  fi
  # Build web (Vue.js application)
  if [ "$service" == "all" ] || [ "$service" == "web" ]; then
    echo -e "${GREEN}${NC} Building web..."
    cd "$ROOT_DIR/web"
    WEB_TAG="dev$(date +%Y%m%d%H%M%S)"
    docker build -t dexorder/ai-web:latest -t "dexorder/ai-web:$WEB_TAG" . || exit 1
    echo -e "${GREEN}✓ Built dexorder/ai-web:$WEB_TAG${NC}"
    cd "$ROOT_DIR"
  fi
  # Save the tags for deployment (all services, preserving any we didn't
  # rebuild — they were sourced from the existing file above). Written via
  # one grouped redirect so the file is replaced in a single operation.
  {
    echo "RELAY_TAG=$RELAY_TAG"
    echo "INGEST_TAG=$INGEST_TAG"
    echo "FLINK_TAG=$FLINK_TAG"
    echo "GATEWAY_TAG=$GATEWAY_TAG"
    echo "SIDECAR_TAG=$SIDECAR_TAG"
    echo "WEB_TAG=$WEB_TAG"
  } > "$ROOT_DIR/.dev-image-tag"
  echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG${NC}"
}
deploy_services() {
  # Deploy the full dev stack: secrets, configs, kustomize manifests, then
  # bootstrap the gateway database schema and a dev user + license.
  echo -e "${BLUE}Deploying services to minikube...${NC}"
  cd "$ROOT_DIR/deploy/k8s/dev"
  # Get the dev image tags recorded by rebuild_images
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
    echo -e "${BLUE}Using image tags:${NC}"
    echo -e " Relay: $RELAY_TAG"
    echo -e " Ingestor: $INGEST_TAG"
    echo -e " Flink: $FLINK_TAG"
    echo -e " Gateway: $GATEWAY_TAG"
    echo -e " Web: $WEB_TAG"
  else
    echo -e "${YELLOW}⚠️ No dev tags found. Using 'latest'. Run rebuild first.${NC}"
    RELAY_TAG="latest"
    INGEST_TAG="latest"
    FLINK_TAG="latest"
    GATEWAY_TAG="latest"
    WEB_TAG="latest"
  fi
  # Create secrets first (if they exist)
  echo -e "${GREEN}${NC} Checking secrets..."
  if ls secrets/*.yaml &> /dev/null; then
    "$SCRIPT_DIR/secret-update" dev || echo -e "${YELLOW} (Some secrets missing - copy from .example files)${NC}"
  else
    echo -e "${YELLOW}⚠️ No secrets found. Copy from .example files:${NC}"
    echo -e "${YELLOW} cd deploy/k8s/dev/secrets${NC}"
    echo -e "${YELLOW} cp ai-secrets.yaml.example ai-secrets.yaml${NC}"
    echo -e "${YELLOW} # Edit with actual values, then run: bin/secret-update dev${NC}"
  fi
  # Update configs
  echo -e "${GREEN}${NC} Updating configs..."
  "$SCRIPT_DIR/config-update" dev
  # Temporarily append the image tags to kustomization.yaml; they are
  # stripped again below (including on failure) so the file stays clean.
  echo -e "${GREEN}${NC} Setting image tags in kustomization..."
  cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev)
images:
- name: dexorder/relay
  newTag: $RELAY_TAG
- name: dexorder/ingestor
  newTag: $INGEST_TAG
- name: dexorder/flink
  newTag: $FLINK_TAG
- name: dexorder/gateway
  newTag: $GATEWAY_TAG
- name: dexorder/ai-web
  newTag: $WEB_TAG
EOF
  # Apply kustomize. Capture the status so the appended image-tag lines are
  # removed even when the apply fails (previously a failure aborted the
  # script via `set -e` and left kustomization.yaml dirty).
  echo -e "${GREEN}${NC} Applying Kubernetes manifests..."
  local apply_rc=0
  kubectl apply -k . || apply_rc=$?
  sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
  if [ "$apply_rc" -ne 0 ]; then
    echo -e "${RED}kubectl apply failed${NC}" >&2
    return "$apply_rc"
  fi
  echo -e "${GREEN}✓ Services deployed${NC}"
  echo ""
  echo -e "${BLUE}Waiting for deployments to be ready...${NC}"
  kubectl wait --for=condition=available --timeout=300s \
    deployment/relay \
    deployment/ingestor \
    deployment/iceberg-catalog \
    deployment/flink-jobmanager \
    deployment/flink-taskmanager \
    2>/dev/null || echo -e "${YELLOW}(Some deployments not ready yet)${NC}"
  # Initialize gateway database schema
  echo -e "${BLUE}Initializing gateway database schema...${NC}"
  echo -e "${GREEN}${NC} Waiting for postgres..."
  kubectl wait --for=condition=ready --timeout=120s pod -l app=postgres 2>/dev/null || {
    echo -e "${YELLOW}⚠️ Postgres not ready yet${NC}"
  }
  # The fallback keeps a failed lookup from killing the script under `set -e`;
  # the [ -n ] guard below handles the empty case.
  local pg_pod
  pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) || pg_pod=""
  if [ -n "$pg_pod" ]; then
    local table_count
    table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
    if [ "$table_count" = "1" ]; then
      echo -e "${GREEN}✓ Gateway schema already exists${NC}"
    else
      echo -e "${GREEN}${NC} Applying gateway schema..."
      # Test the command directly: the old `…; if [ $? -eq 0 ]` form made the
      # failure branch unreachable because `set -e` exited first.
      if kubectl exec -i "$pg_pod" -- psql -U postgres -d iceberg < "$ROOT_DIR/gateway/schema.sql" > /dev/null 2>&1; then
        echo -e "${GREEN}✓ Gateway schema initialized${NC}"
      else
        echo -e "${YELLOW}⚠️ Failed to initialize gateway schema${NC}"
      fi
    fi
    # Create dev user via Better Auth API (skip if already exists)
    echo -e "${GREEN}${NC} Checking for dev user..."
    local user_id
    user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
    if [ -n "$user_id" ]; then
      echo -e "${GREEN}✓ Dev user already exists (cryptochimp@dexorder.ai)${NC}"
    else
      echo -e "${GREEN}${NC} Creating dev user via Better Auth API..."
      echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
      kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
        echo -e "${YELLOW}⚠️ Gateway not ready after 120s${NC}"
      }
      # Give gateway a few seconds to start accepting requests
      sleep 5
      # Create user via custom auth endpoint. Dev-only credentials.
      # `|| true` so a connection failure (curl exits non-zero) does not
      # abort the whole script under `set -e`.
      local response http_code
      response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
        -H "Content-Type: application/json" \
        -d '{
"email": "cryptochimp@dexorder.ai",
"password": "moon2the",
"name": "Crypto Chimp"
}' 2>&1) || true
      http_code=$(echo "$response" | tail -n1)
      if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
        echo -e "${GREEN}✓ User created via auth API${NC}"
      elif [ "$http_code" = "400" ]; then
        echo -e "${YELLOW}⚠️ User may already exist (status 400)${NC}"
      else
        echo -e "${YELLOW}⚠️ API call returned status $http_code${NC}"
      fi
      # Wait a moment for database to be updated
      sleep 2
      # Check again if user exists now
      user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
      if [ -n "$user_id" ]; then
        echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
      fi
    fi
    if [ -n "$user_id" ]; then
      # Create/update license for the user (best-effort: output suppressed,
      # so guard with || true rather than letting `set -e` abort silently)
      echo -e "${GREEN}${NC} Creating pro license for dev user..."
      kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
VALUES (
'$user_id',
'cryptochimp@dexorder.ai',
'pro',
'http://localhost:8080/mcp',
'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
'{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}'
)
ON CONFLICT (user_id) DO UPDATE SET
license_type = EXCLUDED.license_type,
features = EXCLUDED.features,
resource_limits = EXCLUDED.resource_limits,
preferred_model = EXCLUDED.preferred_model,
updated_at = NOW();
" > /dev/null 2>&1 || true
      echo -e "${GREEN}✓ Dev user ready (cryptochimp@dexorder.ai / moon2the)${NC}"
    else
      echo -e "${YELLOW}⚠️ Could not create dev user (gateway may not be ready)${NC}"
    fi
  fi
  echo ""
  echo -e "${GREEN}✓ Dev environment ready!${NC}"
  echo ""
  echo -e "${BLUE}Access the application:${NC}"
  echo -e " Web UI: http://dexorder.local/"
  echo -e " Backend WS: ws://dexorder.local/ws"
  echo ""
  echo -e "${BLUE}Admin UIs (use port-forward):${NC}"
  echo -e " Flink UI: kubectl port-forward svc/flink-jobmanager 8081:8081"
  echo -e " Then open http://localhost:8081"
  echo -e " MinIO Console: kubectl port-forward svc/minio 9001:9001"
  echo -e " Then open http://localhost:9001"
  echo ""
  echo -e "${YELLOW}Note: Run 'minikube tunnel' in another terminal for dexorder.local ingress to work${NC}"
}
show_status() {
  # One-shot snapshot of pods, services, and ingresses in the current namespace.
  echo -e "${BLUE}Kubernetes Resources:${NC}"
  echo
  kubectl get pods,svc,ingress
}
show_logs() {
  # Tail (-f) the logs of the first pod matching the given service name.
  # $1: service name (matched by app= label, then by pod-name substring).
  local service="$1"
  if [ -z "$service" ]; then
    echo -e "${RED}Error: Please specify a service name${NC}"
    echo "Available services: relay, ingestor, flink-jobmanager, flink-taskmanager, kafka, postgres, minio, iceberg-catalog"
    exit 1
  fi
  # Declaration split from assignment (SC2155) so a kubectl failure is not
  # masked by `local`; the explicit fallback keeps `set -e` from aborting.
  local pod
  pod=$(kubectl get pods -l app="$service" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) || pod=""
  if [ -z "$pod" ]; then
    # Fall back to a substring match on pod names.
    pod=$(kubectl get pods | grep "$service" | head -n1 | awk '{print $1}')
  fi
  if [ -z "$pod" ]; then
    echo -e "${RED}Error: No pod found for service '$service'${NC}"
    exit 1
  fi
  echo -e "${BLUE}Tailing logs for $pod...${NC}"
  kubectl logs -f "$pod"
}
open_shell() {
  # Open an interactive shell in the first pod matching the service name,
  # preferring /bin/sh and falling back to /bin/bash.
  local service="$1"
  if [ -z "$service" ]; then
    echo -e "${RED}Error: Please specify a service name${NC}"
    exit 1
  fi
  # Declaration split from assignment (SC2155) so a kubectl failure is not
  # masked by `local`; the explicit fallback keeps `set -e` from aborting.
  local pod
  pod=$(kubectl get pods -l app="$service" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) || pod=""
  if [ -z "$pod" ]; then
    # Fall back to a substring match on pod names.
    pod=$(kubectl get pods | grep "$service" | head -n1 | awk '{print $1}')
  fi
  if [ -z "$pod" ]; then
    echo -e "${RED}Error: No pod found for service '$service'${NC}"
    exit 1
  fi
  echo -e "${BLUE}Opening shell in $pod...${NC}"
  kubectl exec -it "$pod" -- /bin/sh || kubectl exec -it "$pod" -- /bin/bash
}
delete_pvcs() {
  # Delete the PersistentVolumeClaims for one stateful service, or for all
  # of them. $1: kafka|postgres|minio|qdrant|all (default: all).
  local service="${1:-all}"
  echo -e "${BLUE}Deleting PVCs for: $service${NC}"
  case "$service" in
    kafka|postgres|minio|qdrant)
      # The four single-service arms were identical; one arm handles them all.
      kubectl delete pvc -l app="$service" || true
      ;;
    all)
      echo -e "${YELLOW}Deleting all StatefulSet PVCs...${NC}"
      local svc
      for svc in kafka postgres minio qdrant; do
        kubectl delete pvc -l app="$svc" 2>/dev/null || true
      done
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac
  echo -e "${GREEN}✓ PVCs deleted${NC}"
}
deep_restart() {
  # Recreate stateful service(s) from scratch: delete the StatefulSet(s) and
  # their PVCs, then redeploy everything so fresh volumes are provisioned.
  # $1: kafka|postgres|minio|qdrant|all (default: all).
  local service="${1:-all}"
  echo -e "${BLUE}Deep restart for: $service${NC}"
  echo -e "${YELLOW}This will delete the StatefulSet(s) and their PVCs, then redeploy.${NC}"
  case "$service" in
    kafka|postgres|minio|qdrant)
      # The four single-service arms were identical; one arm handles them all.
      echo -e "${GREEN}${NC} Deleting $service StatefulSet..."
      kubectl delete statefulset "$service" || true
      sleep 2  # give the controller a moment to release the PVCs
      delete_pvcs "$service"
      ;;
    all)
      echo -e "${GREEN}${NC} Deleting all StatefulSets..."
      kubectl delete statefulset kafka postgres minio qdrant || true
      sleep 2
      delete_pvcs all
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac
  echo -e "${GREEN}${NC} Redeploying services..."
  deploy_services
  echo -e "${GREEN}✓ Deep restart complete${NC}"
}
clean_all() {
  # Destructive: delete every deployed resource and all PVCs, after an
  # explicit interactive confirmation.
  echo -e "${RED}⚠️ WARNING: This will delete all resources and volumes!${NC}"
  read -p "Are you sure? (yes/no): " confirm
  if [[ "$confirm" != "yes" ]]; then
    echo "Aborted."
    exit 0
  fi
  echo -e "${BLUE}Deleting all resources...${NC}"
  # Use the absolute path: the script never cd's to ROOT_DIR before this
  # point, so the previous relative path only worked from the repo root.
  kubectl delete -k "$ROOT_DIR/deploy/k8s/dev/" || true
  kubectl delete pvc --all || true
  echo -e "${GREEN}✓ Resources deleted${NC}"
}
start_tunnel() {
  # `minikube tunnel` routes LoadBalancer traffic to the host; it needs
  # sudo and blocks in the foreground until interrupted.
  local banner
  for banner in \
    "${BLUE}Starting minikube tunnel...${NC}" \
    "${YELLOW}This requires sudo and will run in the foreground.${NC}" \
    "${YELLOW}Press Ctrl+C to stop.${NC}" \
    ""; do
    echo -e "$banner"
  done
  minikube tunnel
}
# Deploy a single service by re-applying full kustomize (ensures patches are applied)
deploy_service() {
  # $1: service name (used for messaging only; the whole kustomization is
  # re-applied so all patches, including imagePullPolicy, take effect).
  local service="$1"
  if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
    source "$ROOT_DIR/.dev-image-tag"
  fi
  echo -e "${GREEN}${NC} Deploying $service (via kustomize)..."
  cd "$ROOT_DIR/deploy/k8s/dev"
  # Temporarily append image tags to kustomization.yaml. Tags default to
  # "latest" when never recorded, so kustomize never sees an empty newTag
  # (which is invalid).
  cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev)
images:
- name: dexorder/relay
  newTag: ${RELAY_TAG:-latest}
- name: dexorder/ingestor
  newTag: ${INGEST_TAG:-latest}
- name: dexorder/flink
  newTag: ${FLINK_TAG:-latest}
- name: dexorder/gateway
  newTag: ${GATEWAY_TAG:-latest}
- name: dexorder/ai-web
  newTag: ${WEB_TAG:-latest}
EOF
  # Capture the apply status so the appended lines are always stripped,
  # even on failure (previously `set -e` aborted and left the file dirty).
  local apply_rc=0
  kubectl apply -k . || apply_rc=$?
  sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
  if [ "$apply_rc" -ne 0 ]; then
    echo -e "${RED}kubectl apply failed while deploying $service${NC}" >&2
    return "$apply_rc"
  fi
  echo -e "${GREEN}$service deployed${NC}"
}
# Main command routing
# Both CLI tools are prerequisites for every sub-command.
check_minikube
check_kubectl
case "$COMMAND" in
  start)
    # Full bring-up: cluster + ingress, image builds, manifest deploy.
    start_minikube
    rebuild_images
    deploy_services
    ;;
  stop)
    # Check for --keep-data flag
    if [[ "$2" == "--keep-data" ]]; then
      echo -e "${BLUE}Stopping minikube (keeping data)...${NC}"
      minikube stop
      echo -e "${GREEN}✓ Minikube stopped (PVCs preserved)${NC}"
    else
      echo -e "${BLUE}Stopping minikube and deleting PVCs...${NC}"
      # Scale down StatefulSets first to release PVCs
      echo -e "${GREEN}${NC} Scaling down StatefulSets..."
      kubectl scale statefulset kafka postgres minio qdrant --replicas=0 2>/dev/null || true
      # Wait for pods to terminate
      echo -e "${GREEN}${NC} Waiting for pods to terminate..."
      kubectl wait --for=delete pod -l app=kafka --timeout=60s 2>/dev/null || true
      kubectl wait --for=delete pod -l app=postgres --timeout=60s 2>/dev/null || true
      kubectl wait --for=delete pod -l app=minio --timeout=60s 2>/dev/null || true
      kubectl wait --for=delete pod -l app=qdrant --timeout=60s 2>/dev/null || true
      # Now delete PVCs
      delete_pvcs all
      minikube stop
      echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
      echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"
    fi
    ;;
  restart)
    # With a service argument, rebuild and redeploy just that service;
    # otherwise rebuild and redeploy everything.
    if [ -n "$2" ]; then
      rebuild_images "$2"
      deploy_service "$2"
    else
      rebuild_images
      deploy_services
    fi
    ;;
  rebuild)
    # Rebuild images only (no deploy).
    rebuild_images "${2:-}"
    ;;
  deploy)
    # Deploy without rebuilding images.
    if [ -n "$2" ]; then
      deploy_service "$2"
    else
      deploy_services
    fi
    ;;
  status)
    show_status
    ;;
  logs)
    show_logs "$2"
    ;;
  shell)
    open_shell "$2"
    ;;
  clean)
    clean_all
    ;;
  deep-restart)
    deep_restart "${2:-all}"
    ;;
  delete-pvcs)
    delete_pvcs "${2:-all}"
    ;;
  tunnel)
    start_tunnel
    ;;
  *)
    # Unknown command: print help and exit non-zero.
    usage
    ;;
esac