redesign fully scaffolded and web login works
This commit is contained in:
197
bin/client-test
Executable file
197
bin/client-test
Executable file
@@ -0,0 +1,197 @@
|
||||
#!/usr/bin/env bash
#
# client-test: exercise client-py against the local dev environment.
set -e

# Resolve this script's directory and the repository root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

# ANSI color codes for status output (expanded later via `echo -e`).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||
|
||||
# Print usage information and exit.
# Arguments: $1 - exit status (optional; defaults to 1 for error paths,
#                 pass 0 when invoked for explicit help)
usage() {
  local status="${1:-1}"
  echo "Usage: $0 [COMMAND]"
  echo ""
  echo "Test client-py against the development environment"
  echo ""
  echo "Commands:"
  echo "  ohlc      Test OHLCClient API (default)"
  echo "  history   Test low-level HistoryClient"
  echo "  shell     Open Python shell with client installed"
  echo ""
  echo "Examples:"
  echo "  $0           # Run OHLC client test"
  echo "  $0 ohlc      # Run OHLC client test"
  echo "  $0 history   # Run history client test"
  echo "  $0 shell     # Interactive Python shell"
  exit "$status"
}
|
||||
|
||||
# Sub-command to run; defaults to the OHLC client test when no argument given.
COMMAND=${1:-ohlc}
|
||||
|
||||
# Verify the kubectl CLI is available on PATH; abort with an error otherwise.
check_kubectl() {
  if command -v kubectl > /dev/null 2>&1; then
    return 0
  fi
  echo -e "${RED}Error: kubectl not found. Please install kubectl first.${NC}"
  exit 1
}
|
||||
|
||||
# Ensure the pods backing relay, flink-jobmanager, and iceberg-catalog are
# all in the Running state; exit 1 with guidance when any are missing.
check_services() {
  echo -e "${BLUE}Checking if services are running...${NC}"

  local required=("relay" "flink-jobmanager" "iceberg-catalog")
  local down=()
  local svc

  for svc in "${required[@]}"; do
    kubectl get pods -l app="$svc" 2>/dev/null | grep -q "Running" || down+=("$svc")
  done

  if [ ${#down[@]} -eq 0 ]; then
    echo -e "${GREEN}✓ All required services are running${NC}"
    return 0
  fi

  echo -e "${RED}Error: Required services not running: ${down[*]}${NC}"
  echo -e "${YELLOW}Run 'bin/dev start' first to start the environment${NC}"
  exit 1
}
|
||||
|
||||
# Tear down any stale port-forwards, then forward relay, iceberg-catalog,
# and minio to localhost. Exports PORT_FORWARD_PIDS so that
# cleanup_port_forwards can reap the background kubectl processes.
setup_port_forwards() {
  echo -e "${BLUE}Setting up port forwards...${NC}"

  # Drop forwards left over from a previous run.
  local stale
  for stale in relay iceberg-catalog minio; do
    pkill -f "kubectl port-forward.*$stale" 2>/dev/null || true
  done

  # relay: 5558 = market-data pub, 5559 = client requests
  kubectl port-forward svc/relay 5558:5558 5559:5559 >/dev/null 2>&1 &
  local relay_pid=$!

  # iceberg REST catalog on 8181
  kubectl port-forward svc/iceberg-catalog 8181:8181 >/dev/null 2>&1 &
  local iceberg_pid=$!

  # MinIO on 9000 - PyIceberg reads data files through it
  kubectl port-forward svc/minio 9000:9000 >/dev/null 2>&1 &
  local minio_pid=$!

  # Give the forwards a moment to establish before anything connects.
  sleep 2

  echo -e "${GREEN}✓ Port forwards established${NC}"
  echo -e "${YELLOW}  Relay: localhost:5558 (market-data), 5559 (requests)${NC}"
  echo -e "${YELLOW}  Iceberg Catalog: localhost:8181${NC}"
  echo -e "${YELLOW}  MinIO: localhost:9000${NC}"

  # Record PIDs for cleanup on exit.
  export PORT_FORWARD_PIDS="$relay_pid $iceberg_pid $minio_pid"
}
|
||||
|
||||
# Kill every port-forward process recorded in PORT_FORWARD_PIDS (no-op when
# none were started). Intended to run from the EXIT trap.
cleanup_port_forwards() {
  [ -n "$PORT_FORWARD_PIDS" ] || return 0
  echo -e "\n${BLUE}Cleaning up port forwards...${NC}"
  local pf_pid
  for pf_pid in $PORT_FORWARD_PIDS; do
    kill "$pf_pid" 2>/dev/null || true
  done
}
|
||||
|
||||
# Install client-py in editable mode and run the high-level OHLCClient test.
run_ohlc_test() {
  echo -e "${BLUE}Running OHLCClient test...${NC}"
  echo ""

  cd "$ROOT_DIR"

  # Try a quiet install first; on failure, rerun verbosely so errors show.
  if ! pip install -e client-py >/dev/null 2>&1; then
    echo -e "${YELLOW}Installing client-py dependencies...${NC}"
    pip install -e client-py
  fi

  # Run the OHLC API test script.
  python3 test/history_client/client_ohlc_api.py
}
|
||||
|
||||
# Install client-py in editable mode and run the low-level HistoryClient test.
run_history_test() {
  echo -e "${BLUE}Running HistoryClient test...${NC}"
  echo ""

  cd "$ROOT_DIR"

  # Try a quiet install first; on failure, rerun verbosely so errors show.
  if ! pip install -e client-py >/dev/null 2>&1; then
    echo -e "${YELLOW}Installing client-py dependencies...${NC}"
    pip install -e client-py
  fi

  # Run the low-level client test script.
  python3 test/history_client/client.py
}
|
||||
|
||||
# Install client-py in editable mode, print example usage, and drop into an
# interactive Python shell with the dexorder client pre-imported.
open_shell() {
  echo -e "${BLUE}Opening Python shell with dexorder client...${NC}"
  echo ""

  cd "$ROOT_DIR"

  # Try a quiet install first; on failure, rerun verbosely so errors show.
  if ! pip install -e client-py >/dev/null 2>&1; then
    echo -e "${YELLOW}Installing client-py dependencies...${NC}"
    pip install -e client-py
  fi

  echo -e "${BLUE}Example usage:${NC}"
  echo -e "  from dexorder import OHLCClient"
  echo -e "  import asyncio"
  echo -e "  client = OHLCClient('http://localhost:8181', 'tcp://localhost:5559', 'tcp://localhost:5558',"
  echo -e "                      s3_endpoint='http://localhost:9000', s3_access_key='minio', s3_secret_key='minio123')"
  echo -e "  # Use asyncio.run(client.fetch_ohlc(...)) to fetch data"
  echo ""

  # Interactive shell; the -c preamble imports the package and reports status.
  python3 -i -c "
import sys
import os
sys.path.insert(0, os.path.join(os.getcwd(), 'client-py'))
from dexorder import OHLCClient, HistoryClient, IcebergClient
import asyncio
print('✓ dexorder package imported')
print('Available: OHLCClient, HistoryClient, IcebergClient, asyncio')
"
}
|
||||
|
||||
# Tear down port forwards however the script exits.
trap cleanup_port_forwards EXIT

# Main command routing
check_kubectl

case "$COMMAND" in
  ohlc|history|shell)
    # All test modes need healthy services and live port forwards.
    check_services
    setup_port_forwards
    case "$COMMAND" in
      ohlc) run_ohlc_test ;;
      history) run_history_test ;;
      shell) open_shell ;;
    esac
    ;;
  -h|--help|help)
    usage
    ;;
  *)
    echo -e "${RED}Unknown command: $COMMAND${NC}"
    echo ""
    usage
    ;;
esac
|
||||
360
bin/dev
360
bin/dev
@@ -17,19 +17,25 @@ usage() {
|
||||
echo "Manage the minikube development environment"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " start Start minikube and deploy all services"
|
||||
echo " stop Stop minikube"
|
||||
echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|sidecar)"
|
||||
echo " rebuild [svc] Rebuild all custom images, or just one"
|
||||
echo " deploy [svc] Deploy/update all services, or just one"
|
||||
echo " status Show status of all services"
|
||||
echo " logs Tail logs for a service"
|
||||
echo " shell Open a shell in a service pod"
|
||||
echo " clean Delete all resources and volumes"
|
||||
echo " tunnel Start minikube tunnel (for LoadBalancer access)"
|
||||
echo " start Start minikube and deploy all services"
|
||||
echo " stop [--keep-data] Stop minikube (deletes PVCs by default)"
|
||||
echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web)"
|
||||
echo " deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)"
|
||||
echo " rebuild [svc] Rebuild all custom images, or just one"
|
||||
echo " deploy [svc] Deploy/update all services, or just one"
|
||||
echo " delete-pvcs [svc] Delete PVCs for specific service or all (kafka|postgres|minio|qdrant|all)"
|
||||
echo " status Show status of all services"
|
||||
echo " logs Tail logs for a service"
|
||||
echo " shell Open a shell in a service pod"
|
||||
echo " clean Delete all resources and volumes"
|
||||
echo " tunnel Start minikube tunnel (for LoadBalancer access)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 start # Start minikube and deploy everything"
|
||||
echo " $0 stop # Stop minikube and delete PVCs"
|
||||
echo " $0 stop --keep-data # Stop minikube but keep PVCs"
|
||||
echo " $0 deep-restart postgres # Restart postgres with fresh storage"
|
||||
echo " $0 delete-pvcs kafka # Delete kafka PVCs only"
|
||||
echo " $0 rebuild # Rebuild all custom images"
|
||||
echo " $0 logs relay # Tail logs for relay service"
|
||||
echo " $0 shell ingestor # Open shell in ingestor pod"
|
||||
@@ -66,6 +72,16 @@ start_minikube() {
|
||||
# Enable ingress addon
|
||||
echo -e "${BLUE}Enabling ingress addon...${NC}"
|
||||
minikube addons enable ingress
|
||||
|
||||
# Wait for ingress webhook to be ready
|
||||
echo -e "${BLUE}Waiting for ingress webhook to be ready...${NC}"
|
||||
kubectl wait --namespace ingress-nginx \
|
||||
--for=condition=ready pod \
|
||||
--selector=app.kubernetes.io/component=controller \
|
||||
--timeout=120s 2>/dev/null || echo -e "${YELLOW}⚠️ Ingress controller not ready yet${NC}"
|
||||
|
||||
# Give webhook endpoint a moment to start listening
|
||||
sleep 5
|
||||
echo -e "${GREEN}✓ Ingress enabled${NC}"
|
||||
|
||||
# Set docker environment
|
||||
@@ -127,6 +143,16 @@ rebuild_images() {
|
||||
docker tag "dexorder/ai-flink:$FLINK_TAG" "dexorder/flink:$FLINK_TAG"
|
||||
fi
|
||||
|
||||
# Build gateway (Node.js application)
|
||||
if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
|
||||
echo -e "${GREEN}→${NC} Building gateway..."
|
||||
cd "$ROOT_DIR/gateway"
|
||||
GATEWAY_TAG="dev$(date +%Y%m%d%H%M%S)"
|
||||
docker build -t dexorder/gateway:latest -t dexorder/gateway:$GATEWAY_TAG . || exit 1
|
||||
echo -e "${GREEN}✓ Built dexorder/gateway:$GATEWAY_TAG${NC}"
|
||||
cd "$ROOT_DIR"
|
||||
fi
|
||||
|
||||
# Build lifecycle-sidecar (Go binary)
|
||||
if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
|
||||
echo -e "${GREEN}→${NC} Building lifecycle-sidecar..."
|
||||
@@ -137,19 +163,31 @@ rebuild_images() {
|
||||
cd "$ROOT_DIR"
|
||||
fi
|
||||
|
||||
# Build web (Vue.js application)
|
||||
if [ "$service" == "all" ] || [ "$service" == "web" ]; then
|
||||
echo -e "${GREEN}→${NC} Building web..."
|
||||
cd "$ROOT_DIR/web"
|
||||
WEB_TAG="dev$(date +%Y%m%d%H%M%S)"
|
||||
docker build -t dexorder/ai-web:latest -t dexorder/ai-web:$WEB_TAG . || exit 1
|
||||
echo -e "${GREEN}✓ Built dexorder/ai-web:$WEB_TAG${NC}"
|
||||
cd "$ROOT_DIR"
|
||||
fi
|
||||
|
||||
# Save the tags for deployment (all services, preserving any we didn't rebuild)
|
||||
echo "RELAY_TAG=$RELAY_TAG" > "$ROOT_DIR/.dev-image-tag"
|
||||
echo "INGEST_TAG=$INGEST_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "FLINK_TAG=$FLINK_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
|
||||
echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, sidecar=$SIDECAR_TAG${NC}"
|
||||
echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG${NC}"
|
||||
}
|
||||
|
||||
deploy_services() {
|
||||
echo -e "${BLUE}Deploying services to minikube...${NC}"
|
||||
|
||||
cd "$ROOT_DIR"
|
||||
cd "$ROOT_DIR/deploy/k8s/dev"
|
||||
|
||||
# Get the dev image tags
|
||||
if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
|
||||
@@ -158,16 +196,20 @@ deploy_services() {
|
||||
echo -e " Relay: $RELAY_TAG"
|
||||
echo -e " Ingestor: $INGEST_TAG"
|
||||
echo -e " Flink: $FLINK_TAG"
|
||||
echo -e " Gateway: $GATEWAY_TAG"
|
||||
echo -e " Web: $WEB_TAG"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ No dev tags found. Using 'latest'. Run rebuild first.${NC}"
|
||||
RELAY_TAG="latest"
|
||||
INGEST_TAG="latest"
|
||||
FLINK_TAG="latest"
|
||||
GATEWAY_TAG="latest"
|
||||
WEB_TAG="latest"
|
||||
fi
|
||||
|
||||
# Create secrets first (if they exist)
|
||||
echo -e "${GREEN}→${NC} Checking secrets..."
|
||||
if ls deploy/k8s/dev/secrets/*.yaml &> /dev/null; then
|
||||
if ls secrets/*.yaml &> /dev/null; then
|
||||
"$SCRIPT_DIR/secret-update" dev || echo -e "${YELLOW} (Some secrets missing - copy from .example files)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ No secrets found. Copy from .example files:${NC}"
|
||||
@@ -180,13 +222,30 @@ deploy_services() {
|
||||
echo -e "${GREEN}→${NC} Updating configs..."
|
||||
"$SCRIPT_DIR/config-update" dev
|
||||
|
||||
# Apply kustomize with image tag substitution
|
||||
# Create a temporary kustomization overlay with image tags
|
||||
echo -e "${GREEN}→${NC} Setting image tags in kustomization..."
|
||||
cat >> kustomization.yaml <<EOF
|
||||
|
||||
# Image tags (added by bin/dev)
|
||||
images:
|
||||
- name: dexorder/relay
|
||||
newTag: $RELAY_TAG
|
||||
- name: dexorder/ingestor
|
||||
newTag: $INGEST_TAG
|
||||
- name: dexorder/flink
|
||||
newTag: $FLINK_TAG
|
||||
- name: dexorder/gateway
|
||||
newTag: $GATEWAY_TAG
|
||||
- name: dexorder/ai-web
|
||||
newTag: $WEB_TAG
|
||||
EOF
|
||||
|
||||
# Apply kustomize
|
||||
echo -e "${GREEN}→${NC} Applying Kubernetes manifests..."
|
||||
kubectl kustomize deploy/k8s/dev/ | \
|
||||
sed "s|image: dexorder/flink:latest|image: dexorder/flink:$FLINK_TAG|g" | \
|
||||
sed "s|image: dexorder/relay:latest|image: dexorder/relay:$RELAY_TAG|g" | \
|
||||
sed "s|image: dexorder/ingestor:latest|image: dexorder/ingestor:$INGEST_TAG|g" | \
|
||||
kubectl apply -f -
|
||||
kubectl apply -k .
|
||||
|
||||
# Clean up the appended image tags from kustomization.yaml
|
||||
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
|
||||
|
||||
echo -e "${GREEN}✓ Services deployed${NC}"
|
||||
|
||||
@@ -200,11 +259,105 @@ deploy_services() {
|
||||
deployment/flink-taskmanager \
|
||||
2>/dev/null || echo -e "${YELLOW}(Some deployments not ready yet)${NC}"
|
||||
|
||||
# Initialize gateway database schema
|
||||
echo -e "${BLUE}Initializing gateway database schema...${NC}"
|
||||
echo -e "${GREEN}→${NC} Waiting for postgres..."
|
||||
kubectl wait --for=condition=ready --timeout=120s pod -l app=postgres 2>/dev/null || {
|
||||
echo -e "${YELLOW}⚠️ Postgres not ready yet${NC}"
|
||||
}
|
||||
|
||||
pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
|
||||
if [ -n "$pg_pod" ]; then
|
||||
table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
|
||||
if [ "$table_count" = "1" ]; then
|
||||
echo -e "${GREEN}✓ Gateway schema already exists${NC}"
|
||||
else
|
||||
echo -e "${GREEN}→${NC} Applying gateway schema..."
|
||||
kubectl exec -i "$pg_pod" -- psql -U postgres -d iceberg < "$ROOT_DIR/gateway/schema.sql" > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
echo -e "${GREEN}✓ Gateway schema initialized${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Failed to initialize gateway schema${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create dev user via Better Auth API (skip if already exists)
|
||||
echo -e "${GREEN}→${NC} Checking for dev user..."
|
||||
user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user already exists (cryptochimp@dexorder.ai)${NC}"
|
||||
else
|
||||
echo -e "${GREEN}→${NC} Creating dev user via Better Auth API..."
|
||||
echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
|
||||
kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
|
||||
echo -e "${YELLOW}⚠️ Gateway not ready after 120s${NC}"
|
||||
}
|
||||
|
||||
# Give gateway a few seconds to start accepting requests
|
||||
sleep 5
|
||||
|
||||
# Create user via custom auth endpoint
|
||||
response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"email": "cryptochimp@dexorder.ai",
|
||||
"password": "moon2the",
|
||||
"name": "Crypto Chimp"
|
||||
}' 2>&1)
|
||||
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
|
||||
echo -e "${GREEN}✓ User created via auth API${NC}"
|
||||
elif [ "$http_code" = "400" ]; then
|
||||
echo -e "${YELLOW}⚠️ User may already exist (status 400)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ API call returned status $http_code${NC}"
|
||||
fi
|
||||
|
||||
# Wait a moment for database to be updated
|
||||
sleep 2
|
||||
|
||||
# Check again if user exists now
|
||||
user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
# Create/update license for the user
|
||||
echo -e "${GREEN}→${NC} Creating pro license for dev user..."
|
||||
kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
|
||||
INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
|
||||
VALUES (
|
||||
'$user_id',
|
||||
'cryptochimp@dexorder.ai',
|
||||
'pro',
|
||||
'http://localhost:8080/mcp',
|
||||
'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
|
||||
'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
|
||||
'{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}'
|
||||
)
|
||||
ON CONFLICT (user_id) DO UPDATE SET
|
||||
license_type = EXCLUDED.license_type,
|
||||
features = EXCLUDED.features,
|
||||
resource_limits = EXCLUDED.resource_limits,
|
||||
preferred_model = EXCLUDED.preferred_model,
|
||||
updated_at = NOW();
|
||||
" > /dev/null 2>&1
|
||||
echo -e "${GREEN}✓ Dev user ready (cryptochimp@dexorder.ai / moon2the)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Could not create dev user (gateway may not be ready)${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo -e "${GREEN}✓ Dev environment ready!${NC}"
|
||||
echo ""
|
||||
echo -e "${BLUE}Access the application:${NC}"
|
||||
echo -e " Web UI: http://dexorder.local/cryptochimp/"
|
||||
echo -e " Web UI: http://dexorder.local/"
|
||||
echo -e " Backend WS: ws://dexorder.local/ws"
|
||||
echo ""
|
||||
echo -e "${BLUE}Admin UIs (use port-forward):${NC}"
|
||||
@@ -266,6 +419,91 @@ open_shell() {
|
||||
kubectl exec -it "$pod" -- /bin/sh || kubectl exec -it "$pod" -- /bin/bash
|
||||
}
|
||||
|
||||
# Delete the PersistentVolumeClaims for one StatefulSet-backed service, or
# for all of them.
# Arguments: $1 - kafka|postgres|minio|qdrant|all (defaults to all)
delete_pvcs() {
  local service="${1:-all}"

  echo -e "${BLUE}Deleting PVCs for: $service${NC}"

  case "$service" in
    kafka|postgres|minio|qdrant)
      kubectl delete pvc -l app="$service" || true
      ;;
    all)
      echo -e "${YELLOW}Deleting all StatefulSet PVCs...${NC}"
      local app
      for app in kafka postgres minio qdrant; do
        kubectl delete pvc -l app="$app" 2>/dev/null || true
      done
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac

  echo -e "${GREEN}✓ PVCs deleted${NC}"
}
|
||||
|
||||
# Delete a StatefulSet (or all of them) together with its PVCs, then redeploy
# every service so it comes back with fresh storage.
# Arguments: $1 - kafka|postgres|minio|qdrant|all (defaults to all)
deep_restart() {
  local service="${1:-all}"

  echo -e "${BLUE}Deep restart for: $service${NC}"
  echo -e "${YELLOW}This will delete the StatefulSet(s) and their PVCs, then redeploy.${NC}"

  case "$service" in
    kafka|postgres|minio|qdrant)
      echo -e "${GREEN}→${NC} Deleting $service StatefulSet..."
      kubectl delete statefulset "$service" || true
      # Let the deletion settle before removing the claims.
      sleep 2
      delete_pvcs "$service"
      ;;
    all)
      echo -e "${GREEN}→${NC} Deleting all StatefulSets..."
      kubectl delete statefulset kafka postgres minio qdrant || true
      sleep 2
      delete_pvcs all
      ;;
    *)
      echo -e "${RED}Error: Unknown service '$service'${NC}"
      echo "Valid services: kafka, postgres, minio, qdrant, all"
      exit 1
      ;;
  esac

  echo -e "${GREEN}→${NC} Redeploying services..."
  deploy_services

  echo -e "${GREEN}✓ Deep restart complete${NC}"
}
|
||||
|
||||
clean_all() {
|
||||
echo -e "${RED}⚠️ WARNING: This will delete all resources and volumes!${NC}"
|
||||
read -p "Are you sure? (yes/no): " confirm
|
||||
@@ -288,7 +526,7 @@ start_tunnel() {
|
||||
minikube tunnel
|
||||
}
|
||||
|
||||
# Deploy a single service using kubectl set image with the dev tag (never uses 'latest')
|
||||
# Deploy a single service by re-applying full kustomize (ensures patches are applied)
|
||||
deploy_service() {
|
||||
local service="$1"
|
||||
|
||||
@@ -296,28 +534,35 @@ deploy_service() {
|
||||
source "$ROOT_DIR/.dev-image-tag"
|
||||
fi
|
||||
|
||||
local image
|
||||
case "$service" in
|
||||
relay) image="dexorder/relay:$RELAY_TAG" ;;
|
||||
ingestor) image="dexorder/ingestor:$INGEST_TAG" ;;
|
||||
flink) image="dexorder/flink:$FLINK_TAG" ;;
|
||||
*)
|
||||
echo -e "${RED}Unknown service: $service. Use relay, ingestor, or flink.${NC}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
echo -e "${GREEN}→${NC} Deploying $service (via kustomize)..."
|
||||
|
||||
echo -e "${GREEN}→${NC} Deploying $service with image $image..."
|
||||
case "$service" in
|
||||
flink)
|
||||
kubectl set image deployment/flink-jobmanager flink-jobmanager=$image
|
||||
kubectl set image deployment/flink-taskmanager flink-taskmanager=$image
|
||||
;;
|
||||
*)
|
||||
kubectl set image deployment/$service $service=$image
|
||||
;;
|
||||
esac
|
||||
echo -e "${GREEN}✓ $service updated to $image${NC}"
|
||||
# Re-apply full kustomize with image tags properly set
|
||||
# This ensures all patches (including imagePullPolicy) are properly applied
|
||||
cd "$ROOT_DIR/deploy/k8s/dev"
|
||||
|
||||
# Create a temporary kustomization overlay with image tags
|
||||
cat >> kustomization.yaml <<EOF
|
||||
|
||||
# Image tags (added by bin/dev)
|
||||
images:
|
||||
- name: dexorder/relay
|
||||
newTag: $RELAY_TAG
|
||||
- name: dexorder/ingestor
|
||||
newTag: $INGEST_TAG
|
||||
- name: dexorder/flink
|
||||
newTag: $FLINK_TAG
|
||||
- name: dexorder/gateway
|
||||
newTag: $GATEWAY_TAG
|
||||
- name: dexorder/ai-web
|
||||
newTag: $WEB_TAG
|
||||
EOF
|
||||
|
||||
kubectl apply -k .
|
||||
|
||||
# Clean up the appended image tags from kustomization.yaml
|
||||
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
|
||||
|
||||
echo -e "${GREEN}✓ $service deployed${NC}"
|
||||
}
|
||||
|
||||
# Main command routing
|
||||
@@ -331,9 +576,28 @@ case "$COMMAND" in
|
||||
deploy_services
|
||||
;;
|
||||
stop)
|
||||
echo -e "${BLUE}Stopping minikube...${NC}"
|
||||
minikube stop
|
||||
echo -e "${GREEN}✓ Minikube stopped${NC}"
|
||||
# Check for --keep-data flag
|
||||
if [[ "$2" == "--keep-data" ]]; then
|
||||
echo -e "${BLUE}Stopping minikube (keeping data)...${NC}"
|
||||
minikube stop
|
||||
echo -e "${GREEN}✓ Minikube stopped (PVCs preserved)${NC}"
|
||||
else
|
||||
echo -e "${BLUE}Stopping minikube and deleting PVCs...${NC}"
|
||||
# Scale down StatefulSets first to release PVCs
|
||||
echo -e "${GREEN}→${NC} Scaling down StatefulSets..."
|
||||
kubectl scale statefulset kafka postgres minio qdrant --replicas=0 2>/dev/null || true
|
||||
# Wait for pods to terminate
|
||||
echo -e "${GREEN}→${NC} Waiting for pods to terminate..."
|
||||
kubectl wait --for=delete pod -l app=kafka --timeout=60s 2>/dev/null || true
|
||||
kubectl wait --for=delete pod -l app=postgres --timeout=60s 2>/dev/null || true
|
||||
kubectl wait --for=delete pod -l app=minio --timeout=60s 2>/dev/null || true
|
||||
kubectl wait --for=delete pod -l app=qdrant --timeout=60s 2>/dev/null || true
|
||||
# Now delete PVCs
|
||||
delete_pvcs all
|
||||
minikube stop
|
||||
echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
|
||||
echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"
|
||||
fi
|
||||
;;
|
||||
restart)
|
||||
if [ -n "$2" ]; then
|
||||
@@ -366,6 +630,12 @@ case "$COMMAND" in
|
||||
clean)
|
||||
clean_all
|
||||
;;
|
||||
deep-restart)
|
||||
deep_restart "${2:-all}"
|
||||
;;
|
||||
delete-pvcs)
|
||||
delete_pvcs "${2:-all}"
|
||||
;;
|
||||
tunnel)
|
||||
start_tunnel
|
||||
;;
|
||||
|
||||
@@ -93,6 +93,7 @@ else
|
||||
"minio-secret"
|
||||
"ingestor-secrets"
|
||||
"flink-secrets"
|
||||
"gateway-secrets"
|
||||
)
|
||||
|
||||
FAILED=0
|
||||
|
||||
Reference in New Issue
Block a user