chart data loading
This commit is contained in:
11
.gitignore
vendored
11
.gitignore
vendored
@@ -111,3 +111,14 @@ deploy/k8s/prod/secrets/*.yaml
|
||||
|
||||
# Dev environment image tags
|
||||
.dev-image-tag
|
||||
|
||||
# Protobuf copies (canonical files are in /protobuf/)
|
||||
flink/protobuf/
|
||||
relay/protobuf/
|
||||
ingestor/protobuf/
|
||||
gateway/protobuf/
|
||||
client-py/protobuf/
|
||||
|
||||
# Generated protobuf code
|
||||
gateway/src/generated/
|
||||
client-py/dexorder/generated/
|
||||
|
||||
11
bin/deploy
11
bin/deploy
@@ -3,9 +3,9 @@
|
||||
#REMOTE=northamerica-northeast2-docker.pkg.dev/dexorder-430504/dexorder
|
||||
REMOTE=${REMOTE:-git.dxod.org/dexorder/dexorder}
|
||||
|
||||
if [ "$1" != "flink" ] && [ "$1" != "relay" ] && [ "$1" != "ingestor" ] && [ "$1" != "web" ]; then
|
||||
if [ "$1" != "flink" ] && [ "$1" != "relay" ] && [ "$1" != "ingestor" ] && [ "$1" != "web" ] && [ "$1" != "gateway" ] && [ "$1" != "lifecycle-sidecar" ] && [ "$1" != "client-py" ]; then
|
||||
echo
|
||||
echo usage: "$0 "'{flink|relay|ingestor|web} [''dev''] [config] [deployment] [kubernetes] [image_tag]'
|
||||
echo usage: "$0 "'{flink|relay|ingestor|web|gateway|lifecycle-sidecar|client-py} [''dev''] [config] [deployment] [kubernetes] [image_tag]'
|
||||
echo
|
||||
echo ' [''dev''] if the literal string ''dev'' is not the second argument, then the build refuses to run if source code is not checked in. Otherwise, the git revision numbers are used in the image tag.'
|
||||
echo
|
||||
@@ -94,9 +94,11 @@ fi
|
||||
|
||||
echo $ACTION $PROJECT config=$CONFIG deployment=$DEPLOYMENT '=>' $TAG
|
||||
|
||||
# Copy protobuf definitions into project directory for Docker build
|
||||
# Copy protobuf definitions into project directory for Docker build (if not gateway or lifecycle-sidecar)
|
||||
# Using rsync --checksum so unchanged files keep their timestamps (preserves docker layer cache)
|
||||
if [ "$PROJECT" != "lifecycle-sidecar" ]; then
|
||||
rsync -a --checksum --delete protobuf/ $PROJECT/protobuf/
|
||||
fi
|
||||
|
||||
docker build $NO_CACHE -f $PROJECT/Dockerfile --build-arg="CONFIG=$CONFIG" --build-arg="DEPLOYMENT=$DEPLOYMENT" -t dexorder/ai-$PROJECT:latest $PROJECT || exit 1
|
||||
|
||||
@@ -110,6 +112,9 @@ if [ "$IMG_TAG" != "" ]; then
|
||||
fi
|
||||
echo "$(date)" built $REMOTE/ai-$PROJECT:$TAG
|
||||
|
||||
# Output just the tag for scripting purposes (to stderr so scripts can capture it)
|
||||
echo "$TAG" >&2
|
||||
|
||||
if [ "$DEPLOY" == "1" ]; then
|
||||
docker push $REMOTE/ai-$PROJECT:$TAG
|
||||
YAML=$(sed "s#image: dexorder/ai-$PROJECT*#image: $REMOTE/ai-$PROJECT:$TAG#" deploy/k8s/$KUBERNETES.yaml)
|
||||
|
||||
254
bin/dev
254
bin/dev
@@ -19,7 +19,7 @@ usage() {
|
||||
echo "Commands:"
|
||||
echo " start Start minikube and deploy all services"
|
||||
echo " stop [--keep-data] Stop minikube (deletes PVCs by default)"
|
||||
echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web)"
|
||||
echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web|client-py)"
|
||||
echo " deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)"
|
||||
echo " rebuild [svc] Rebuild all custom images, or just one"
|
||||
echo " deploy [svc] Deploy/update all services, or just one"
|
||||
@@ -115,14 +115,18 @@ rebuild_images() {
|
||||
fi
|
||||
|
||||
# Helper: run build, show output, and return just the dev tag via stdout
|
||||
# Build output goes to stderr so the caller can capture only the tag via $()
|
||||
# bin/build now outputs the tag on its last line to stderr
|
||||
build_and_get_tag() {
|
||||
local svc="$1"
|
||||
local output
|
||||
local tag
|
||||
# Capture stderr (which contains both output and the tag)
|
||||
output=$("$SCRIPT_DIR/build" "$svc" dev 2>&1) || { echo "$output" >&2; return 1; }
|
||||
echo "$output" >&2
|
||||
# Extract tag from "built <remote>/ai-<svc>:<tag>" line
|
||||
echo "$output" | grep -oE "ai-${svc}:dev[0-9]+" | tail -1 | cut -d: -f2
|
||||
# Show the build output (excluding the final tag line)
|
||||
echo "$output" | head -n -1 >&2
|
||||
# Return just the tag (last line)
|
||||
tag=$(echo "$output" | tail -n 1)
|
||||
echo "$tag"
|
||||
}
|
||||
|
||||
if [ "$service" == "all" ] || [ "$service" == "relay" ]; then
|
||||
@@ -146,31 +150,27 @@ rebuild_images() {
|
||||
# Build gateway (Node.js application)
|
||||
if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
|
||||
echo -e "${GREEN}→${NC} Building gateway..."
|
||||
cd "$ROOT_DIR/gateway"
|
||||
GATEWAY_TAG="dev$(date +%Y%m%d%H%M%S)"
|
||||
docker build -t dexorder/gateway:latest -t dexorder/gateway:$GATEWAY_TAG . || exit 1
|
||||
echo -e "${GREEN}✓ Built dexorder/gateway:$GATEWAY_TAG${NC}"
|
||||
cd "$ROOT_DIR"
|
||||
GATEWAY_TAG=$(build_and_get_tag gateway) || exit 1
|
||||
docker tag "dexorder/ai-gateway:$GATEWAY_TAG" "dexorder/gateway:$GATEWAY_TAG"
|
||||
fi
|
||||
|
||||
# Build lifecycle-sidecar (Go binary)
|
||||
if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
|
||||
echo -e "${GREEN}→${NC} Building lifecycle-sidecar..."
|
||||
cd "$ROOT_DIR/lifecycle-sidecar"
|
||||
SIDECAR_TAG="dev$(date +%Y%m%d%H%M%S)"
|
||||
docker build -t lifecycle-sidecar:latest -t lifecycle-sidecar:$SIDECAR_TAG . || exit 1
|
||||
echo -e "${GREEN}✓ Built lifecycle-sidecar:$SIDECAR_TAG${NC}"
|
||||
cd "$ROOT_DIR"
|
||||
SIDECAR_TAG=$(build_and_get_tag lifecycle-sidecar) || exit 1
|
||||
docker tag "dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG" "lifecycle-sidecar:$SIDECAR_TAG"
|
||||
fi
|
||||
|
||||
# Build web (Vue.js application)
|
||||
if [ "$service" == "all" ] || [ "$service" == "web" ]; then
|
||||
echo -e "${GREEN}→${NC} Building web..."
|
||||
cd "$ROOT_DIR/web"
|
||||
WEB_TAG="dev$(date +%Y%m%d%H%M%S)"
|
||||
docker build -t dexorder/ai-web:latest -t dexorder/ai-web:$WEB_TAG . || exit 1
|
||||
echo -e "${GREEN}✓ Built dexorder/ai-web:$WEB_TAG${NC}"
|
||||
cd "$ROOT_DIR"
|
||||
WEB_TAG=$(build_and_get_tag web) || exit 1
|
||||
fi
|
||||
|
||||
# Build client-py (Python client library)
|
||||
if [ "$service" == "all" ] || [ "$service" == "client-py" ]; then
|
||||
echo -e "${GREEN}→${NC} Building client-py..."
|
||||
CLIENT_PY_TAG=$(build_and_get_tag client-py) || exit 1
|
||||
fi
|
||||
|
||||
# Save the tags for deployment (all services, preserving any we didn't rebuild)
|
||||
@@ -180,8 +180,9 @@ rebuild_images() {
|
||||
echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
echo "CLIENT_PY_TAG=$CLIENT_PY_TAG" >> "$ROOT_DIR/.dev-image-tag"
|
||||
|
||||
echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG${NC}"
|
||||
echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG, client-py=$CLIENT_PY_TAG${NC}"
|
||||
}
|
||||
|
||||
deploy_services() {
|
||||
@@ -268,89 +269,43 @@ EOF
|
||||
|
||||
pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
|
||||
if [ -n "$pg_pod" ]; then
|
||||
# Wait for postgres to actually be ready to accept connections
|
||||
echo -e "${GREEN}→${NC} Verifying postgres is ready to accept connections..."
|
||||
for i in {1..30}; do
|
||||
if kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "SELECT 1;" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}✓ Postgres ready${NC}"
|
||||
break
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
echo -e "${RED}✗ Postgres not ready after 30 seconds${NC}"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
|
||||
if [ "$table_count" = "1" ]; then
|
||||
echo -e "${GREEN}✓ Gateway schema already exists${NC}"
|
||||
else
|
||||
echo -e "${GREEN}→${NC} Applying gateway schema..."
|
||||
kubectl exec -i "$pg_pod" -- psql -U postgres -d iceberg < "$ROOT_DIR/gateway/schema.sql" > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
if kubectl exec -i "$pg_pod" -- psql -U postgres -d iceberg < "$ROOT_DIR/gateway/schema.sql" > /dev/null 2>&1; then
|
||||
# Verify schema was actually created
|
||||
sleep 1
|
||||
table_count=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'user';" 2>/dev/null | tr -d ' ')
|
||||
if [ "$table_count" = "1" ]; then
|
||||
echo -e "${GREEN}✓ Gateway schema initialized${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Failed to initialize gateway schema${NC}"
|
||||
echo -e "${RED}✗ Failed to verify schema creation${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create dev user via Better Auth API (skip if already exists)
|
||||
echo -e "${GREEN}→${NC} Checking for dev user..."
|
||||
user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user already exists (cryptochimp@dexorder.ai)${NC}"
|
||||
else
|
||||
echo -e "${GREEN}→${NC} Creating dev user via Better Auth API..."
|
||||
echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
|
||||
kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
|
||||
echo -e "${YELLOW}⚠️ Gateway not ready after 120s${NC}"
|
||||
}
|
||||
|
||||
# Give gateway a few seconds to start accepting requests
|
||||
sleep 5
|
||||
|
||||
# Create user via custom auth endpoint
|
||||
response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"email": "cryptochimp@dexorder.ai",
|
||||
"password": "moon2the",
|
||||
"name": "Crypto Chimp"
|
||||
}' 2>&1)
|
||||
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
|
||||
echo -e "${GREEN}✓ User created via auth API${NC}"
|
||||
elif [ "$http_code" = "400" ]; then
|
||||
echo -e "${YELLOW}⚠️ User may already exist (status 400)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ API call returned status $http_code${NC}"
|
||||
fi
|
||||
|
||||
# Wait a moment for database to be updated
|
||||
sleep 2
|
||||
|
||||
# Check again if user exists now
|
||||
user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = 'cryptochimp@dexorder.ai';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
|
||||
echo -e "${RED}✗ Failed to initialize gateway schema${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
# Create/update license for the user
|
||||
echo -e "${GREEN}→${NC} Creating pro license for dev user..."
|
||||
kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
|
||||
INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
|
||||
VALUES (
|
||||
'$user_id',
|
||||
'cryptochimp@dexorder.ai',
|
||||
'pro',
|
||||
'http://localhost:8080/mcp',
|
||||
'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
|
||||
'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
|
||||
'{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}'
|
||||
)
|
||||
ON CONFLICT (user_id) DO UPDATE SET
|
||||
license_type = EXCLUDED.license_type,
|
||||
features = EXCLUDED.features,
|
||||
resource_limits = EXCLUDED.resource_limits,
|
||||
preferred_model = EXCLUDED.preferred_model,
|
||||
updated_at = NOW();
|
||||
" > /dev/null 2>&1
|
||||
echo -e "${GREEN}✓ Dev user ready (cryptochimp@dexorder.ai / moon2the)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Could not create dev user (gateway may not be ready)${NC}"
|
||||
fi
|
||||
# Create dev user (refactored into reusable function)
|
||||
create_dev_user
|
||||
fi
|
||||
|
||||
echo ""
|
||||
@@ -369,6 +324,97 @@ EOF
|
||||
echo -e "${YELLOW}Note: Run 'minikube tunnel' in another terminal for dexorder.local ingress to work${NC}"
|
||||
}
|
||||
|
||||
create_dev_user() {
|
||||
# Dev user configuration (single source of truth)
|
||||
local DEV_EMAIL="tim@dexorder.ai"
|
||||
local DEV_PASSWORD="test1234"
|
||||
local DEV_NAME="Tim"
|
||||
local LICENSE_TYPE="pro"
|
||||
|
||||
echo -e "${BLUE}Initializing dev user...${NC}"
|
||||
|
||||
# Find postgres pod
|
||||
local pg_pod=$(kubectl get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
|
||||
if [ -z "$pg_pod" ]; then
|
||||
echo -e "${YELLOW}⚠️ Postgres pod not found${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if user already exists
|
||||
echo -e "${GREEN}→${NC} Checking for dev user..."
|
||||
local user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user already exists ($DEV_EMAIL)${NC}"
|
||||
else
|
||||
echo -e "${GREEN}→${NC} Creating dev user via Better Auth API..."
|
||||
echo -e "${BLUE}Waiting for gateway to be ready...${NC}"
|
||||
kubectl wait --for=condition=available --timeout=120s deployment/gateway 2>/dev/null || {
|
||||
echo -e "${YELLOW}⚠️ Gateway not ready after 120s${NC}"
|
||||
}
|
||||
|
||||
# Give gateway a few seconds to start accepting requests
|
||||
sleep 5
|
||||
|
||||
# Create user via custom auth endpoint
|
||||
local response=$(curl -s -w "\n%{http_code}" -X POST "http://dexorder.local/api/auth/register" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"email": "'"$DEV_EMAIL"'",
|
||||
"password": "'"$DEV_PASSWORD"'",
|
||||
"name": "'"$DEV_NAME"'"
|
||||
}' 2>&1)
|
||||
|
||||
local http_code=$(echo "$response" | tail -n1)
|
||||
if [ "$http_code" = "200" ] || [ "$http_code" = "201" ]; then
|
||||
echo -e "${GREEN}✓ User created via auth API${NC}"
|
||||
elif [ "$http_code" = "400" ]; then
|
||||
echo -e "${YELLOW}⚠️ User may already exist (status 400)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ API call returned status $http_code${NC}"
|
||||
local body=$(echo "$response" | head -n -1)
|
||||
echo -e "${YELLOW}Response: $body${NC}"
|
||||
fi
|
||||
|
||||
# Wait a moment for database to be updated
|
||||
sleep 2
|
||||
|
||||
# Check again if user exists now
|
||||
user_id=$(kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -t -c "SELECT id FROM \"user\" WHERE email = '$DEV_EMAIL';" 2>/dev/null | tr -d ' ')
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
echo -e "${GREEN}✓ Dev user confirmed in database${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$user_id" ]; then
|
||||
# Create/update license for the user
|
||||
echo -e "${GREEN}→${NC} Creating $LICENSE_TYPE license for dev user..."
|
||||
kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
|
||||
INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
|
||||
VALUES (
|
||||
'$user_id',
|
||||
'$DEV_EMAIL',
|
||||
'$LICENSE_TYPE',
|
||||
'http://localhost:8080/mcp',
|
||||
'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
|
||||
'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
|
||||
'{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}'
|
||||
)
|
||||
ON CONFLICT (user_id) DO UPDATE SET
|
||||
license_type = EXCLUDED.license_type,
|
||||
features = EXCLUDED.features,
|
||||
resource_limits = EXCLUDED.resource_limits,
|
||||
preferred_model = EXCLUDED.preferred_model,
|
||||
updated_at = NOW();
|
||||
" > /dev/null 2>&1
|
||||
echo -e "${GREEN}✓ Dev user ready ($DEV_EMAIL / $DEV_PASSWORD)${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}⚠️ Could not create dev user (gateway may not be ready)${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
show_status() {
|
||||
echo -e "${BLUE}Kubernetes Resources:${NC}"
|
||||
echo ""
|
||||
@@ -472,12 +518,18 @@ deep_restart() {
|
||||
kubectl delete statefulset postgres || true
|
||||
sleep 2
|
||||
delete_pvcs postgres
|
||||
# Force restart iceberg-catalog since it depends on postgres
|
||||
echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on postgres)..."
|
||||
kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
|
||||
;;
|
||||
minio)
|
||||
echo -e "${GREEN}→${NC} Deleting minio StatefulSet..."
|
||||
kubectl delete statefulset minio || true
|
||||
sleep 2
|
||||
delete_pvcs minio
|
||||
# Force restart iceberg-catalog since it depends on minio
|
||||
echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on minio)..."
|
||||
kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
|
||||
;;
|
||||
qdrant)
|
||||
echo -e "${GREEN}→${NC} Deleting qdrant StatefulSet..."
|
||||
@@ -490,6 +542,9 @@ deep_restart() {
|
||||
kubectl delete statefulset kafka postgres minio qdrant || true
|
||||
sleep 2
|
||||
delete_pvcs all
|
||||
# Force restart iceberg-catalog since it depends on postgres and minio
|
||||
echo -e "${GREEN}→${NC} Force restarting iceberg-catalog (depends on postgres/minio)..."
|
||||
kubectl delete pod -l app=iceberg-catalog 2>/dev/null || true
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error: Unknown service '$service'${NC}"
|
||||
@@ -501,6 +556,8 @@ deep_restart() {
|
||||
echo -e "${GREEN}→${NC} Redeploying services..."
|
||||
deploy_services
|
||||
|
||||
# Note: deploy_services already calls create_dev_user, so no need to call it again here
|
||||
|
||||
echo -e "${GREEN}✓ Deep restart complete${NC}"
|
||||
}
|
||||
|
||||
@@ -600,12 +657,17 @@ case "$COMMAND" in
|
||||
fi
|
||||
;;
|
||||
restart)
|
||||
if [ -n "$2" ]; then
|
||||
rebuild_images "$2"
|
||||
deploy_service "$2"
|
||||
else
|
||||
shift # Remove 'restart' from args
|
||||
if [ $# -eq 0 ]; then
|
||||
# No services specified, restart all
|
||||
rebuild_images
|
||||
deploy_services
|
||||
else
|
||||
# Multiple services specified
|
||||
for service in "$@"; do
|
||||
rebuild_images "$service"
|
||||
deploy_service "$service"
|
||||
done
|
||||
fi
|
||||
;;
|
||||
rebuild)
|
||||
|
||||
@@ -1,18 +1,27 @@
|
||||
# Multi-stage build for DexOrder user container
|
||||
FROM python:3.11-slim as builder
|
||||
FROM python:3.11-slim AS builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Install build dependencies
|
||||
# Install build dependencies including protobuf compiler
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
gcc \
|
||||
g++ \
|
||||
protobuf-compiler \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy dependency specifications
|
||||
COPY setup.py .
|
||||
COPY dexorder/ dexorder/
|
||||
|
||||
# Copy protobuf definitions (copied by bin/build from canonical /protobuf/)
|
||||
COPY protobuf/ protobuf/
|
||||
|
||||
# Compile protobufs to Python
|
||||
RUN mkdir -p dexorder/generated && \
|
||||
protoc --python_out=dexorder/generated --proto_path=protobuf protobuf/*.proto && \
|
||||
touch dexorder/generated/__init__.py
|
||||
|
||||
# Install dependencies to a target directory
|
||||
RUN pip install --no-cache-dir --target=/build/deps .
|
||||
|
||||
@@ -38,6 +47,9 @@ COPY --from=builder /build/deps /usr/local/lib/python3.11/site-packages/
|
||||
COPY dexorder/ /app/dexorder/
|
||||
COPY main.py /app/
|
||||
|
||||
# Copy generated protobuf code from builder
|
||||
COPY --from=builder /build/dexorder/generated/ /app/dexorder/generated/
|
||||
|
||||
# Create directories for config, secrets, and data
|
||||
RUN mkdir -p /app/config /app/secrets /app/data && \
|
||||
chown -R dexorder:dexorder /app
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
# Example configuration file for DexOrder user container
|
||||
# Mount this at /app/config/config.yaml in k8s
|
||||
|
||||
# Data directory for persistent storage (workspace, strategies, etc.)
|
||||
# Defaults to ./data relative to working directory if not set
|
||||
# In k8s this is mounted as a PVC at /app/data
|
||||
data_dir: "/app/data"
|
||||
|
||||
# User-specific settings
|
||||
user:
|
||||
timezone: "UTC"
|
||||
|
||||
40
client-py/dexorder/api/ChartingAPI.py
Normal file
40
client-py/dexorder/api/ChartingAPI.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import logging
|
||||
from matplotlib import pyplot as plt
|
||||
import pandas as pd
|
||||
from abc import abstractmethod, ABC
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ChartingAPI(ABC):
|
||||
"""
|
||||
User-facing pyplot charts. Start a Figure with plot_ohlc() or gca(), continue plotting indicators and other
|
||||
time-series using plot_indicator(), add any ad-hoc axes you need, then call show() to send an image to the user.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def plot_ohlc(self, ohlc: pd.DataFrame, axes: plt.Axes = None, **plot_args) -> plt.Figure:
|
||||
"""
|
||||
Plots a standard OHLC candlestick chart in the user's preferred style. Use this to overlay any price-series data
|
||||
or to have a chart for reference above a time-series indicator or other value.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def plot_indicator(self, indicator: pd.DataFrame, domain: tuple[float, float] = None, axes: plt.Axes = None,
|
||||
**plot_args) -> None:
|
||||
"""
|
||||
Plots an indicator in the user's standard style. If axes is None then new axes will be created at the bottom
|
||||
of the current figure.
|
||||
:param indicator:
|
||||
:param domain: The minimum and maximum possible values of the indicator. If None, the domain will be inferred from the data
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def gca(self) -> plt.Figure:
|
||||
"""
|
||||
Returns a generic pyplot gca() pre-configured with the user's preferred styling. Calling show() will
|
||||
send the chart image to the user.
|
||||
Use this only if it doesn't make sense to have a candlestick chart shown anywhere in the figure. Otherwise
|
||||
for most indicators, price series, and other time-series values, it's better to start with plot_ohlc() to
|
||||
at least give the user a chart for reference, even if the primary data you want to show has separate axes.
|
||||
"""
|
||||
3
client-py/dexorder/api/__init__.py
Normal file
3
client-py/dexorder/api/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
384
client-py/dexorder/api/workspace_tools.py
Normal file
384
client-py/dexorder/api/workspace_tools.py
Normal file
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
Workspace Tools for MCP Server
|
||||
|
||||
Provides read/write/patch tools for workspace stores that are persisted
|
||||
in the user container. These stores sync with the gateway and web client.
|
||||
|
||||
Storage location: {DATA_DIR}/workspace/{store_name}.json
|
||||
|
||||
Available tools:
|
||||
- workspace_read(store_name) -> dict
|
||||
- workspace_write(store_name, data) -> None
|
||||
- workspace_patch(store_name, patch) -> dict
|
||||
|
||||
Future: Path-based triggers for container-side reactions to state changes.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
import jsonpatch
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Default workspace storage directory (relative to working dir for local dev)
|
||||
# In production, this is overridden by passing workspace_dir from Config
|
||||
DEFAULT_WORKSPACE_DIR = Path("data/workspace")
|
||||
|
||||
|
||||
class WorkspaceStore:
|
||||
"""
|
||||
Manages persistent workspace stores on the filesystem.
|
||||
|
||||
Stores are JSON files at: {workspace_dir}/{store_name}.json
|
||||
"""
|
||||
|
||||
def __init__(self, workspace_dir: Path = DEFAULT_WORKSPACE_DIR):
|
||||
self.workspace_dir = workspace_dir
|
||||
self.workspace_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Path triggers (for future use)
|
||||
# Map of "store_name/json/pointer/path" -> list of callbacks
|
||||
self._triggers: dict[str, list[Callable[[Any, Any], None]]] = {}
|
||||
|
||||
def _store_path(self, store_name: str) -> Path:
|
||||
"""Get the filesystem path for a store."""
|
||||
# Sanitize store name to prevent directory traversal
|
||||
safe_name = store_name.replace("/", "_").replace("\\", "_").replace("..", "_")
|
||||
return self.workspace_dir / f"{safe_name}.json"
|
||||
|
||||
def read(self, store_name: str) -> dict[str, Any]:
|
||||
"""
|
||||
Read a workspace store from disk.
|
||||
|
||||
Returns:
|
||||
dict with:
|
||||
- exists: bool - whether the store exists
|
||||
- data: Any - the store data (if exists)
|
||||
- error: str - error message (if any)
|
||||
"""
|
||||
path = self._store_path(store_name)
|
||||
|
||||
if not path.exists():
|
||||
log.debug(f"Store '{store_name}' does not exist at {path}")
|
||||
return {"exists": False}
|
||||
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
data = json.load(f)
|
||||
log.debug(f"Read store '{store_name}' from {path}")
|
||||
return {"exists": True, "data": data}
|
||||
except json.JSONDecodeError as e:
|
||||
log.error(f"Failed to parse store '{store_name}': {e}")
|
||||
return {"exists": False, "error": f"Invalid JSON: {e}"}
|
||||
except Exception as e:
|
||||
log.error(f"Failed to read store '{store_name}': {e}")
|
||||
return {"exists": False, "error": str(e)}
|
||||
|
||||
def write(self, store_name: str, data: Any) -> dict[str, Any]:
|
||||
"""
|
||||
Write a workspace store to disk.
|
||||
|
||||
Returns:
|
||||
dict with:
|
||||
- success: bool
|
||||
- error: str - error message (if any)
|
||||
"""
|
||||
path = self._store_path(store_name)
|
||||
|
||||
try:
|
||||
# Read old state for triggers
|
||||
old_state = None
|
||||
if path.exists():
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
old_state = json.load(f)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Write new state
|
||||
with open(path, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
log.debug(f"Wrote store '{store_name}' to {path}")
|
||||
|
||||
# Fire triggers if state changed
|
||||
if old_state != data:
|
||||
self._fire_triggers(store_name, old_state, data)
|
||||
|
||||
return {"success": True}
|
||||
except Exception as e:
|
||||
log.error(f"Failed to write store '{store_name}': {e}")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def patch(self, store_name: str, patch: list[dict]) -> dict[str, Any]:
|
||||
"""
|
||||
Apply a JSON patch (RFC 6902) to a store.
|
||||
|
||||
Args:
|
||||
store_name: Name of the store
|
||||
patch: List of JSON patch operations
|
||||
|
||||
Returns:
|
||||
dict with:
|
||||
- success: bool
|
||||
- data: Any - the new state (if success)
|
||||
- error: str - error message (if any)
|
||||
"""
|
||||
path = self._store_path(store_name)
|
||||
|
||||
try:
|
||||
# Read current state (or empty dict if doesn't exist)
|
||||
old_state = {}
|
||||
if path.exists():
|
||||
with open(path, "r") as f:
|
||||
old_state = json.load(f)
|
||||
|
||||
# Apply patch
|
||||
new_state = jsonpatch.apply_patch(old_state, patch)
|
||||
|
||||
# Write new state
|
||||
with open(path, "w") as f:
|
||||
json.dump(new_state, f, indent=2)
|
||||
|
||||
log.debug(f"Patched store '{store_name}' with {len(patch)} operations")
|
||||
|
||||
# Fire triggers
|
||||
self._fire_triggers(store_name, old_state, new_state)
|
||||
|
||||
return {"success": True, "data": new_state}
|
||||
except jsonpatch.JsonPatchConflict as e:
|
||||
log.error(f"Patch conflict for store '{store_name}': {e}")
|
||||
return {"success": False, "error": f"Patch conflict: {e}"}
|
||||
except Exception as e:
|
||||
log.error(f"Failed to patch store '{store_name}': {e}")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def delete(self, store_name: str) -> dict[str, Any]:
|
||||
"""
|
||||
Delete a workspace store.
|
||||
|
||||
Returns:
|
||||
dict with:
|
||||
- success: bool
|
||||
- error: str - error message (if any)
|
||||
"""
|
||||
path = self._store_path(store_name)
|
||||
|
||||
try:
|
||||
if path.exists():
|
||||
path.unlink()
|
||||
log.debug(f"Deleted store '{store_name}'")
|
||||
return {"success": True}
|
||||
except Exception as e:
|
||||
log.error(f"Failed to delete store '{store_name}': {e}")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def list_stores(self) -> list[str]:
|
||||
"""List all existing store names."""
|
||||
stores = []
|
||||
for path in self.workspace_dir.glob("*.json"):
|
||||
stores.append(path.stem)
|
||||
return stores
|
||||
|
||||
# =========================================================================
|
||||
# Triggers (for future use)
|
||||
# =========================================================================
|
||||
|
||||
def on_change(
|
||||
self,
|
||||
store_name: str,
|
||||
path: str,
|
||||
callback: Callable[[Any, Any], None]
|
||||
) -> Callable[[], None]:
|
||||
"""
|
||||
Register a trigger for when a path changes.
|
||||
|
||||
Args:
|
||||
store_name: Name of the store to watch
|
||||
path: JSON pointer path (e.g., "/drawings" or "/*" for any change)
|
||||
callback: Function called with (old_value, new_value)
|
||||
|
||||
Returns:
|
||||
Unsubscribe function
|
||||
"""
|
||||
key = f"{store_name}{path}"
|
||||
if key not in self._triggers:
|
||||
self._triggers[key] = []
|
||||
self._triggers[key].append(callback)
|
||||
|
||||
log.debug(f"Registered trigger for {key}")
|
||||
|
||||
def unsubscribe():
|
||||
if key in self._triggers:
|
||||
try:
|
||||
self._triggers[key].remove(callback)
|
||||
if not self._triggers[key]:
|
||||
del self._triggers[key]
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return unsubscribe
|
||||
|
||||
def _fire_triggers(self, store_name: str, old_state: Any, new_state: Any) -> None:
|
||||
"""Fire triggers for changes between old and new state."""
|
||||
if not self._triggers:
|
||||
return
|
||||
|
||||
# Fire store-level wildcard triggers
|
||||
wildcard_key = f"{store_name}/*"
|
||||
if wildcard_key in self._triggers:
|
||||
for callback in self._triggers[wildcard_key]:
|
||||
try:
|
||||
callback(old_state, new_state)
|
||||
except Exception as e:
|
||||
log.error(f"Error in trigger callback for {wildcard_key}: {e}")
|
||||
|
||||
# Fire path-specific triggers by computing a patch and checking paths
|
||||
if old_state is not None and new_state is not None:
|
||||
try:
|
||||
patch = jsonpatch.make_patch(old_state, new_state)
|
||||
for op in patch.patch:
|
||||
op_path = op.get("path", "")
|
||||
trigger_key = f"{store_name}{op_path}"
|
||||
if trigger_key in self._triggers:
|
||||
old_value = self._get_value_at_path(old_state, op_path)
|
||||
new_value = self._get_value_at_path(new_state, op_path)
|
||||
for callback in self._triggers[trigger_key]:
|
||||
try:
|
||||
callback(old_value, new_value)
|
||||
except Exception as e:
|
||||
log.error(f"Error in trigger callback for {trigger_key}: {e}")
|
||||
except Exception as e:
|
||||
log.error(f"Error computing patch for triggers: {e}")
|
||||
|
||||
def _get_value_at_path(self, obj: Any, path: str) -> Any:
|
||||
"""Get value at a JSON pointer path."""
|
||||
if not path or path == "/":
|
||||
return obj
|
||||
|
||||
parts = path.split("/")[1:] # Skip empty first part
|
||||
current = obj
|
||||
|
||||
for part in parts:
|
||||
if current is None:
|
||||
return None
|
||||
if isinstance(current, dict):
|
||||
current = current.get(part)
|
||||
elif isinstance(current, list):
|
||||
try:
|
||||
current = current[int(part)]
|
||||
except (ValueError, IndexError):
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
return current
|
||||
|
||||
|
||||
# Global workspace store instance
|
||||
_workspace_store: Optional[WorkspaceStore] = None
|
||||
|
||||
|
||||
def get_workspace_store(workspace_dir: Optional[Path] = None) -> WorkspaceStore:
|
||||
"""Get or create the global workspace store."""
|
||||
global _workspace_store
|
||||
if _workspace_store is None:
|
||||
_workspace_store = WorkspaceStore(workspace_dir or DEFAULT_WORKSPACE_DIR)
|
||||
return _workspace_store
|
||||
|
||||
|
||||
def register_workspace_tools(server):
|
||||
"""
|
||||
Register workspace tools on an MCP server.
|
||||
|
||||
Args:
|
||||
server: MCP Server instance
|
||||
"""
|
||||
store = get_workspace_store()
|
||||
|
||||
@server.call_tool()
|
||||
async def handle_tool_call(name: str, arguments: dict) -> Any:
|
||||
"""Handle workspace tool calls."""
|
||||
if name == "workspace_read":
|
||||
return store.read(arguments.get("store_name", ""))
|
||||
elif name == "workspace_write":
|
||||
return store.write(
|
||||
arguments.get("store_name", ""),
|
||||
arguments.get("data")
|
||||
)
|
||||
elif name == "workspace_patch":
|
||||
return store.patch(
|
||||
arguments.get("store_name", ""),
|
||||
arguments.get("patch", [])
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
@server.list_tools()
|
||||
async def list_tools():
|
||||
"""List available workspace tools."""
|
||||
return [
|
||||
{
|
||||
"name": "workspace_read",
|
||||
"description": "Read a workspace store from persistent storage",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store (e.g., 'chartStore', 'userPreferences')"
|
||||
}
|
||||
},
|
||||
"required": ["store_name"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "workspace_write",
|
||||
"description": "Write a workspace store to persistent storage",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store"
|
||||
},
|
||||
"data": {
|
||||
"description": "Data to write"
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "data"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "workspace_patch",
|
||||
"description": "Apply JSON patch operations to a workspace store",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store"
|
||||
},
|
||||
"patch": {
|
||||
"type": "array",
|
||||
"description": "JSON Patch operations (RFC 6902)",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"op": {"type": "string", "enum": ["add", "remove", "replace", "move", "copy", "test"]},
|
||||
"path": {"type": "string"},
|
||||
"value": {}
|
||||
},
|
||||
"required": ["op", "path"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "patch"]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
log.info("Registered workspace tools")
|
||||
@@ -23,6 +23,23 @@ from mcp.server.stdio import stdio_server
|
||||
|
||||
from dexorder import EventPublisher, start_lifecycle_manager, get_lifecycle_manager
|
||||
from dexorder.events import EventType, UserEvent, DeliverySpec
|
||||
from dexorder.api.workspace_tools import get_workspace_store
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Global Data Directory
|
||||
# =============================================================================
|
||||
|
||||
# Default data directory (relative to working directory for local dev)
|
||||
DEFAULT_DATA_DIR = Path("data")
|
||||
|
||||
# Global data directory - set after config is loaded
|
||||
DATA_DIR: Path = DEFAULT_DATA_DIR
|
||||
|
||||
|
||||
def get_data_dir() -> Path:
|
||||
"""Get the global data directory."""
|
||||
return DATA_DIR
|
||||
|
||||
|
||||
# =============================================================================
|
||||
@@ -60,8 +77,13 @@ class Config:
|
||||
self.config_data: dict = {}
|
||||
self.secrets_data: dict = {}
|
||||
|
||||
# Data directory (set after config load)
|
||||
self.data_dir: Path = DEFAULT_DATA_DIR
|
||||
|
||||
def load(self) -> None:
|
||||
"""Load configuration and secrets from YAML files"""
|
||||
global DATA_DIR
|
||||
|
||||
# Load config.yaml if exists
|
||||
if self.config_path.exists():
|
||||
with open(self.config_path) as f:
|
||||
@@ -78,16 +100,40 @@ class Config:
|
||||
else:
|
||||
logging.warning(f"Secrets file not found: {self.secrets_path}")
|
||||
|
||||
# Set data directory from config or environment
|
||||
# Priority: env var > config file > default
|
||||
data_dir_str = os.getenv("DATA_DIR") or self.config_data.get("data_dir")
|
||||
if data_dir_str:
|
||||
self.data_dir = Path(data_dir_str)
|
||||
else:
|
||||
self.data_dir = DEFAULT_DATA_DIR
|
||||
|
||||
# Update global DATA_DIR
|
||||
DATA_DIR = self.data_dir
|
||||
|
||||
# Ensure data directory exists
|
||||
self.data_dir.mkdir(parents=True, exist_ok=True)
|
||||
logging.info(f"Data directory: {self.data_dir}")
|
||||
|
||||
@property
|
||||
def workspace_dir(self) -> Path:
|
||||
"""Workspace directory under DATA_DIR."""
|
||||
return self.data_dir / "workspace"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# MCP Server Setup
|
||||
# =============================================================================
|
||||
|
||||
def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server:
|
||||
"""Create MCP server with minimal hello world resource"""
|
||||
"""Create MCP server with resources and workspace tools"""
|
||||
|
||||
server = Server(config.mcp_server_name)
|
||||
|
||||
# Initialize workspace store
|
||||
workspace_store = get_workspace_store(config.workspace_dir)
|
||||
logging.info(f"Workspace store initialized at {config.workspace_dir}")
|
||||
|
||||
@server.list_resources()
|
||||
async def list_resources():
|
||||
"""List available resources"""
|
||||
@@ -122,7 +168,89 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
else:
|
||||
raise ValueError(f"Unknown resource: {uri}")
|
||||
|
||||
logging.info(f"MCP server '{config.mcp_server_name}' created")
|
||||
@server.list_tools()
|
||||
async def list_tools():
|
||||
"""List available tools including workspace tools"""
|
||||
return [
|
||||
{
|
||||
"name": "workspace_read",
|
||||
"description": "Read a workspace store from persistent storage",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store (e.g., 'chartStore', 'userPreferences')"
|
||||
}
|
||||
},
|
||||
"required": ["store_name"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "workspace_write",
|
||||
"description": "Write a workspace store to persistent storage",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store"
|
||||
},
|
||||
"data": {
|
||||
"description": "Data to write"
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "data"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "workspace_patch",
|
||||
"description": "Apply JSON patch operations to a workspace store",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"store_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the store"
|
||||
},
|
||||
"patch": {
|
||||
"type": "array",
|
||||
"description": "JSON Patch operations (RFC 6902)",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"op": {"type": "string", "enum": ["add", "remove", "replace", "move", "copy", "test"]},
|
||||
"path": {"type": "string"},
|
||||
"value": {}
|
||||
},
|
||||
"required": ["op", "path"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "patch"]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@server.call_tool()
|
||||
async def handle_tool_call(name: str, arguments: dict):
|
||||
"""Handle tool calls including workspace tools"""
|
||||
if name == "workspace_read":
|
||||
return workspace_store.read(arguments.get("store_name", ""))
|
||||
elif name == "workspace_write":
|
||||
return workspace_store.write(
|
||||
arguments.get("store_name", ""),
|
||||
arguments.get("data")
|
||||
)
|
||||
elif name == "workspace_patch":
|
||||
return workspace_store.patch(
|
||||
arguments.get("store_name", ""),
|
||||
arguments.get("patch", [])
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
logging.info(f"MCP server '{config.mcp_server_name}' created with workspace tools")
|
||||
return server
|
||||
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ setup(
|
||||
"pyyaml>=6.0",
|
||||
"aiofiles>=23.0.0",
|
||||
"mcp>=0.9.0",
|
||||
"jsonpatch>=1.33",
|
||||
],
|
||||
extras_require={
|
||||
"dev": [
|
||||
|
||||
@@ -23,7 +23,10 @@ spec:
|
||||
c.image.startsWith('ghcr.io/dexorder/agent:') ||
|
||||
c.image.startsWith('ghcr.io/dexorder/agent-') ||
|
||||
c.image.startsWith('localhost:5000/dexorder/agent') ||
|
||||
c.image.startsWith('dexorder/agent'))
|
||||
c.image.startsWith('dexorder/agent') ||
|
||||
c.image.startsWith('dexorder/ai-client-py') ||
|
||||
c.image.startsWith('ai-client-py') ||
|
||||
c.image.startsWith('lifecycle-sidecar'))
|
||||
message: "Only approved dexorder agent images are allowed"
|
||||
reason: Forbidden
|
||||
|
||||
|
||||
47
deploy/k8s/dev/agent-config.yaml
Normal file
47
deploy/k8s/dev/agent-config.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
# Agent ConfigMap in dexorder-agents namespace
|
||||
# This is mounted into dynamically created agent pods
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: agent-config
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
app.kubernetes.io/name: agent
|
||||
app.kubernetes.io/component: config
|
||||
data:
|
||||
config.yaml: |
|
||||
# Default configuration for user agent containers
|
||||
# This is mounted at /app/config/config.yaml in agent pods
|
||||
|
||||
# Data directory for persistent storage (workspace, strategies, etc.)
|
||||
# This is mounted as a PVC at /app/data
|
||||
data_dir: "/app/data"
|
||||
|
||||
# User-specific settings (defaults, can be overridden per-user)
|
||||
user:
|
||||
timezone: "UTC"
|
||||
|
||||
# Data sources
|
||||
data:
|
||||
iceberg:
|
||||
catalog_name: "dexorder"
|
||||
# Catalog properties loaded from secrets
|
||||
|
||||
relay:
|
||||
endpoint: "tcp://relay.dexorder.svc.cluster.local:5560"
|
||||
timeout_ms: 5000
|
||||
|
||||
# Strategy settings
|
||||
strategies:
|
||||
max_concurrent: 5
|
||||
default_timeout_minutes: 60
|
||||
|
||||
# Alert settings
|
||||
alerts:
|
||||
max_active: 100
|
||||
|
||||
# Logging
|
||||
logging:
|
||||
level: "INFO"
|
||||
include_timestamps: true
|
||||
@@ -23,7 +23,7 @@ kafka_topics_file: "/topics-dev.yaml" # Use topics-dev.yaml for single broker d
|
||||
|
||||
# Iceberg catalog
|
||||
iceberg_catalog_uri: "http://iceberg-catalog:8181"
|
||||
iceberg_warehouse: "s3://trading-warehouse/"
|
||||
iceberg_warehouse: "s3://warehouse/"
|
||||
iceberg_namespace: "trading"
|
||||
iceberg_table_prefix: "market"
|
||||
hadoop_conf_dir: "/etc/hadoop/conf"
|
||||
|
||||
@@ -31,7 +31,7 @@ data:
|
||||
kubernetes:
|
||||
namespace: dexorder-agents
|
||||
in_cluster: true
|
||||
agent_image: ghcr.io/dexorder/agent:latest
|
||||
agent_image: ai-client-py:latest
|
||||
sidecar_image: lifecycle-sidecar:latest
|
||||
storage_class: standard
|
||||
|
||||
@@ -48,6 +48,8 @@ data:
|
||||
iceberg:
|
||||
catalog_uri: http://iceberg-catalog:8181
|
||||
namespace: gateway
|
||||
ohlc_catalog_uri: http://iceberg-catalog:8181
|
||||
ohlc_namespace: trading
|
||||
s3_endpoint: http://minio:9000
|
||||
|
||||
# Event router (ZeroMQ)
|
||||
|
||||
@@ -158,9 +158,24 @@ spec:
|
||||
- -c
|
||||
- |
|
||||
CLUSTER_ID="dexorder-dev-cluster"
|
||||
if [ ! -f /var/lib/kafka/data/meta.properties ]; then
|
||||
/opt/kafka/bin/kafka-storage.sh format -t $CLUSTER_ID -c /opt/kafka/config/kraft/server.properties
|
||||
LOG_DIR="/var/lib/kafka/data"
|
||||
|
||||
# Ensure log directory exists
|
||||
mkdir -p $LOG_DIR
|
||||
|
||||
# Create temporary config with custom log.dirs for formatting
|
||||
cp /opt/kafka/config/kraft/server.properties /tmp/server.properties
|
||||
echo "log.dirs=$LOG_DIR" >> /tmp/server.properties
|
||||
|
||||
# Format storage if not already formatted
|
||||
if [ ! -f $LOG_DIR/meta.properties ]; then
|
||||
echo "Formatting Kafka storage with cluster ID: $CLUSTER_ID"
|
||||
/opt/kafka/bin/kafka-storage.sh format -t $CLUSTER_ID -c /tmp/server.properties
|
||||
else
|
||||
echo "Kafka storage already formatted, skipping format step"
|
||||
fi
|
||||
|
||||
# Start Kafka server
|
||||
/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties \
|
||||
--override node.id=1 \
|
||||
--override process.roles=broker,controller \
|
||||
@@ -169,7 +184,7 @@ spec:
|
||||
--override controller.quorum.voters=1@kafka:9093 \
|
||||
--override controller.listener.names=CONTROLLER \
|
||||
--override listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT \
|
||||
--override log.dirs=/var/lib/kafka/data \
|
||||
--override log.dirs=$LOG_DIR \
|
||||
--override offsets.topic.replication.factor=1 \
|
||||
--override transaction.state.log.replication.factor=1 \
|
||||
--override transaction.state.log.min.isr=1
|
||||
@@ -315,6 +330,44 @@ spec:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
# MinIO bucket initialization job
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: minio-init-buckets
|
||||
spec:
|
||||
ttlSecondsAfterFinished: 100
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: create-buckets
|
||||
image: minio/mc:latest
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
echo "Waiting for MinIO to be ready..."
|
||||
until mc alias set minio http://minio:9000 $MINIO_ROOT_USER $MINIO_ROOT_PASSWORD; do
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "Creating warehouse bucket..."
|
||||
mc mb minio/warehouse --ignore-existing
|
||||
|
||||
echo "Buckets initialized successfully"
|
||||
env:
|
||||
- name: MINIO_ROOT_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: minio-secret
|
||||
key: root-user
|
||||
- name: MINIO_ROOT_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: minio-secret
|
||||
key: root-password
|
||||
---
|
||||
# Iceberg REST Catalog
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
@@ -422,6 +475,9 @@ spec:
|
||||
- name: wait-for-kafka
|
||||
image: busybox:1.36
|
||||
command: ['sh', '-c', 'until nc -z kafka 9092; do echo waiting for kafka; sleep 2; done;']
|
||||
- name: wait-for-iceberg-catalog
|
||||
image: busybox:1.36
|
||||
command: ['sh', '-c', 'until nc -z iceberg-catalog 8181; do echo waiting for iceberg-catalog; sleep 2; done;']
|
||||
containers:
|
||||
- name: flink-jobmanager
|
||||
image: dexorder/flink:latest
|
||||
|
||||
@@ -8,6 +8,7 @@ resources:
|
||||
- storage-class.yaml
|
||||
- configs/gateway-config.yaml
|
||||
- gateway-health-ingress.yaml
|
||||
- agent-config.yaml # ConfigMap for agent pods in dexorder-agents namespace
|
||||
|
||||
# Dev-specific patches
|
||||
patches:
|
||||
@@ -62,4 +63,92 @@ generatorOptions:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -37,6 +37,14 @@ OHLC periods are represented as seconds.
|
||||
* Relay subscribes to Flink (XSUB) and fanouts to clients (XPUB)
|
||||
* Clients subscribe to specific tickers
|
||||
|
||||
### Symbol Metadata Update Flow (Flink → Gateways)
|
||||
* Ingestors write symbol metadata to Kafka
|
||||
* Flink reads from Kafka, writes to Iceberg symbol_metadata table
|
||||
* After committing to Iceberg, Flink publishes SymbolMetadataUpdated notification on MARKET_DATA_PUB
|
||||
* Gateways subscribe to METADATA_UPDATE topic on startup
|
||||
* Upon receiving notification, gateways reload symbol metadata from Iceberg
|
||||
* This prevents race conditions where gateways start before symbol metadata is available
|
||||
|
||||
### Data Processing (Kafka → Flink → Iceberg)
|
||||
* All market data flows through Kafka (durable event log)
|
||||
* Flink processes streams for aggregations and CEP
|
||||
@@ -90,10 +98,11 @@ All sockets bind on **Relay** (well-known endpoint). Components connect to relay
|
||||
- **Socket Type**:
|
||||
- Relay XPUB (bind) ← Clients SUB (connect) - Port 5558
|
||||
- Relay XSUB (connect) → Flink PUB (bind) - Port 5557
|
||||
- **Message Types**: `Tick`, `OHLC`, `HistoryReadyNotification`
|
||||
- **Message Types**: `Tick`, `OHLC`, `HistoryReadyNotification`, `SymbolMetadataUpdated`
|
||||
- **Topic Formats**:
|
||||
- Market data: `{ticker}|{data_type}` (e.g., `BINANCE:BTC/USDT|tick`)
|
||||
- Notifications: `RESPONSE:{client_id}` or `HISTORY_READY:{request_id}`
|
||||
- System notifications: `METADATA_UPDATE` (for symbol metadata updates)
|
||||
- **Behavior**:
|
||||
- Clients subscribe to ticker topics and notification topics via Relay XPUB
|
||||
- Relay forwards subscriptions to Flink via XSUB
|
||||
@@ -150,6 +159,7 @@ The two-frame envelope is the **logical protocol format**, but physical transmis
|
||||
| 0x10 | SubmitHistoricalRequest | Client request for historical data (async) |
|
||||
| 0x11 | SubmitResponse | Immediate ack with notification topic |
|
||||
| 0x12 | HistoryReadyNotification | Notification that data is ready in Iceberg |
|
||||
| 0x13 | SymbolMetadataUpdated | Notification that symbol metadata refreshed |
|
||||
|
||||
## User Container Event System
|
||||
|
||||
@@ -355,6 +365,11 @@ enum AckStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### Language Notes
|
||||
- JavaScript protobufs will convert field names to camelCase.
|
||||
- Python will retain snake_case.
|
||||
|
||||
|
||||
### Delivery Examples
|
||||
|
||||
```python
|
||||
|
||||
@@ -1,139 +0,0 @@
|
||||
version: '3.8'
|
||||
services:
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:7.7.0
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ports:
|
||||
- "2181:2181"
|
||||
|
||||
kafka:
|
||||
image: confluentinc/cp-kafka:7.7.0
|
||||
depends_on:
|
||||
- zookeeper
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
|
||||
postgres:
|
||||
image: postgres:15
|
||||
environment:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: password
|
||||
POSTGRES_DB: iceberg
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
|
||||
# MinIO for S3-compatible storage (Iceberg warehouse)
|
||||
minio:
|
||||
image: minio/minio:latest
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: minio
|
||||
MINIO_ROOT_PASSWORD: minio123
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- "9001:9001"
|
||||
volumes:
|
||||
- minio_data:/data
|
||||
|
||||
# Iceberg REST Catalog
|
||||
iceberg-catalog:
|
||||
image: tabulario/iceberg-rest:latest
|
||||
environment:
|
||||
- CATALOG_WAREHOUSE=s3://warehouse/
|
||||
- CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
|
||||
- CATALOG_S3_ENDPOINT=http://minio:9000
|
||||
- CATALOG_S3_ACCESS__KEY__ID=minio
|
||||
- CATALOG_S3_SECRET__ACCESS__KEY=minio123
|
||||
- CATALOG_S3_PATH__STYLE__ACCESS=true
|
||||
ports:
|
||||
- "8181:8181"
|
||||
depends_on:
|
||||
- postgres
|
||||
- minio
|
||||
|
||||
flink-jobmanager:
|
||||
image: flink:1.20-scala_2.12
|
||||
command: jobmanager
|
||||
environment:
|
||||
- JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
|
||||
ports:
|
||||
- "6123:6123"
|
||||
- "8081:8081"
|
||||
depends_on:
|
||||
- kafka
|
||||
- postgres
|
||||
|
||||
flink-taskmanager:
|
||||
image: flink:1.20-scala_2.12
|
||||
command: taskmanager
|
||||
environment:
|
||||
- JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
|
||||
depends_on:
|
||||
- flink-jobmanager
|
||||
- kafka
|
||||
|
||||
relay:
|
||||
build:
|
||||
context: relay
|
||||
dockerfile: relay/Dockerfile
|
||||
ports:
|
||||
- "5555:5555" # Ingestor work queue
|
||||
- "5556:5556" # Ingestor responses
|
||||
- "5558:5558" # Market data (clients)
|
||||
- "5559:5559" # Client requests
|
||||
environment:
|
||||
- RUST_LOG=relay=info
|
||||
- CONFIG_PATH=/config/config.yaml
|
||||
volumes:
|
||||
- ./relay/config.example.yaml:/config/config.yaml:ro
|
||||
depends_on:
|
||||
- flink-jobmanager
|
||||
restart: unless-stopped
|
||||
|
||||
ingestor:
|
||||
build:
|
||||
context: ingestor
|
||||
dockerfile: ingestor/Dockerfile
|
||||
environment:
|
||||
- LOG_LEVEL=info
|
||||
- CONFIG_PATH=/config/config.yaml
|
||||
volumes:
|
||||
- ./ingestor/config.example.yaml:/config/config.yaml:ro
|
||||
depends_on:
|
||||
- kafka
|
||||
- relay
|
||||
restart: unless-stopped
|
||||
|
||||
history-test-client:
|
||||
build:
|
||||
context: test/history_client
|
||||
dockerfile: test/history_client/Dockerfile
|
||||
depends_on:
|
||||
- relay
|
||||
- ingestor
|
||||
- flink-jobmanager
|
||||
- iceberg-catalog
|
||||
environment:
|
||||
- ICEBERG_CATALOG_URI=http://iceberg-catalog:8181
|
||||
- RELAY_ENDPOINT=tcp://relay:5555
|
||||
- NOTIFICATION_ENDPOINT=tcp://flink:5557
|
||||
volumes:
|
||||
- ./client-py:/client-py:ro
|
||||
profiles:
|
||||
- test
|
||||
# Wait for services to start up, then run new OHLCClient-based test
|
||||
command: sh -c "sleep 10 && pip install -e /client-py && python client_ohlc_api.py"
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
minio_data:
|
||||
@@ -9,7 +9,10 @@ import com.dexorder.flink.publisher.HistoryNotificationForwarder;
|
||||
import com.dexorder.flink.publisher.HistoryNotificationFunction;
|
||||
import com.dexorder.flink.publisher.OHLCBatchWrapper;
|
||||
import com.dexorder.flink.publisher.OHLCBatchDeserializer;
|
||||
import com.dexorder.flink.publisher.MarketWrapper;
|
||||
import com.dexorder.flink.publisher.MarketDeserializer;
|
||||
import com.dexorder.flink.sink.HistoricalBatchWriter;
|
||||
import com.dexorder.flink.sink.SymbolMetadataWriter;
|
||||
import com.dexorder.flink.zmq.ZmqChannelManager;
|
||||
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
|
||||
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
|
||||
@@ -189,6 +192,42 @@ public class TradingFlinkApp {
|
||||
|
||||
LOG.info("Historical pipeline configured: HistoricalBatchWriter -> HistoryNotificationFunction");
|
||||
|
||||
// Symbol metadata pipeline: Kafka -> Iceberg -> Broadcast
|
||||
// Set up Kafka source for symbol metadata
|
||||
KafkaSource<MarketWrapper> symbolSource = KafkaSource.<MarketWrapper>builder()
|
||||
.setBootstrapServers(config.getKafkaBootstrapServers())
|
||||
.setTopics("symbol-metadata")
|
||||
.setGroupId("flink-symbol-metadata-consumer")
|
||||
.setStartingOffsets(OffsetsInitializer.earliest())
|
||||
.setValueOnlyDeserializer(new MarketDeserializer())
|
||||
.build();
|
||||
|
||||
DataStream<MarketWrapper> symbolStream = env
|
||||
.fromSource(symbolSource, WatermarkStrategy.noWatermarks(), "Symbol Metadata Kafka Source");
|
||||
|
||||
LOG.info("Symbol metadata Kafka source configured");
|
||||
|
||||
// Create table loader for symbol_metadata table
|
||||
TableLoader symbolTableLoader = TableLoader.fromCatalog(
|
||||
catalogLoader2,
|
||||
TableIdentifier.of(config.getIcebergNamespace(), "symbol_metadata")
|
||||
);
|
||||
|
||||
LOG.info("Symbol metadata table loader configured: {}.symbol_metadata", config.getIcebergNamespace());
|
||||
|
||||
// Symbol metadata pipeline: write to Iceberg and send notification
|
||||
// Uses PUSH socket to job manager's PULL endpoint (same pattern as HistoryNotificationPublisher)
|
||||
// Task managers connect to flink-jobmanager service (not bind address which is *)
|
||||
String notificationEndpoint = "tcp://flink-jobmanager:" + config.getNotificationPullPort();
|
||||
DataStream<MarketWrapper> processedSymbolStream = symbolStream
|
||||
.flatMap(new SymbolMetadataWriter(symbolTableLoader, notificationEndpoint))
|
||||
.setParallelism(1);
|
||||
|
||||
// Discard sink
|
||||
processedSymbolStream.addSink(new DiscardingSink<>()).setParallelism(1);
|
||||
|
||||
LOG.info("Symbol metadata pipeline configured: SymbolMetadataWriter -> Iceberg -> METADATA_UPDATE notification");
|
||||
|
||||
// TODO: Set up CEP patterns and triggers
|
||||
// TODO: Set up realtime tick processing
|
||||
|
||||
|
||||
@@ -45,6 +45,7 @@ public class SchemaInitializer {
|
||||
|
||||
// Initialize each table
|
||||
initializeOhlcTable();
|
||||
initializeSymbolMetadataTable();
|
||||
|
||||
// Add more table initializations here as needed
|
||||
|
||||
@@ -86,7 +87,25 @@ public class SchemaInitializer {
|
||||
TableIdentifier tableId = TableIdentifier.of(namespace, "ohlc");
|
||||
|
||||
try {
|
||||
if (catalog.tableExists(tableId)) {
|
||||
boolean tableExists = false;
|
||||
try {
|
||||
tableExists = catalog.tableExists(tableId);
|
||||
} catch (org.apache.iceberg.exceptions.ServiceFailureException e) {
|
||||
// Handle corrupted table metadata (e.g., missing S3 files)
|
||||
if (e.getMessage().contains("Location does not exist")) {
|
||||
LOG.warn("Table {} has corrupted metadata, dropping and recreating", tableId);
|
||||
try {
|
||||
catalog.dropTable(tableId, false);
|
||||
} catch (Exception dropEx) {
|
||||
LOG.warn("Failed to drop corrupted table (may not exist in catalog)", dropEx);
|
||||
}
|
||||
tableExists = false;
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
if (tableExists) {
|
||||
Table existing = catalog.loadTable(tableId);
|
||||
String existingVersion = existing.properties().get(SCHEMA_VERSION_PROP);
|
||||
if (!OHLC_SCHEMA_VERSION.equals(existingVersion)) {
|
||||
@@ -152,4 +171,105 @@ public class SchemaInitializer {
|
||||
throw new RuntimeException("OHLC table initialization failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the symbol_metadata table if it doesn't exist.
|
||||
*/
|
||||
private static final String SYMBOL_METADATA_SCHEMA_VERSION = "1";
|
||||
|
||||
private void initializeSymbolMetadataTable() {
|
||||
TableIdentifier tableId = TableIdentifier.of(namespace, "symbol_metadata");
|
||||
|
||||
try {
|
||||
boolean tableExists = false;
|
||||
try {
|
||||
tableExists = catalog.tableExists(tableId);
|
||||
} catch (org.apache.iceberg.exceptions.ServiceFailureException e) {
|
||||
// Handle corrupted table metadata (e.g., missing S3 files)
|
||||
if (e.getMessage().contains("Location does not exist")) {
|
||||
LOG.warn("Table {} has corrupted metadata, dropping and recreating", tableId);
|
||||
try {
|
||||
catalog.dropTable(tableId, false);
|
||||
} catch (Exception dropEx) {
|
||||
LOG.warn("Failed to drop corrupted table (may not exist in catalog)", dropEx);
|
||||
}
|
||||
tableExists = false;
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
if (tableExists) {
|
||||
Table existing = catalog.loadTable(tableId);
|
||||
String existingVersion = existing.properties().get(SCHEMA_VERSION_PROP);
|
||||
if (!SYMBOL_METADATA_SCHEMA_VERSION.equals(existingVersion)) {
|
||||
LOG.warn("Table {} has schema version '{}', expected '{}' — manual migration required",
|
||||
tableId, existingVersion, SYMBOL_METADATA_SCHEMA_VERSION);
|
||||
}
|
||||
LOG.info("Table {} already exists at schema version {} — skipping creation", tableId, existingVersion);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG.info("Creating symbol_metadata table: {}", tableId);
|
||||
|
||||
// Define the symbol metadata schema
|
||||
Schema schema = new Schema(
|
||||
// Primary key fields
|
||||
required(1, "exchange_id", Types.StringType.get(), "Exchange identifier (e.g., BINANCE)"),
|
||||
required(2, "market_id", Types.StringType.get(), "Market symbol (e.g., BTC/USDT)"),
|
||||
|
||||
// Market information
|
||||
optional(3, "market_type", Types.StringType.get(), "Market type (spot, futures, swap)"),
|
||||
optional(4, "description", Types.StringType.get(), "Human-readable description"),
|
||||
optional(5, "base_asset", Types.StringType.get(), "Base asset (e.g., BTC)"),
|
||||
optional(6, "quote_asset", Types.StringType.get(), "Quote asset (e.g., USDT)"),
|
||||
|
||||
// Precision/denominator information
|
||||
optional(7, "tick_denom", Types.LongType.get(), "Tick price denominator (10^n for n decimals)"),
|
||||
optional(8, "base_denom", Types.LongType.get(), "Base asset denominator"),
|
||||
optional(9, "quote_denom", Types.LongType.get(), "Quote asset denominator"),
|
||||
|
||||
// Supported timeframes
|
||||
optional(10, "supported_period_seconds", Types.ListType.ofRequired(11, Types.IntegerType.get()), "Supported OHLC periods in seconds"),
|
||||
|
||||
// Optional timing information
|
||||
optional(12, "earliest_time", Types.LongType.get(), "Earliest available data timestamp (microseconds)"),
|
||||
|
||||
// Metadata
|
||||
required(13, "updated_at", Types.LongType.get(), "Timestamp when metadata was last updated (microseconds)")
|
||||
);
|
||||
|
||||
// Create the table with partitioning and properties
|
||||
// Use format version 2 with UPSERT capabilities via equality deletes
|
||||
Table table = catalog.buildTable(tableId, schema)
|
||||
.withPartitionSpec(org.apache.iceberg.PartitionSpec.builderFor(schema)
|
||||
.identity("exchange_id")
|
||||
.build())
|
||||
.withProperty("write.format.default", "parquet")
|
||||
.withProperty("write.parquet.compression-codec", "snappy")
|
||||
.withProperty("write.metadata.compression-codec", "gzip")
|
||||
.withProperty("format-version", "2")
|
||||
.withProperty("write.upsert.enabled", "true")
|
||||
.withProperty(SCHEMA_VERSION_PROP, SYMBOL_METADATA_SCHEMA_VERSION)
|
||||
.create();
|
||||
|
||||
// Add identifier fields for UPSERT operations
|
||||
// This allows Iceberg to use equality deletes for deduplication
|
||||
table.updateProperties()
|
||||
.set("write.upsert.enabled", "true")
|
||||
.commit();
|
||||
|
||||
// Set the identifier fields (primary key) for the table
|
||||
// Iceberg will use these for equality deletes during UPSERT
|
||||
table.updateSchema()
|
||||
.setIdentifierFields("exchange_id", "market_id")
|
||||
.commit();
|
||||
|
||||
LOG.info("Successfully created symbol_metadata table: {}", tableId);
|
||||
|
||||
} catch (Exception e) {
|
||||
LOG.error("Failed to initialize symbol_metadata table: {}", tableId, e);
|
||||
throw new RuntimeException("symbol_metadata table initialization failed", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
package com.dexorder.flink.publisher;
|
||||
|
||||
||||
|
||||
/**
|
||||
* Deserializes Market protobuf messages from Kafka.
|
||||
* Expects ZMQ protocol envelope: [version byte][type_id byte][protobuf payload]
|
||||
*/
|
||||
public class MarketDeserializer implements DeserializationSchema<MarketWrapper> {
|
||||
private static final long serialVersionUID = 1L;
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MarketDeserializer.class);
|
||||
|
||||
private static final byte PROTOCOL_VERSION = 0x01;
|
||||
private static final byte MARKET_TYPE_ID = 0x05;
|
||||
|
||||
@Override
|
||||
public MarketWrapper deserialize(byte[] message) throws IOException {
|
||||
if (message == null || message.length < 3) {
|
||||
LOG.warn("Invalid message: too short (length={})", message == null ? 0 : message.length);
|
||||
return null;
|
||||
}
|
||||
|
||||
// Parse ZMQ protocol envelope
|
||||
byte version = message[0];
|
||||
byte typeId = message[1];
|
||||
|
||||
if (version != PROTOCOL_VERSION) {
|
||||
LOG.warn("Unknown protocol version: 0x{}", Integer.toHexString(version & 0xFF));
|
||||
return null;
|
||||
}
|
||||
|
||||
if (typeId != MARKET_TYPE_ID) {
|
||||
LOG.warn("Expected MARKET type (0x05), got: 0x{}", Integer.toHexString(typeId & 0xFF));
|
||||
return null;
|
||||
}
|
||||
|
||||
// Extract protobuf payload (everything after first 2 bytes)
|
||||
byte[] payload = new byte[message.length - 2];
|
||||
System.arraycopy(message, 2, payload, 0, payload.length);
|
||||
|
||||
try {
|
||||
// Deserialize protobuf
|
||||
Market market = Market.parseFrom(payload);
|
||||
|
||||
// Debug log the deserialized market
|
||||
LOG.info("Deserialized Market: exchange_id='{}', market_id='{}', base='{}', quote='{}'",
|
||||
market.getExchangeId(), market.getMarketId(), market.getBaseAsset(), market.getQuoteAsset());
|
||||
|
||||
// Convert to MarketWrapper
|
||||
MarketWrapper wrapper = new MarketWrapper();
|
||||
wrapper.setExchangeId(market.getExchangeId());
|
||||
wrapper.setMarketId(market.getMarketId());
|
||||
wrapper.setMarketType(market.getMarketType());
|
||||
wrapper.setDescription(market.getDescription());
|
||||
wrapper.setBaseAsset(market.getBaseAsset());
|
||||
wrapper.setQuoteAsset(market.getQuoteAsset());
|
||||
wrapper.setTickDenom(market.getTickDenom());
|
||||
wrapper.setBaseDenom(market.getBaseDenom());
|
||||
wrapper.setQuoteDenom(market.getQuoteDenom());
|
||||
|
||||
// Convert repeated field to List
|
||||
List<Integer> supportedPeriods = new ArrayList<>(market.getSupportedPeriodSecondsList());
|
||||
wrapper.setSupportedPeriodSeconds(supportedPeriods);
|
||||
|
||||
wrapper.setEarliestTime(market.getEarliestTime());
|
||||
|
||||
return wrapper;
|
||||
} catch (Exception e) {
|
||||
LOG.error("Failed to deserialize Market protobuf", e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEndOfStream(MarketWrapper nextElement) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TypeInformation<MarketWrapper> getProducedType() {
|
||||
return TypeInformation.of(MarketWrapper.class);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,143 @@
|
||||
package com.dexorder.flink.publisher;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.List;
|
||||
|
||||
/**
 * Wrapper for Market protobuf message from Kafka.
 * Represents symbol metadata for a trading pair.
 *
 * Plain serializable POJO so Flink can ship it between operators.
 */
public class MarketWrapper implements Serializable {
    private static final long serialVersionUID = 1L;

    // Identity
    private String exchangeId;
    private String marketId;

    // Descriptive metadata
    private String marketType;
    private String description;
    private String baseAsset;
    private String quoteAsset;

    // Precision denominators
    private long tickDenom;
    private long baseDenom;
    private long quoteDenom;

    // Capabilities / timing
    private List<Integer> supportedPeriodSeconds;
    private long earliestTime;

    /** No-arg constructor required for Flink POJO serialization. */
    public MarketWrapper() {
    }

    /** Convenience constructor populating every field at once. */
    public MarketWrapper(String exchangeId, String marketId, String marketType, String description,
                         String baseAsset, String quoteAsset, long tickDenom, long baseDenom,
                         long quoteDenom, List<Integer> supportedPeriodSeconds, long earliestTime) {
        this.exchangeId = exchangeId;
        this.marketId = marketId;
        this.marketType = marketType;
        this.description = description;
        this.baseAsset = baseAsset;
        this.quoteAsset = quoteAsset;
        this.tickDenom = tickDenom;
        this.baseDenom = baseDenom;
        this.quoteDenom = quoteDenom;
        this.supportedPeriodSeconds = supportedPeriodSeconds;
        this.earliestTime = earliestTime;
    }

    // Accessors (compact style; no logic beyond field access)

    public String getExchangeId() { return exchangeId; }
    public void setExchangeId(String exchangeId) { this.exchangeId = exchangeId; }

    public String getMarketId() { return marketId; }
    public void setMarketId(String marketId) { this.marketId = marketId; }

    public String getMarketType() { return marketType; }
    public void setMarketType(String marketType) { this.marketType = marketType; }

    public String getDescription() { return description; }
    public void setDescription(String description) { this.description = description; }

    public String getBaseAsset() { return baseAsset; }
    public void setBaseAsset(String baseAsset) { this.baseAsset = baseAsset; }

    public String getQuoteAsset() { return quoteAsset; }
    public void setQuoteAsset(String quoteAsset) { this.quoteAsset = quoteAsset; }

    public long getTickDenom() { return tickDenom; }
    public void setTickDenom(long tickDenom) { this.tickDenom = tickDenom; }

    public long getBaseDenom() { return baseDenom; }
    public void setBaseDenom(long baseDenom) { this.baseDenom = baseDenom; }

    public long getQuoteDenom() { return quoteDenom; }
    public void setQuoteDenom(long quoteDenom) { this.quoteDenom = quoteDenom; }

    public List<Integer> getSupportedPeriodSeconds() { return supportedPeriodSeconds; }
    public void setSupportedPeriodSeconds(List<Integer> supportedPeriodSeconds) { this.supportedPeriodSeconds = supportedPeriodSeconds; }

    public long getEarliestTime() { return earliestTime; }
    public void setEarliestTime(long earliestTime) { this.earliestTime = earliestTime; }

    /** Short identity-focused rendering; intentionally omits denominators and timing. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("MarketWrapper{");
        sb.append("exchangeId='").append(exchangeId).append('\'');
        sb.append(", marketId='").append(marketId).append('\'');
        sb.append(", marketType='").append(marketType).append('\'');
        sb.append(", baseAsset='").append(baseAsset).append('\'');
        sb.append(", quoteAsset='").append(quoteAsset).append('\'');
        return sb.append('}').toString();
    }
}
|
||||
@@ -0,0 +1,257 @@
|
||||
package com.dexorder.flink.sink;
|
||||
|
||||
import com.dexorder.flink.publisher.MarketWrapper;
|
||||
import org.apache.flink.api.common.functions.RichFlatMapFunction;
|
||||
import org.apache.flink.configuration.Configuration;
|
||||
import org.apache.flink.util.Collector;
|
||||
import org.apache.iceberg.FileFormat;
|
||||
import org.apache.iceberg.PartitionKey;
|
||||
import org.apache.iceberg.Table;
|
||||
import org.apache.iceberg.data.GenericAppenderFactory;
|
||||
import org.apache.iceberg.data.GenericRecord;
|
||||
import org.apache.iceberg.data.Record;
|
||||
import org.apache.iceberg.encryption.EncryptedOutputFile;
|
||||
import org.apache.iceberg.flink.TableLoader;
|
||||
import org.apache.iceberg.io.DataWriter;
|
||||
import org.apache.iceberg.io.OutputFileFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.zeromq.SocketType;
|
||||
import org.zeromq.ZContext;
|
||||
import org.zeromq.ZMQ;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
/**
 * Writes symbol metadata to Iceberg symbol_metadata table.
 *
 * Deduplicates symbols in-memory to prevent writing duplicates.
 * Batches writes by exchange to minimize file fragmentation.
 * After committing to Iceberg, sends a notification via ZMQ PUSH socket to job manager.
 *
 * NOTE(review): the dedup set is per parallel subtask and is not checkpointed,
 * so the same symbol reaching two subtasks (or arriving after a restart) would
 * be written again — presumably the UPSERT-enabled table absorbs this; confirm.
 */
public class SymbolMetadataWriter extends RichFlatMapFunction<MarketWrapper, MarketWrapper> {
    private static final long serialVersionUID = 1L;
    private static final Logger LOG = LoggerFactory.getLogger(SymbolMetadataWriter.class);
    private static final int BATCH_SIZE = 500; // Commit every 500 symbols per exchange
    // Envelope constants for the ZMQ notification frame sent after each commit.
    private static final byte PROTOCOL_VERSION = 0x01;
    private static final byte MSG_TYPE_SYMBOL_METADATA_UPDATED = 0x13;

    private final TableLoader tableLoader;
    private final String notificationEndpoint; // Job manager's PULL socket endpoint

    // All transient state below is rebuilt in open(); none of it is checkpointed.
    private transient Table table;
    private transient Set<String> seenSymbols; // Track seen symbols to prevent duplicates
    private transient GenericAppenderFactory appenderFactory;
    private transient OutputFileFactory fileFactory;
    private transient ZContext zmqContext;
    private transient ZMQ.Socket pushSocket; // PUSH socket to job manager

    // Batching state per exchange
    private transient java.util.Map<String, DataWriter<Record>> writersByExchange;
    private transient java.util.Map<String, Integer> countsPerExchange;
    private transient java.util.Map<String, List<MarketWrapper>> pendingOutputPerExchange;

    /**
     * @param tableLoader          loader for the Iceberg symbol_metadata table
     * @param notificationEndpoint ZMQ endpoint of the job manager's PULL socket
     */
    public SymbolMetadataWriter(TableLoader tableLoader, String notificationEndpoint) {
        this.tableLoader = tableLoader;
        this.notificationEndpoint = notificationEndpoint;
    }

    /** Loads the table, resets batching state, and connects the ZMQ PUSH socket. */
    @Override
    public void open(Configuration parameters) throws Exception {
        tableLoader.open();
        table = tableLoader.loadTable();
        seenSymbols = new HashSet<>();
        writersByExchange = new java.util.HashMap<>();
        countsPerExchange = new java.util.HashMap<>();
        pendingOutputPerExchange = new java.util.HashMap<>();

        // Initialize ZMQ PUSH socket to job manager (mimics HistoryNotificationPublisher pattern)
        zmqContext = new ZContext();
        pushSocket = zmqContext.createSocket(SocketType.PUSH);
        pushSocket.setLinger(1000);
        pushSocket.setSndHWM(10000);
        pushSocket.connect(notificationEndpoint);

        // Subtask index + nanoTime keep generated data-file names unique per instance.
        appenderFactory = new GenericAppenderFactory(table.schema(), table.spec());
        fileFactory = OutputFileFactory
                .builderFor(table, getRuntimeContext().getIndexOfThisSubtask(), System.nanoTime())
                .format(FileFormat.PARQUET)
                .build();

        LOG.info("SymbolMetadataWriter opened, table loaded: {}", table.name());
        LOG.info("Connected PUSH socket to notification endpoint: {}", notificationEndpoint);
    }

    /**
     * Buffers one symbol into its exchange's open Parquet writer, flushing the
     * batch once BATCH_SIZE symbols have accumulated for that exchange.
     * Records are only emitted downstream after their batch commits (duplicates
     * are emitted immediately since they were already persisted).
     *
     * NOTE(review): seenSymbols grows without bound for the lifetime of the task;
     * acceptable only if the symbol universe is small — confirm.
     */
    @Override
    public void flatMap(MarketWrapper market, Collector<MarketWrapper> out) throws Exception {
        // Create unique key for deduplication
        String symbolKey = market.getExchangeId() + ":" + market.getMarketId();

        // Skip if we've already seen this symbol
        if (seenSymbols.contains(symbolKey)) {
            LOG.debug("Skipping duplicate symbol: {}", symbolKey);
            out.collect(market); // Still emit for downstream processing
            return;
        }

        // Mark as seen
        seenSymbols.add(symbolKey);

        String exchangeId = market.getExchangeId();

        // Create Iceberg record from Market protobuf
        GenericRecord record = GenericRecord.create(table.schema());
        record.setField("exchange_id", exchangeId);
        record.setField("market_id", market.getMarketId());
        record.setField("market_type", market.getMarketType());
        record.setField("description", market.getDescription());
        record.setField("base_asset", market.getBaseAsset());
        record.setField("quote_asset", market.getQuoteAsset());
        record.setField("tick_denom", market.getTickDenom());
        record.setField("base_denom", market.getBaseDenom());
        record.setField("quote_denom", market.getQuoteDenom());

        // Convert supported_period_seconds to List<Integer>
        List<Integer> supportedPeriods = new ArrayList<>(market.getSupportedPeriodSeconds());
        record.setField("supported_period_seconds", supportedPeriods);

        // 0 is treated as "unknown" and stored as null (field is optional in the schema)
        record.setField("earliest_time", market.getEarliestTime() != 0 ? market.getEarliestTime() : null);
        record.setField("updated_at", System.currentTimeMillis() * 1000); // Current time in microseconds

        // Get or create writer for this exchange
        DataWriter<Record> writer = writersByExchange.get(exchangeId);
        if (writer == null) {
            // Compute partition key from exchange_id
            GenericRecord partitionRecord = GenericRecord.create(table.schema());
            partitionRecord.setField("exchange_id", exchangeId);
            PartitionKey partitionKey = new PartitionKey(table.spec(), table.schema());
            partitionKey.partition(partitionRecord);

            // Create new writer for this exchange's partition
            EncryptedOutputFile encryptedFile = fileFactory.newOutputFile(partitionKey);
            writer = appenderFactory.newDataWriter(encryptedFile, FileFormat.PARQUET, partitionKey);
            writersByExchange.put(exchangeId, writer);
            countsPerExchange.put(exchangeId, 0);
            pendingOutputPerExchange.put(exchangeId, new ArrayList<>());
        }

        // Write record to batch
        writer.write(record);

        // Track count and pending output
        int count = countsPerExchange.get(exchangeId) + 1;
        countsPerExchange.put(exchangeId, count);
        pendingOutputPerExchange.get(exchangeId).add(market);

        // Flush batch if we've reached the batch size
        if (count >= BATCH_SIZE) {
            flushExchange(exchangeId, out);
        }
    }

    /**
     * Closes the exchange's writer, commits the data file to Iceberg, notifies
     * the job manager, then emits the buffered records downstream.
     *
     * NOTE(review): if close()/commit() throws, the finally block still discards
     * this exchange's pending output, so those records are never emitted —
     * presumably Flink failure/restart semantics cover this; confirm.
     */
    private void flushExchange(String exchangeId, Collector<MarketWrapper> out) throws Exception {
        DataWriter<Record> writer = writersByExchange.get(exchangeId);
        if (writer == null) {
            return;
        }

        try {
            writer.close();
            table.newAppend()
                    .appendFile(writer.toDataFile())
                    .commit();

            int count = countsPerExchange.get(exchangeId);
            LOG.info("Committed batch of {} symbols for exchange: {}", count, exchangeId);

            // Send notification to gateways to reload symbol metadata
            sendMetadataUpdateNotification(exchangeId, count);

            // Emit all pending outputs
            for (MarketWrapper market : pendingOutputPerExchange.get(exchangeId)) {
                out.collect(market);
            }
        } finally {
            // Clear state for this exchange
            writersByExchange.remove(exchangeId);
            countsPerExchange.remove(exchangeId);
            pendingOutputPerExchange.remove(exchangeId);
        }
    }

    /**
     * Best-effort ZMQ notification that this exchange's metadata changed.
     * Frame layout: [topic]["version" byte][msg_type + empty payload]; never throws.
     */
    private void sendMetadataUpdateNotification(String exchangeId, int count) {
        if (pushSocket == null) {
            LOG.warn("Push socket is null - cannot send METADATA_UPDATE notification for exchange: {} ({} symbols)", exchangeId, count);
            return;
        }

        try {
            // Topic for metadata updates (broadcast to all gateways)
            String topic = "METADATA_UPDATE";

            // Empty payload (notification only, no additional data needed)
            byte[] payload = new byte[0];

            // Build message frame: [msg_type][payload]
            byte[] messageFrame = new byte[1 + payload.length];
            messageFrame[0] = MSG_TYPE_SYMBOL_METADATA_UPDATED;
            System.arraycopy(payload, 0, messageFrame, 1, payload.length);

            // Send three-frame message via PUSH: [topic][version][message]
            // Job manager's forwarder will republish via MARKET_DATA_PUB
            pushSocket.sendMore(topic);
            pushSocket.sendMore(new byte[]{PROTOCOL_VERSION});
            pushSocket.send(messageFrame, 0);

            LOG.info("Sent METADATA_UPDATE notification for exchange: {} ({} symbols)", exchangeId, count);
        } catch (Exception e) {
            LOG.error("Failed to send metadata update notification for exchange: {}", exchangeId, e);
            // Don't throw - notification is best-effort
        }
    }

    /**
     * Final flush of all open batches, then releases ZMQ and table resources.
     * Per-exchange failures are logged and skipped so every exchange gets a
     * flush attempt. Buffered records cannot be emitted here (no Collector);
     * only the Iceberg commit and the notification happen on final flush.
     */
    @Override
    public void close() throws Exception {
        // Flush any remaining batches
        try {
            // Iterate over a copy of the key set; the writer map is cleared in finally.
            for (String exchangeId : new ArrayList<>(writersByExchange.keySet())) {
                DataWriter<Record> writer = writersByExchange.get(exchangeId);
                if (writer != null) {
                    try {
                        writer.close();
                        table.newAppend()
                                .appendFile(writer.toDataFile())
                                .commit();

                        int count = countsPerExchange.get(exchangeId);
                        LOG.info("Final flush: committed {} remaining symbols for exchange: {}", count, exchangeId);

                        // Send final notification
                        sendMetadataUpdateNotification(exchangeId, count);
                    } catch (Exception e) {
                        LOG.error("Failed to flush remaining batch for exchange: {}", exchangeId, e);
                    }
                }
            }
        } finally {
            writersByExchange.clear();
            countsPerExchange.clear();
            pendingOutputPerExchange.clear();
        }

        // Close ZMQ resources
        if (pushSocket != null) {
            pushSocket.close();
        }
        if (zmqContext != null) {
            zmqContext.close();
        }

        if (tableLoader != null) {
            tableLoader.close();
        }
    }
}
|
||||
@@ -27,3 +27,12 @@ topics:
|
||||
retention.ms: 2592000000 # 30 days
|
||||
compression.type: snappy
|
||||
cleanup.policy: delete
|
||||
|
||||
# Symbol metadata from ingestors
|
||||
- name: symbol-metadata
|
||||
partitions: 2
|
||||
replication: 1
|
||||
config:
|
||||
retention.ms: 604800000 # 7 days
|
||||
compression.type: snappy
|
||||
cleanup.policy: compact # Keep latest per symbol key
|
||||
|
||||
@@ -27,3 +27,12 @@ topics:
|
||||
retention.ms: 2592000000 # 30 days
|
||||
compression.type: snappy
|
||||
cleanup.policy: delete
|
||||
|
||||
# Symbol metadata from ingestors
|
||||
- name: symbol-metadata
|
||||
partitions: 3
|
||||
replication: 2
|
||||
config:
|
||||
retention.ms: 604800000 # 7 days
|
||||
compression.type: snappy
|
||||
cleanup.policy: compact # Keep latest per symbol key
|
||||
|
||||
@@ -9,10 +9,13 @@ COPY tsconfig.json ./
|
||||
# Install dependencies
|
||||
RUN npm install
|
||||
|
||||
# Copy protobuf definitions
|
||||
COPY protobuf ../protobuf/
|
||||
|
||||
# Copy source
|
||||
COPY src ./src
|
||||
|
||||
# Build (includes protobuf generation)
|
||||
RUN npm run build
|
||||
|
||||
# Production image
|
||||
@@ -53,6 +56,12 @@ RUN npm install --omit=dev
|
||||
# Copy built application
|
||||
COPY --from=builder /app/dist ./dist
|
||||
|
||||
# Copy protobuf definitions for runtime loading
|
||||
COPY protobuf ./protobuf
|
||||
|
||||
# Copy k8s templates (not included in TypeScript build)
|
||||
COPY src/k8s/templates ./dist/k8s/templates
|
||||
|
||||
# Copy entrypoint script
|
||||
COPY entrypoint.sh ./
|
||||
RUN chmod +x entrypoint.sh
|
||||
|
||||
@@ -43,8 +43,16 @@ qdrant:
|
||||
iceberg:
|
||||
catalog_uri: http://iceberg-catalog:8181
|
||||
namespace: gateway
|
||||
# Future: Separate OHLC database
|
||||
# ohlc_catalog_uri: http://iceberg-catalog-trading:8181
|
||||
# ohlc_namespace: trading
|
||||
s3_endpoint: http://minio:9000
|
||||
|
||||
# ZMQ Relay configuration for historical data
|
||||
relay:
|
||||
request_endpoint: tcp://relay:5559
|
||||
notification_endpoint: tcp://relay:5558
|
||||
|
||||
# Event router (ZeroMQ)
|
||||
events:
|
||||
router_bind: tcp://*:5571
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
-- Development seed data
|
||||
-- This file contains sample data for local development and testing
|
||||
--
|
||||
-- Dev user: tim@test / test
|
||||
-- User is created via Better Auth API in bin/dev script
|
||||
-- License is also created in bin/dev script
|
||||
--
|
||||
|
||||
@@ -5,8 +5,9 @@
|
||||
"private": true,
|
||||
"description": "Multi-channel gateway with agent harness for Dexorder AI platform",
|
||||
"scripts": {
|
||||
"proto": "mkdir -p src/generated && pbjs -t static-module -w es6 -o src/generated/proto.js ../protobuf/*.proto && pbts -o src/generated/proto.d.ts src/generated/proto.js && sed -i 's/from \"protobufjs\\/minimal\"/from \"protobufjs\\/minimal.js\"/g' src/generated/proto.js",
|
||||
"dev": "tsx watch src/main.ts",
|
||||
"build": "npm run proto && tsc",
|
||||
"start": "node dist/main.js",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
@@ -22,8 +23,9 @@
|
||||
"@qdrant/js-client-rest": "^1.17.0",
|
||||
"argon2": "^0.41.1",
|
||||
"better-auth": "^1.5.3",
|
||||
"duckdb": "^1.1.3",
|
||||
"fast-json-patch": "^3.1.1",
|
||||
"fastify": "^5.2.0",
|
||||
"iceberg-js": "latest",
|
||||
"ioredis": "^5.4.2",
|
||||
"js-yaml": "^4.1.0",
|
||||
"kysely": "^0.27.3",
|
||||
@@ -31,6 +33,7 @@
|
||||
"pg": "^8.13.1",
|
||||
"pino": "^9.6.0",
|
||||
"pino-pretty": "^13.0.0",
|
||||
"protobufjs": "^7.4.0",
|
||||
"zeromq": "^6.0.0-beta.20",
|
||||
"zod": "^3.24.1"
|
||||
},
|
||||
@@ -38,6 +41,7 @@
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"@types/node": "^22.10.2",
|
||||
"@types/pg": "^8.11.10",
|
||||
"protobufjs-cli": "^1.1.2",
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "^5.7.2"
|
||||
},
|
||||
|
||||
@@ -1,258 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
option java_multiple_files = true;
|
||||
option java_package = "com.dexorder.proto";
|
||||
|
||||
// User container event system for delivering notifications to users
|
||||
// via active sessions or external channels (Telegram, email, push).
|
||||
//
|
||||
// Two ZMQ patterns:
|
||||
// - XPUB/SUB (port 5570): Fast path for informational events to active sessions
|
||||
// - DEALER/ROUTER (port 5571): Guaranteed delivery for critical events with ack
|
||||
//
|
||||
// See doc/protocol.md and doc/user_container_events.md for details.
|
||||
|
||||
// =============================================================================
|
||||
// User Event (Container → Gateway)
|
||||
// Message Type ID: 0x20
|
||||
// =============================================================================
|
||||
|
||||
message UserEvent {
|
||||
// User ID this event belongs to
|
||||
string user_id = 1;
|
||||
|
||||
// Unique event ID for deduplication and ack tracking (UUID)
|
||||
string event_id = 2;
|
||||
|
||||
// Timestamp when event was generated (Unix milliseconds)
|
||||
int64 timestamp = 3;
|
||||
|
||||
// Type of event
|
||||
EventType event_type = 4;
|
||||
|
||||
// Event payload (JSON or nested protobuf, depending on event_type)
|
||||
bytes payload = 5;
|
||||
|
||||
// Delivery specification (priority and channel preferences)
|
||||
DeliverySpec delivery = 6;
|
||||
}
|
||||
|
||||
enum EventType {
|
||||
// Trading events
|
||||
ORDER_PLACED = 0;
|
||||
ORDER_FILLED = 1;
|
||||
ORDER_CANCELLED = 2;
|
||||
ORDER_REJECTED = 3;
|
||||
ORDER_EXPIRED = 4;
|
||||
|
||||
// Alert events
|
||||
ALERT_TRIGGERED = 10;
|
||||
ALERT_CREATED = 11;
|
||||
ALERT_DELETED = 12;
|
||||
|
||||
// Position events
|
||||
POSITION_OPENED = 20;
|
||||
POSITION_CLOSED = 21;
|
||||
POSITION_UPDATED = 22;
|
||||
POSITION_LIQUIDATED = 23;
|
||||
|
||||
// Workspace/chart events
|
||||
WORKSPACE_CHANGED = 30;
|
||||
CHART_ANNOTATION_ADDED = 31;
|
||||
CHART_ANNOTATION_REMOVED = 32;
|
||||
INDICATOR_UPDATED = 33;
|
||||
|
||||
// Strategy events
|
||||
STRATEGY_STARTED = 40;
|
||||
STRATEGY_STOPPED = 41;
|
||||
STRATEGY_LOG = 42;
|
||||
STRATEGY_ERROR = 43;
|
||||
BACKTEST_COMPLETED = 44;
|
||||
|
||||
// System events
|
||||
CONTAINER_STARTING = 50;
|
||||
CONTAINER_READY = 51;
|
||||
CONTAINER_SHUTTING_DOWN = 52;
|
||||
ERROR = 53;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Delivery Specification
|
||||
// =============================================================================
|
||||
|
||||
message DeliverySpec {
|
||||
// Priority determines routing behavior
|
||||
Priority priority = 1;
|
||||
|
||||
// Ordered list of channel preferences (try first, then second, etc.)
|
||||
repeated ChannelPreference channels = 2;
|
||||
}
|
||||
|
||||
enum Priority {
|
||||
// Drop if no active session (fire-and-forget via XPUB)
|
||||
// Use for: indicator updates, chart syncs, strategy logs when watching
|
||||
INFORMATIONAL = 0;
|
||||
|
||||
// Best effort delivery - queue briefly, deliver when possible
|
||||
// Uses XPUB if subscribed, otherwise DEALER
|
||||
// Use for: alerts, position updates
|
||||
NORMAL = 1;
|
||||
|
||||
// Must deliver - retry until acked, escalate channels
|
||||
// Always uses DEALER for guaranteed delivery
|
||||
// Use for: order fills, liquidations, critical errors
|
||||
CRITICAL = 2;
|
||||
}
|
||||
|
||||
message ChannelPreference {
|
||||
// Channel to deliver to
|
||||
ChannelType channel = 1;
|
||||
|
||||
// If true, skip this channel if user is not connected to it
|
||||
// If false, deliver even if user is not actively connected
|
||||
// (e.g., send Telegram message even if user isn't in Telegram chat)
|
||||
bool only_if_active = 2;
|
||||
}
|
||||
|
||||
enum ChannelType {
|
||||
// Whatever channel the user currently has open (WebSocket, Telegram session)
|
||||
ACTIVE_SESSION = 0;
|
||||
|
||||
// Specific channels
|
||||
WEB = 1; // WebSocket to web UI
|
||||
TELEGRAM = 2; // Telegram bot message
|
||||
EMAIL = 3; // Email notification
|
||||
PUSH = 4; // Mobile push notification (iOS/Android)
|
||||
DISCORD = 5; // Discord webhook (future)
|
||||
SLACK = 6; // Slack webhook (future)
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Event Acknowledgment (Gateway → Container)
|
||||
// Message Type ID: 0x21
|
||||
// =============================================================================
|
||||
|
||||
message EventAck {
|
||||
// Event ID being acknowledged
|
||||
string event_id = 1;
|
||||
|
||||
// Delivery status
|
||||
AckStatus status = 2;
|
||||
|
||||
// Error message if status is ERROR
|
||||
string error_message = 3;
|
||||
|
||||
// Channel that successfully delivered (for logging/debugging)
|
||||
ChannelType delivered_via = 4;
|
||||
}
|
||||
|
||||
enum AckStatus {
|
||||
// Successfully delivered to at least one channel
|
||||
DELIVERED = 0;
|
||||
|
||||
// Accepted and queued for delivery (e.g., rate limited, will retry)
|
||||
QUEUED = 1;
|
||||
|
||||
// Permanent failure - all channels failed
|
||||
ERROR = 2;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Event Payloads
|
||||
// These are JSON-encoded in the UserEvent.payload field.
|
||||
// Defined here for documentation; actual encoding is JSON for flexibility.
|
||||
// =============================================================================
|
||||
|
||||
// Payload for ORDER_PLACED, ORDER_FILLED, ORDER_CANCELLED, etc.
|
||||
message OrderEventPayload {
|
||||
string order_id = 1;
|
||||
string symbol = 2;
|
||||
string side = 3; // "buy" or "sell"
|
||||
string order_type = 4; // "market", "limit", "stop_limit", etc.
|
||||
string quantity = 5; // Decimal string
|
||||
string price = 6; // Decimal string (for limit orders)
|
||||
string fill_price = 7; // Decimal string (for fills)
|
||||
string fill_quantity = 8; // Decimal string (for partial fills)
|
||||
string status = 9; // "open", "filled", "cancelled", etc.
|
||||
string exchange = 10;
|
||||
int64 timestamp = 11; // Unix milliseconds
|
||||
string strategy_id = 12; // If order was placed by a strategy
|
||||
string error_message = 13; // If rejected/failed
|
||||
}
|
||||
|
||||
// Payload for ALERT_TRIGGERED
|
||||
message AlertEventPayload {
|
||||
string alert_id = 1;
|
||||
string symbol = 2;
|
||||
string condition = 3; // Human-readable condition (e.g., "BTC > 50000")
|
||||
string triggered_price = 4; // Decimal string
|
||||
int64 timestamp = 5;
|
||||
}
|
||||
|
||||
// Payload for POSITION_OPENED, POSITION_CLOSED, POSITION_UPDATED
|
||||
message PositionEventPayload {
|
||||
string position_id = 1;
|
||||
string symbol = 2;
|
||||
string side = 3; // "long" or "short"
|
||||
string size = 4; // Decimal string
|
||||
string entry_price = 5; // Decimal string
|
||||
string current_price = 6; // Decimal string
|
||||
string unrealized_pnl = 7; // Decimal string
|
||||
string realized_pnl = 8; // Decimal string (for closed positions)
|
||||
string leverage = 9; // Decimal string (for margin)
|
||||
string liquidation_price = 10;
|
||||
string exchange = 11;
|
||||
int64 timestamp = 12;
|
||||
}
|
||||
|
||||
// Payload for WORKSPACE_CHANGED, CHART_ANNOTATION_*, INDICATOR_UPDATED
|
||||
message WorkspaceEventPayload {
|
||||
string workspace_id = 1;
|
||||
string change_type = 2; // "symbol_changed", "timeframe_changed", "annotation_added", etc.
|
||||
string symbol = 3;
|
||||
string timeframe = 4;
|
||||
|
||||
// For annotations
|
||||
string annotation_id = 5;
|
||||
string annotation_type = 6; // "trendline", "horizontal", "rectangle", "text", etc.
|
||||
string annotation_data = 7; // JSON string with coordinates, style, etc.
|
||||
|
||||
// For indicators
|
||||
string indicator_name = 8;
|
||||
string indicator_params = 9; // JSON string with indicator parameters
|
||||
|
||||
int64 timestamp = 10;
|
||||
}
|
||||
|
||||
// Payload for STRATEGY_LOG, STRATEGY_ERROR
|
||||
message StrategyEventPayload {
|
||||
string strategy_id = 1;
|
||||
string strategy_name = 2;
|
||||
string log_level = 3; // "debug", "info", "warn", "error"
|
||||
string message = 4;
|
||||
string details = 5; // JSON string with additional context
|
||||
int64 timestamp = 6;
|
||||
}
|
||||
|
||||
// Payload for BACKTEST_COMPLETED
|
||||
message BacktestEventPayload {
|
||||
string backtest_id = 1;
|
||||
string strategy_id = 2;
|
||||
string strategy_name = 3;
|
||||
string symbol = 4;
|
||||
string timeframe = 5;
|
||||
int64 start_time = 6;
|
||||
int64 end_time = 7;
|
||||
|
||||
// Results summary
|
||||
int32 total_trades = 8;
|
||||
int32 winning_trades = 9;
|
||||
int32 losing_trades = 10;
|
||||
string total_pnl = 11; // Decimal string
|
||||
string win_rate = 12; // Decimal string (0-1)
|
||||
string sharpe_ratio = 13; // Decimal string
|
||||
string max_drawdown = 14; // Decimal string (0-1)
|
||||
|
||||
string results_path = 15; // Path to full results file
|
||||
int64 completed_at = 16;
|
||||
}
|
||||
@@ -19,25 +19,38 @@ export class AuthService {
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify JWT token and return user ID
|
||||
* Replaces the placeholder implementation in UserService
|
||||
* Verify session token and return user ID
|
||||
* Uses Better Auth's bearer plugin for token verification
|
||||
*/
|
||||
async verifyToken(token: string): Promise<string | null> {
|
||||
try {
|
||||
// Better Auth's session verification
|
||||
this.config.logger.debug({
|
||||
tokenLength: token?.length,
|
||||
tokenPrefix: token?.substring(0, 8),
|
||||
}, 'Verifying token');
|
||||
|
||||
// Use Better Auth's getSession with Bearer token
|
||||
// The bearer plugin allows us to pass the session token via Authorization header
|
||||
const session = await this.config.auth.api.getSession({
|
||||
headers: {
|
||||
authorization: `Bearer ${token}`,
|
||||
},
|
||||
headers: new Headers({
|
||||
'Authorization': `Bearer ${token}`,
|
||||
}),
|
||||
});
|
||||
|
||||
this.config.logger.debug({
|
||||
hasSession: !!session,
|
||||
hasUser: !!session?.user,
|
||||
userId: session?.user?.id,
|
||||
}, 'Session verification result');
|
||||
|
||||
if (!session || !session.user) {
|
||||
this.config.logger.warn('Session verification failed: no session or user');
|
||||
return null;
|
||||
}
|
||||
|
||||
return session.user.id;
|
||||
} catch (error) {
|
||||
this.config.logger.debug({ error }, 'Token verification failed');
|
||||
this.config.logger.error({ error }, 'Token verification failed with error');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -76,17 +89,47 @@ export class AuthService {
|
||||
|
||||
/**
|
||||
* Sign in with email and password
|
||||
* Returns the bearer token from response headers
|
||||
*/
|
||||
async signIn(email: string, password: string): Promise<{ token: string; userId: string; error?: string }> {
|
||||
try {
|
||||
const result = await this.config.auth.api.signInEmail({
|
||||
this.config.logger.debug({ email }, 'Attempting sign in');
|
||||
|
||||
// Use asResponse: true to get the full Response object with headers
|
||||
const response = await this.config.auth.api.signInEmail({
|
||||
body: {
|
||||
email,
|
||||
password,
|
||||
},
|
||||
asResponse: true,
|
||||
});
|
||||
|
||||
if (!result.token || !result.user) {
|
||||
// Extract bearer token from response headers (set by bearer plugin)
|
||||
const token = response.headers.get('set-auth-token');
|
||||
|
||||
if (!token) {
|
||||
this.config.logger.error('Bearer token not found in response headers');
|
||||
return {
|
||||
token: '',
|
||||
userId: '',
|
||||
error: 'Authentication token not generated',
|
||||
};
|
||||
}
|
||||
|
||||
// Parse the response body to get user info
|
||||
const result = await response.json() as {
|
||||
user?: { id: string; email: string; name: string };
|
||||
error?: string;
|
||||
};
|
||||
|
||||
this.config.logger.debug({
|
||||
hasUser: !!result.user,
|
||||
userId: result.user?.id,
|
||||
hasToken: !!token,
|
||||
}, 'Sign in result');
|
||||
|
||||
if (!result.user) {
|
||||
this.config.logger.warn('Sign in failed: no user in result');
|
||||
return {
|
||||
token: '',
|
||||
userId: '',
|
||||
@@ -95,11 +138,11 @@ export class AuthService {
|
||||
}
|
||||
|
||||
return {
|
||||
token: result.token,
|
||||
token,
|
||||
userId: result.user.id,
|
||||
};
|
||||
} catch (error: any) {
|
||||
this.config.logger.error({ error }, 'Sign in failed');
|
||||
this.config.logger.error({ error }, 'Sign in failed with error');
|
||||
return {
|
||||
token: '',
|
||||
userId: '',
|
||||
@@ -115,7 +158,8 @@ export class AuthService {
|
||||
try {
|
||||
await this.config.auth.api.signOut({
|
||||
headers: {
|
||||
authorization: `Bearer ${token}`,
|
||||
// Better Auth expects the session token in the cookie header
|
||||
cookie: `better-auth.session_token=${token}`,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -133,7 +177,8 @@ export class AuthService {
|
||||
try {
|
||||
const session = await this.config.auth.api.getSession({
|
||||
headers: {
|
||||
authorization: `Bearer ${token}`,
|
||||
// Better Auth expects the session token in the cookie header
|
||||
cookie: `better-auth.session_token=${token}`,
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
@@ -3,6 +3,11 @@ import { UserService } from '../db/user-service.js';
|
||||
import { ChannelType, type AuthContext } from '../types/user.js';
|
||||
import type { ContainerManager } from '../k8s/container-manager.js';
|
||||
|
||||
/** Outcome of a WebSocket authentication attempt. */
export interface AuthResult {
  // Populated on successful authentication; null when the token, user,
  // or license check failed (or an internal error occurred).
  authContext: AuthContext | null;
  // True when the user's container is still starting up and the caller
  // should poll/notify rather than treat the session as ready.
  isSpinningUp: boolean;
}
|
||||
|
||||
export interface AuthenticatorConfig {
|
||||
userService: UserService;
|
||||
containerManager: ContainerManager;
|
||||
@@ -23,40 +28,49 @@ export class Authenticator {
|
||||
/**
|
||||
* Authenticate WebSocket connection via JWT token
|
||||
* Also ensures the user's container is running
|
||||
* Returns immediately if container is spinning up (non-blocking)
|
||||
*/
|
||||
async authenticateWebSocket(
|
||||
request: FastifyRequest
|
||||
): Promise<AuthContext | null> {
|
||||
): Promise<AuthResult> {
|
||||
try {
|
||||
const token = this.extractBearerToken(request);
|
||||
if (!token) {
|
||||
this.config.logger.warn('No bearer token in WebSocket connection');
|
||||
return null;
|
||||
return { authContext: null, isSpinningUp: false };
|
||||
}
|
||||
|
||||
const userId = await this.config.userService.verifyWebToken(token);
|
||||
if (!userId) {
|
||||
this.config.logger.warn('Invalid JWT token');
|
||||
return null;
|
||||
return { authContext: null, isSpinningUp: false };
|
||||
}
|
||||
|
||||
const license = await this.config.userService.getUserLicense(userId);
|
||||
if (!license) {
|
||||
this.config.logger.warn({ userId }, 'User license not found');
|
||||
return null;
|
||||
return { authContext: null, isSpinningUp: false };
|
||||
}
|
||||
|
||||
// Ensure container is running (may take time if creating new container)
|
||||
// Ensure container is running (non-blocking - returns immediately if creating new)
|
||||
this.config.logger.info({ userId }, 'Ensuring user container is running');
|
||||
const { mcpEndpoint, wasCreated } = await this.config.containerManager.ensureContainerRunning(
|
||||
const { mcpEndpoint, wasCreated, isSpinningUp } = await this.config.containerManager.ensureContainerRunning(
|
||||
userId,
|
||||
license
|
||||
license,
|
||||
false // Don't wait for ready
|
||||
);
|
||||
|
||||
if (isSpinningUp) {
|
||||
this.config.logger.info(
|
||||
{ userId, wasCreated },
|
||||
'Container is spinning up'
|
||||
);
|
||||
} else {
|
||||
this.config.logger.info(
|
||||
{ userId, mcpEndpoint, wasCreated },
|
||||
'Container is ready'
|
||||
);
|
||||
}
|
||||
|
||||
// Update license with actual MCP endpoint
|
||||
license.mcpServerUrl = mcpEndpoint;
|
||||
@@ -64,16 +78,19 @@ export class Authenticator {
|
||||
const sessionId = `ws_${userId}_${Date.now()}`;
|
||||
|
||||
return {
|
||||
authContext: {
|
||||
userId,
|
||||
channelType: ChannelType.WEBSOCKET,
|
||||
channelUserId: userId, // For WebSocket, same as userId
|
||||
sessionId,
|
||||
license,
|
||||
authenticatedAt: new Date(),
|
||||
},
|
||||
isSpinningUp,
|
||||
};
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'WebSocket authentication error');
|
||||
return null;
|
||||
return { authContext: null, isSpinningUp: false };
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,13 +151,22 @@ export class Authenticator {
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract bearer token from request headers
|
||||
* Extract bearer token from request headers or query parameters
|
||||
* WebSocket connections can't set custom headers in browsers, so we support token in query params
|
||||
*/
|
||||
private extractBearerToken(request: FastifyRequest): string | null {
|
||||
// Try Authorization header first
|
||||
const auth = request.headers.authorization;
|
||||
if (!auth || !auth.startsWith('Bearer ')) {
|
||||
return null;
|
||||
}
|
||||
if (auth && auth.startsWith('Bearer ')) {
|
||||
return auth.substring(7);
|
||||
}
|
||||
|
||||
// Fall back to query parameter (for WebSocket connections)
|
||||
const query = request.query as { token?: string };
|
||||
if (query.token) {
|
||||
return query.token;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { betterAuth } from 'better-auth';
|
||||
import { bearer } from 'better-auth/plugins/bearer';
|
||||
import { Pool } from 'pg';
|
||||
import { Kysely, PostgresDialect } from 'kysely';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
@@ -88,6 +89,11 @@ export async function createBetterAuth(config: BetterAuthConfig) {
|
||||
},
|
||||
},
|
||||
|
||||
// Plugins
|
||||
plugins: [
|
||||
bearer(), // Enable Bearer token authentication for API/WebSocket
|
||||
],
|
||||
|
||||
});
|
||||
|
||||
config.logger.debug('Better Auth instance created');
|
||||
|
||||
@@ -7,12 +7,36 @@ import { randomUUID } from 'crypto';
|
||||
|
||||
import type { ProviderConfig } from '../llm/provider.js';
|
||||
import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js';
|
||||
import type { OHLCService } from '../services/ohlc-service.js';
|
||||
import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
import type { ContainerManager } from '../k8s/container-manager.js';
|
||||
import {
|
||||
WorkspaceManager,
|
||||
DEFAULT_STORES,
|
||||
type ChannelAdapter,
|
||||
type ChannelCapabilities,
|
||||
type SnapshotMessage,
|
||||
type PatchMessage,
|
||||
} from '../workspace/index.js';
|
||||
|
||||
/**
|
||||
* Safe JSON stringifier that handles BigInt values
|
||||
* Converts BigInt to Number (safe for timestamps and other integer values)
|
||||
*/
|
||||
function jsonStringifySafe(obj: any): string {
|
||||
return JSON.stringify(obj, (_key, value) =>
|
||||
typeof value === 'bigint' ? Number(value) : value
|
||||
);
|
||||
}
|
||||
|
||||
/** Dependencies injected into the WebSocket handler. */
export interface WebSocketHandlerConfig {
  // Authenticates connections and ensures the user's container is running.
  authenticator: Authenticator;
  // Used to poll for container readiness after a spinning-up auth result.
  containerManager: ContainerManager;
  // LLM provider settings passed through to each AgentHarness.
  providerConfig: ProviderConfig;
  // Registry of live sessions for event routing.
  sessionRegistry: SessionRegistry;
  // Subscribes sessions to container event streams.
  eventSubscriber: EventSubscriber;
  ohlcService?: OHLCService; // Optional for historical data support
  symbolIndexService?: SymbolIndexService; // Optional for symbol search
}
|
||||
|
||||
/**
|
||||
@@ -24,6 +48,7 @@ export interface WebSocketHandlerConfig {
|
||||
export class WebSocketHandler {
|
||||
private config: WebSocketHandlerConfig;
|
||||
private harnesses = new Map<string, AgentHarness>();
|
||||
private workspaces = new Map<string, WorkspaceManager>();
|
||||
|
||||
constructor(config: WebSocketHandlerConfig) {
|
||||
this.config = config;
|
||||
@@ -61,8 +86,8 @@ export class WebSocketHandler {
|
||||
})
|
||||
);
|
||||
|
||||
// Authenticate (this may take time if creating container)
|
||||
const authContext = await this.config.authenticator.authenticateWebSocket(request);
|
||||
// Authenticate (returns immediately if container is spinning up)
|
||||
const { authContext, isSpinningUp } = await this.config.authenticator.authenticateWebSocket(request);
|
||||
if (!authContext) {
|
||||
logger.warn('WebSocket authentication failed');
|
||||
socket.send(
|
||||
@@ -76,10 +101,27 @@ export class WebSocketHandler {
|
||||
}
|
||||
|
||||
logger.info(
|
||||
{ userId: authContext.userId, sessionId: authContext.sessionId },
|
||||
{ userId: authContext.userId, sessionId: authContext.sessionId, isSpinningUp },
|
||||
'WebSocket connection authenticated'
|
||||
);
|
||||
|
||||
// If container is spinning up, send status and start background polling
|
||||
if (isSpinningUp) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'spinning_up',
|
||||
message: 'Your workspace is starting up, please wait...',
|
||||
})
|
||||
);
|
||||
|
||||
// Start background polling for container readiness
|
||||
this.pollContainerReadiness(socket, authContext, app).catch((error) => {
|
||||
logger.error({ error, userId: authContext.userId }, 'Error polling container readiness');
|
||||
});
|
||||
|
||||
// Don't return - continue with session setup so we can receive messages once ready
|
||||
} else {
|
||||
// Send workspace starting message
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
@@ -88,6 +130,33 @@ export class WebSocketHandler {
|
||||
message: 'Starting your workspace...',
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
// Create workspace manager for this session
|
||||
const workspace = new WorkspaceManager({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
stores: DEFAULT_STORES,
|
||||
// containerSync will be added when MCP client is implemented
|
||||
logger,
|
||||
});
|
||||
|
||||
// Create WebSocket channel adapter
|
||||
const wsAdapter: ChannelAdapter = {
|
||||
sendSnapshot: (msg: SnapshotMessage) => {
|
||||
socket.send(JSON.stringify(msg));
|
||||
},
|
||||
sendPatch: (msg: PatchMessage) => {
|
||||
socket.send(JSON.stringify(msg));
|
||||
},
|
||||
getCapabilities: (): ChannelCapabilities => ({
|
||||
supportsSync: true,
|
||||
supportsImages: true,
|
||||
supportsMarkdown: true,
|
||||
supportsStreaming: true,
|
||||
supportsTradingViewEmbed: true,
|
||||
}),
|
||||
};
|
||||
|
||||
// Create agent harness
|
||||
const harness = new AgentHarness({
|
||||
@@ -99,6 +168,11 @@ export class WebSocketHandler {
|
||||
});
|
||||
|
||||
try {
|
||||
// Initialize workspace and harness
|
||||
await workspace.initialize();
|
||||
workspace.setAdapter(wsAdapter);
|
||||
this.workspaces.set(authContext.sessionId, workspace);
|
||||
|
||||
await harness.initialize();
|
||||
this.harnesses.set(authContext.sessionId, harness);
|
||||
|
||||
@@ -125,7 +199,8 @@ export class WebSocketHandler {
|
||||
'Session registered for events'
|
||||
);
|
||||
|
||||
// Send connected message
|
||||
// Send connected message (only if not spinning up - otherwise sent by pollContainerReadiness)
|
||||
if (!isSpinningUp) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'connected',
|
||||
@@ -135,13 +210,18 @@ export class WebSocketHandler {
|
||||
message: 'Connected to Dexorder AI',
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
// Handle messages
|
||||
socket.on('message', async (data: Buffer) => {
|
||||
try {
|
||||
logger.info({ rawMessage: data.toString().substring(0, 500) }, 'WebSocket message received');
|
||||
const payload = JSON.parse(data.toString());
|
||||
logger.info({ type: payload.type, request_id: payload.request_id }, 'WebSocket message parsed');
|
||||
|
||||
// Route based on message type
|
||||
if (payload.type === 'message') {
|
||||
// Chat message - send to agent harness
|
||||
const inboundMessage: InboundMessage = {
|
||||
messageId: randomUUID(),
|
||||
userId: authContext.userId,
|
||||
@@ -159,6 +239,20 @@ export class WebSocketHandler {
|
||||
...response,
|
||||
})
|
||||
);
|
||||
} else if (payload.type === 'hello') {
|
||||
// Workspace sync: hello message
|
||||
logger.debug({ seqs: payload.seqs }, 'Handling workspace hello');
|
||||
await workspace.handleHello(payload.seqs || {});
|
||||
} else if (payload.type === 'patch') {
|
||||
// Workspace sync: patch message
|
||||
logger.debug({ store: payload.store, seq: payload.seq }, 'Handling workspace patch');
|
||||
await workspace.handlePatch(payload.store, payload.seq, payload.patch || []);
|
||||
} else if (this.isDatafeedMessage(payload)) {
|
||||
// Historical data request - send to OHLC service
|
||||
logger.info({ type: payload.type }, 'Routing to datafeed handler');
|
||||
await this.handleDatafeedMessage(socket, payload, logger);
|
||||
} else {
|
||||
logger.warn({ type: payload.type }, 'Unknown message type received');
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Error handling WebSocket message');
|
||||
@@ -181,6 +275,10 @@ export class WebSocketHandler {
|
||||
await this.config.eventSubscriber.onSessionDisconnect(removedSession);
|
||||
}
|
||||
|
||||
// Cleanup workspace
|
||||
await workspace.shutdown();
|
||||
this.workspaces.delete(authContext.sessionId);
|
||||
|
||||
// Cleanup harness
|
||||
await harness.cleanup();
|
||||
this.harnesses.delete(authContext.sessionId);
|
||||
@@ -190,12 +288,76 @@ export class WebSocketHandler {
|
||||
logger.error({ error, sessionId: authContext.sessionId }, 'WebSocket error');
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Failed to initialize agent harness');
|
||||
logger.error({ error }, 'Failed to initialize session');
|
||||
socket.close(1011, 'Internal server error');
|
||||
await workspace.shutdown();
|
||||
this.workspaces.delete(authContext.sessionId);
|
||||
await harness.cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll for container readiness in the background
|
||||
* Sends notification to client when container is ready
|
||||
*/
|
||||
private async pollContainerReadiness(
|
||||
socket: WebSocket,
|
||||
authContext: any,
|
||||
app: FastifyInstance
|
||||
): Promise<void> {
|
||||
const logger = app.log;
|
||||
const userId = authContext.userId;
|
||||
|
||||
logger.info({ userId }, 'Starting background poll for container readiness');
|
||||
|
||||
try {
|
||||
// Wait for container to become ready (2 minute timeout)
|
||||
const ready = await this.config.containerManager.waitForContainerReady(userId, 120000);
|
||||
|
||||
if (ready) {
|
||||
logger.info({ userId }, 'Container is now ready, notifying client');
|
||||
|
||||
// Send ready notification
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'ready',
|
||||
message: 'Your workspace is ready!',
|
||||
})
|
||||
);
|
||||
|
||||
// Also send the 'connected' message
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'connected',
|
||||
sessionId: authContext.sessionId,
|
||||
userId: authContext.userId,
|
||||
licenseType: authContext.license.licenseType,
|
||||
message: 'Connected to Dexorder AI',
|
||||
})
|
||||
);
|
||||
} else {
|
||||
logger.warn({ userId }, 'Container failed to become ready within timeout');
|
||||
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
message: 'Workspace failed to start. Please try again later.',
|
||||
})
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error({ error, userId }, 'Error waiting for container readiness');
|
||||
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
message: 'Error starting workspace. Please try again later.',
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Derive the container's XPUB event endpoint from the MCP server URL.
|
||||
*
|
||||
@@ -212,4 +374,173 @@ export class WebSocketHandler {
|
||||
return mcpServerUrl.replace('http://', 'tcp://').replace(':3000', ':5570');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if message is a datafeed message (TradingView protocol)
|
||||
*/
|
||||
private isDatafeedMessage(payload: any): boolean {
|
||||
const datafeedTypes = [
|
||||
'get_config',
|
||||
'search_symbols',
|
||||
'resolve_symbol',
|
||||
'get_bars',
|
||||
'subscribe_bars',
|
||||
'unsubscribe_bars',
|
||||
];
|
||||
return datafeedTypes.includes(payload.type);
|
||||
}
|
||||
|
||||
/**
 * Handle datafeed messages (TradingView protocol).
 *
 * Routes each message type to the OHLC and/or symbol-index service and
 * replies over the socket with a `<type>_response` (or `error`) frame
 * carrying the same request_id, so the client can correlate replies.
 * Errors are caught and reported to the client; this method never throws.
 */
private async handleDatafeedMessage(
  socket: WebSocket,
  payload: any,
  logger: any
): Promise<void> {
  logger.info({ type: payload.type, payload }, 'handleDatafeedMessage called');
  const ohlcService = this.config.ohlcService;
  const symbolIndexService = this.config.symbolIndexService;

  logger.info({
    hasOhlcService: !!ohlcService,
    hasSymbolIndexService: !!symbolIndexService
  }, 'Service availability');

  // With neither service configured there is nothing to answer with.
  if (!ohlcService && !symbolIndexService) {
    logger.warn('No datafeed services available');
    return;
  }

  // Echo the client's request_id back; generate one if the client omitted it.
  const requestId = payload.request_id || randomUUID();

  try {
    switch (payload.type) {
      case 'get_config': {
        // Fall back to a minimal static config when no OHLC service is wired in.
        const config = ohlcService ? await ohlcService.getConfig() : { supported_resolutions: ['1', '5', '15', '60', '1D'] };
        socket.send(
          JSON.stringify({
            type: 'get_config_response',
            request_id: requestId,
            config,
          })
        );
        break;
      }

      case 'search_symbols': {
        logger.info({ query: payload.query, limit: payload.limit }, 'Handling search_symbols');
        // Use SymbolIndexService if available, otherwise fallback to OHLCService stub
        const symbolIndexService = this.config.symbolIndexService;
        logger.info({ hasSymbolIndexService: !!symbolIndexService }, 'Service check for search');

        // Default result cap is 30 when the client sends no limit.
        const results = symbolIndexService
          ? await symbolIndexService.search(payload.query, payload.limit || 30)
          : (ohlcService ? await ohlcService.searchSymbols(
              payload.query,
              payload.symbol_type,
              payload.exchange,
              payload.limit || 30
            ) : []);

        logger.info({ resultsCount: results.length }, 'Search complete');
        socket.send(
          JSON.stringify({
            type: 'search_symbols_response',
            request_id: requestId,
            results,
          })
        );
        break;
      }

      case 'resolve_symbol': {
        logger.info({ symbol: payload.symbol }, 'Handling resolve_symbol');
        // Use SymbolIndexService if available, otherwise fallback to OHLCService stub
        const symbolIndexService = this.config.symbolIndexService;
        logger.info({ hasSymbolIndexService: !!symbolIndexService }, 'Service check for resolve');

        const symbolInfo = symbolIndexService
          ? await symbolIndexService.resolveSymbol(payload.symbol)
          : (ohlcService ? await ohlcService.resolveSymbol(payload.symbol) : null);

        logger.info({ found: !!symbolInfo }, 'Symbol resolution complete');

        if (!symbolInfo) {
          logger.warn({ symbol: payload.symbol }, 'Symbol not found');
          socket.send(
            JSON.stringify({
              type: 'error',
              request_id: requestId,
              error_message: `Symbol not found: ${payload.symbol}`,
            })
          );
        } else {
          logger.info({ symbolInfo }, 'Sending symbol_info response');
          socket.send(
            JSON.stringify({
              type: 'resolve_symbol_response',
              request_id: requestId,
              symbol_info: symbolInfo,
            })
          );
        }
        break;
      }

      case 'get_bars': {
        // Bars require the OHLC service specifically; the symbol index cannot serve them.
        if (!ohlcService) {
          socket.send(JSON.stringify({
            type: 'error',
            request_id: requestId,
            error_message: 'OHLC service not available'
          }));
          break;
        }
        const history = await ohlcService.fetchOHLC(
          payload.symbol,
          payload.resolution,
          payload.from_time,
          payload.to_time,
          payload.countback
        );
        // jsonStringifySafe: bar data may contain BigInt values that
        // JSON.stringify would reject.
        socket.send(
          jsonStringifySafe({
            type: 'get_bars_response',
            request_id: requestId,
            history,
          })
        );
        break;
      }

      case 'subscribe_bars':
      case 'unsubscribe_bars':
        // TODO: Implement real-time subscriptions
        socket.send(
          JSON.stringify({
            type: `${payload.type}_response`,
            request_id: requestId,
            subscription_id: payload.subscription_id,
            success: false,
            message: 'Real-time subscriptions not yet implemented',
          })
        );
        break;

      default:
        logger.warn({ type: payload.type }, 'Unknown datafeed message type');
    }
  } catch (error: any) {
    logger.error({ error, type: payload.type }, 'Error handling datafeed message');
    socket.send(
      jsonStringifySafe({
        type: 'error',
        request_id: requestId,
        error_code: 'INTERNAL_ERROR',
        error_message: error.message || 'Internal server error',
      })
    );
  }
}
|
||||
}
|
||||
|
||||
546
gateway/src/clients/duckdb-client.ts
Normal file
546
gateway/src/clients/duckdb-client.ts
Normal file
@@ -0,0 +1,546 @@
|
||||
/**
|
||||
* DuckDB Client for querying Apache Iceberg tables
|
||||
*
|
||||
* Uses DuckDB's native Iceberg and Parquet support to query data
|
||||
* directly from S3/MinIO without needing catalog-only libraries.
|
||||
*/
|
||||
|
||||
import duckdb from 'duckdb';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { promisify } from 'util';
|
||||
|
||||
// Type-only aliases for the duckdb driver's classes.
type Database = duckdb.Database;
type Connection = duckdb.Connection;
// Runtime constructors pulled off the CommonJS export (TS allows a type and
// a value binding to share a name). NOTE(review): 'Connection' is destructured
// but never constructed in this file — confirm duckdb exports it as a value.
const { Database, Connection } = duckdb;
|
||||
|
||||
/** Connection settings for the DuckDB/Iceberg client. */
export interface DuckDBConfig {
  // Base URL of the Iceberg REST catalog for gateway tables.
  catalogUri: string;
  // Namespace holding the gateway tables (conversations, checkpoints).
  namespace: string;
  // Optional separate catalog for OHLC data; defaults to catalogUri.
  ohlcCatalogUri?: string;
  // Optional namespace for OHLC tables; defaults to 'trading'.
  ohlcNamespace?: string;
  // S3/MinIO settings; all three must be set for S3 access to be configured.
  s3Endpoint?: string;
  s3AccessKey?: string;
  s3SecretKey?: string;
}
|
||||
|
||||
/**
 * DuckDB Client with Iceberg support
 *
 * Provides SQL-based queries against Iceberg tables stored in S3/MinIO.
 */
export class DuckDBClient {
  // Lazily created in initialize(); null until then.
  private db: Database | null = null;
  private conn: Connection | null = null;
  // Namespace/catalog for gateway tables (conversations, checkpoints).
  private namespace: string;
  // Namespace/catalog for OHLC/trading tables.
  private ohlcNamespace: string;
  private catalogUri: string;
  private ohlcCatalogUri: string;
  // S3/MinIO credentials; only applied when all three fields are present.
  private s3Config: {
    endpoint?: string;
    accessKey?: string;
    secretKey?: string;
  };
  private logger: FastifyBaseLogger;
  // Guards initialize() so extensions/settings are only configured once.
  private initialized = false;
|
||||
|
||||
/**
 * Capture configuration and logger. No connection is opened here;
 * call initialize() before issuing queries.
 */
constructor(config: DuckDBConfig, logger: FastifyBaseLogger) {
  this.logger = logger;
  this.catalogUri = config.catalogUri;
  this.namespace = config.namespace;
  // OHLC catalog falls back to the gateway catalog; namespace to 'trading'.
  this.ohlcCatalogUri = config.ohlcCatalogUri || config.catalogUri;
  this.ohlcNamespace = config.ohlcNamespace || 'trading';
  this.s3Config = {
    endpoint: config.s3Endpoint,
    accessKey: config.s3AccessKey,
    secretKey: config.s3SecretKey,
  };
}
|
||||
|
||||
/**
 * Initialize DuckDB connection and configure S3/Iceberg extensions.
 *
 * Idempotent: subsequent calls return immediately once initialization
 * has succeeded. Opens an in-memory database, loads the httpfs and
 * iceberg extensions, and applies S3 settings when full credentials
 * were provided.
 *
 * @throws rethrows any DuckDB/extension error after logging it.
 */
async initialize(): Promise<void> {
  if (this.initialized) {
    return;
  }

  try {
    this.db = new Database(':memory:');
    this.conn = this.db.connect();

    // Promisify the callback-style API for the setup statements below.
    const all = promisify(this.conn.all.bind(this.conn));

    // Install and load required extensions
    await all('INSTALL httpfs;');
    await all('LOAD httpfs;');
    await all('INSTALL iceberg;');
    await all('LOAD iceberg;');

    // Configure S3 credentials if provided
    if (this.s3Config.endpoint && this.s3Config.accessKey && this.s3Config.secretKey) {
      const s3Url = new URL(this.s3Config.endpoint);
      const useSSL = s3Url.protocol === 'https:';

      // URL.port is '' when the endpoint has no explicit port, so '||'
      // (not '??') is the correct fallback here: 443 for https, 9000 (MinIO default) otherwise.
      // NOTE(review): credentials are interpolated directly into SET statements;
      // a secret containing a single quote would break the statement. Confirm
      // whether DuckDB offers a parameterized way to set these.
      await all(`SET s3_endpoint='${s3Url.hostname}:${s3Url.port || (useSSL ? 443 : 9000)}';`);
      await all(`SET s3_access_key_id='${this.s3Config.accessKey}';`);
      await all(`SET s3_secret_access_key='${this.s3Config.secretKey}';`);
      await all(`SET s3_use_ssl=${useSSL};`);
      // Path-style URLs + compatibility mode for MinIO/non-AWS endpoints.
      await all(`SET s3_url_style='path';`);
      await all(`SET s3_region='us-east-1';`);
      await all(`SET s3_url_compatibility_mode=true;`);

      this.logger.info({
        endpoint: this.s3Config.endpoint,
        useSSL,
      }, 'Configured DuckDB S3 settings');
    }

    this.initialized = true;
    this.logger.info({
      catalogUri: this.catalogUri,
      namespace: this.namespace,
      ohlcCatalogUri: this.ohlcCatalogUri,
      ohlcNamespace: this.ohlcNamespace,
    }, 'DuckDB client initialized');
  } catch (error) {
    this.logger.error({ error }, 'Failed to initialize DuckDB');
    throw error;
  }
}
|
||||
|
||||
/**
|
||||
* Execute a SQL query and return all rows
|
||||
*/
|
||||
private async query<T = any>(sql: string, params?: any[]): Promise<T[]> {
|
||||
if (!this.conn) {
|
||||
throw new Error('DuckDB connection not initialized');
|
||||
}
|
||||
|
||||
try {
|
||||
const all = promisify(this.conn.all.bind(this.conn)) as (sql: string, ...params: any[]) => Promise<any[]>;
|
||||
const rows = params && params.length > 0 ? await all(sql, ...params) : await all(sql);
|
||||
return rows as T[];
|
||||
} catch (error) {
|
||||
this.logger.error({ error, sql, params }, 'DuckDB query failed');
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the Iceberg table path from REST catalog
|
||||
*/
|
||||
private async getTablePath(namespace: string, tableName: string, catalogUri: string): Promise<string | null> {
|
||||
try {
|
||||
const tableUrl = `${catalogUri}/v1/namespaces/${namespace}/tables/${tableName}`;
|
||||
|
||||
this.logger.debug({ tableUrl }, 'Fetching Iceberg table metadata');
|
||||
|
||||
const response = await fetch(tableUrl, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) {
|
||||
this.logger.debug({ namespace, tableName }, 'Table not found in catalog');
|
||||
return null;
|
||||
}
|
||||
throw new Error(`Failed to fetch table metadata: ${response.status} ${response.statusText}`);
|
||||
}
|
||||
|
||||
const metadata = await response.json() as any;
|
||||
|
||||
// Extract metadata location (S3 path to metadata.json)
|
||||
const metadataLocation = metadata['metadata-location'] || metadata.location;
|
||||
|
||||
if (!metadataLocation) {
|
||||
this.logger.warn({ metadata }, 'No metadata-location found in table response');
|
||||
return null;
|
||||
}
|
||||
|
||||
this.logger.debug({ metadataLocation }, 'Found Iceberg table location');
|
||||
return metadataLocation;
|
||||
} catch (error: any) {
|
||||
this.logger.error({ error: error.message, namespace, tableName }, 'Failed to get table path');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Query messages from gateway.conversations table
|
||||
*/
|
||||
async queryMessages(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
options?: {
|
||||
startTime?: number;
|
||||
endTime?: number;
|
||||
limit?: number;
|
||||
}
|
||||
): Promise<any[]> {
|
||||
await this.initialize();
|
||||
|
||||
try {
|
||||
const tablePath = await this.getTablePath(
|
||||
this.namespace,
|
||||
'conversations',
|
||||
this.catalogUri
|
||||
);
|
||||
|
||||
if (!tablePath) {
|
||||
this.logger.warn('Conversations table not found');
|
||||
return [];
|
||||
}
|
||||
|
||||
// Build SQL query with optional filters
|
||||
let sql = `
|
||||
SELECT
|
||||
id,
|
||||
user_id,
|
||||
session_id,
|
||||
role,
|
||||
content,
|
||||
metadata,
|
||||
timestamp
|
||||
FROM iceberg_scan('${tablePath}')
|
||||
WHERE user_id = ?
|
||||
AND session_id = ?
|
||||
`;
|
||||
|
||||
const params: any[] = [userId, sessionId];
|
||||
|
||||
if (options?.startTime) {
|
||||
sql += ' AND timestamp >= ?';
|
||||
params.push(options.startTime.toString());
|
||||
}
|
||||
|
||||
if (options?.endTime) {
|
||||
sql += ' AND timestamp <= ?';
|
||||
params.push(options.endTime.toString());
|
||||
}
|
||||
|
||||
sql += ' ORDER BY timestamp ASC';
|
||||
|
||||
if (options?.limit) {
|
||||
sql += ' LIMIT ?';
|
||||
params.push(options.limit);
|
||||
}
|
||||
|
||||
this.logger.debug({ userId, sessionId, options }, 'Querying conversation messages');
|
||||
|
||||
const rows = await this.query(sql, params);
|
||||
|
||||
this.logger.info({
|
||||
userId,
|
||||
sessionId,
|
||||
count: rows.length
|
||||
}, 'Loaded conversation messages from Iceberg');
|
||||
|
||||
// Convert timestamp strings back to numbers
|
||||
return rows.map((row: any) => ({
|
||||
...row,
|
||||
timestamp: Number(row.timestamp)
|
||||
}));
|
||||
} catch (error: any) {
|
||||
this.logger.error({
|
||||
error: error.message,
|
||||
userId,
|
||||
sessionId
|
||||
}, 'Failed to query conversation messages');
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Query checkpoint from gateway.checkpoints table
   *
   * Fetches the single most recent checkpoint for a (userId, sessionId) pair,
   * optionally narrowed to an exact checkpoint_id. The row's `timestamp` is
   * converted from the string DuckDB returns back to a JS number.
   *
   * @param userId       user partition key
   * @param sessionId    session partition key
   * @param checkpointId optional exact checkpoint to fetch; when omitted the
   *                     latest checkpoint by timestamp is returned
   * @returns the checkpoint row, or null when the table is missing, no row
   *          matches, or the query fails (errors are logged, never thrown)
   */
  async queryCheckpoint(
    userId: string,
    sessionId: string,
    checkpointId?: string
  ): Promise<any | null> {
    // Lazily open DuckDB / load the Iceberg extension before querying.
    await this.initialize();

    try {
      const tablePath = await this.getTablePath(
        this.namespace,
        'checkpoints',
        this.catalogUri
      );

      if (!tablePath) {
        this.logger.warn('Checkpoints table not found');
        return null;
      }

      // Base query; additional predicates are appended below. The table path
      // is interpolated (it comes from the catalog, not user input) while all
      // user-supplied values are bound as `?` parameters.
      let sql = `
        SELECT
          user_id,
          session_id,
          checkpoint_id,
          checkpoint_data,
          metadata,
          timestamp
        FROM iceberg_scan('${tablePath}')
        WHERE user_id = ?
          AND session_id = ?
      `;

      const params: any[] = [userId, sessionId];

      if (checkpointId) {
        sql += ' AND checkpoint_id = ?';
        params.push(checkpointId);
      }

      // Most recent checkpoint wins.
      sql += ' ORDER BY timestamp DESC LIMIT 1';

      this.logger.debug({ userId, sessionId, checkpointId }, 'Querying checkpoint');

      const rows = await this.query(sql, params);

      if (rows.length === 0) {
        return null;
      }

      const row = rows[0];

      this.logger.info({
        userId,
        sessionId,
        checkpointId: row.checkpoint_id
      }, 'Loaded checkpoint from Iceberg');

      // Convert timestamp string back to number
      return {
        ...row,
        timestamp: Number(row.timestamp)
      };
    } catch (error: any) {
      // Read path is best-effort: log with context and report "no checkpoint".
      this.logger.error({
        error: error.message,
        userId,
        sessionId,
        checkpointId
      }, 'Failed to query checkpoint');
      return null;
    }
  }
|
||||
|
||||
/**
|
||||
* Query symbol metadata from trading.symbol_metadata table
|
||||
*/
|
||||
async queryAllSymbols(): Promise<any[]> {
|
||||
await this.initialize();
|
||||
|
||||
try {
|
||||
const tablePath = await this.getTablePath(
|
||||
this.ohlcNamespace,
|
||||
'symbol_metadata',
|
||||
this.ohlcCatalogUri
|
||||
);
|
||||
|
||||
if (!tablePath) {
|
||||
this.logger.warn('Symbol metadata table not found');
|
||||
return [];
|
||||
}
|
||||
|
||||
// Query the Iceberg table using DuckDB
|
||||
const sql = `SELECT * FROM iceberg_scan('${tablePath}')`;
|
||||
|
||||
this.logger.debug({ sql }, 'Querying symbol metadata');
|
||||
|
||||
const rows = await this.query(sql);
|
||||
|
||||
this.logger.info({ count: rows.length }, 'Loaded symbol metadata from Iceberg');
|
||||
|
||||
return rows;
|
||||
} catch (error: any) {
|
||||
this.logger.error({ error: error.message }, 'Failed to query symbol metadata');
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Query OHLC data from trading.ohlc table
   *
   * Returns candle rows for the ticker/period filtered to the inclusive
   * [start_time, end_time] range, ordered by ascending timestamp. Each row's
   * `timestamp` is converted from the string DuckDB returns to a JS number.
   *
   * @param ticker         market identifier
   * @param period_seconds candle period in seconds
   * @param start_time     range start in microseconds (inclusive)
   * @param end_time       range end in microseconds (inclusive)
   * @returns matching rows, or [] when the table is missing or the query
   *          fails (errors are logged, never thrown)
   */
  async queryOHLC(
    ticker: string,
    period_seconds: number,
    start_time: bigint, // microseconds
    end_time: bigint // microseconds
  ): Promise<any[]> {
    await this.initialize();

    try {
      const tablePath = await this.getTablePath(
        this.ohlcNamespace,
        'ohlc',
        this.ohlcCatalogUri
      );

      if (!tablePath) {
        this.logger.warn('OHLC table not found');
        return [];
      }

      // Query the Iceberg table with filters
      const sql = `
        SELECT
          timestamp,
          ticker,
          period_seconds,
          open,
          high,
          low,
          close,
          volume
        FROM iceberg_scan('${tablePath}')
        WHERE ticker = ?
          AND period_seconds = ?
          AND timestamp >= ?
          AND timestamp <= ?
        ORDER BY timestamp ASC
      `;

      // bigint bounds are bound as decimal strings for the DuckDB driver.
      const params = [
        ticker,
        period_seconds,
        start_time.toString(),
        end_time.toString()
      ];

      // NOTE(review): start_time/end_time are logged here as raw bigint values —
      // confirm the logger's serializer handles BigInt.
      this.logger.debug({ ticker, period_seconds, start_time, end_time }, 'Querying OHLC data');

      const rows = await this.query(sql, params);

      this.logger.info({
        ticker,
        period_seconds,
        count: rows.length
      }, 'Loaded OHLC data from Iceberg');

      // Convert timestamp strings to numbers (microseconds as Number is fine for display)
      return rows.map((row: any) => ({
        ...row,
        timestamp: Number(row.timestamp)
      }));
    } catch (error: any) {
      // Best-effort read path: log with context and return an empty result.
      this.logger.error({
        error: error.message,
        ticker,
        period_seconds
      }, 'Failed to query OHLC data');
      return [];
    }
  }
|
||||
|
||||
/**
|
||||
* Check if OHLC data exists for the given parameters
|
||||
*/
|
||||
async hasOHLCData(
|
||||
ticker: string,
|
||||
period_seconds: number,
|
||||
start_time: bigint,
|
||||
end_time: bigint
|
||||
): Promise<boolean> {
|
||||
await this.initialize();
|
||||
|
||||
try {
|
||||
const tablePath = await this.getTablePath(
|
||||
this.ohlcNamespace,
|
||||
'ohlc',
|
||||
this.ohlcCatalogUri
|
||||
);
|
||||
|
||||
if (!tablePath) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const sql = `
|
||||
SELECT COUNT(*) as count
|
||||
FROM iceberg_scan('${tablePath}')
|
||||
WHERE ticker = ?
|
||||
AND period_seconds = ?
|
||||
AND timestamp >= ?
|
||||
AND timestamp <= ?
|
||||
`;
|
||||
|
||||
const params = [
|
||||
ticker,
|
||||
period_seconds,
|
||||
start_time.toString(),
|
||||
end_time.toString()
|
||||
];
|
||||
|
||||
const rows = await this.query<{ count: number }>(sql, params);
|
||||
return rows.length > 0 && rows[0].count > 0;
|
||||
} catch (error: any) {
|
||||
this.logger.error({ error: error.message }, 'Failed to check OHLC data existence');
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find missing OHLC data ranges
|
||||
*/
|
||||
async findMissingOHLCRanges(
|
||||
ticker: string,
|
||||
period_seconds: number,
|
||||
start_time: bigint,
|
||||
end_time: bigint
|
||||
): Promise<Array<[bigint, bigint]>> {
|
||||
await this.initialize();
|
||||
|
||||
try {
|
||||
const data = await this.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
|
||||
if (data.length === 0) {
|
||||
// All data is missing
|
||||
return [[start_time, end_time]];
|
||||
}
|
||||
|
||||
// Check if we have continuous data
|
||||
// For now, simple check: if we have any data, assume complete
|
||||
// TODO: Implement proper gap detection by checking for missing periods
|
||||
const periodMicros = BigInt(period_seconds) * 1000000n;
|
||||
const expectedBars = Number((end_time - start_time) / periodMicros);
|
||||
|
||||
if (data.length < expectedBars * 0.95) { // Allow 5% tolerance
|
||||
this.logger.debug({
|
||||
ticker,
|
||||
expected: expectedBars,
|
||||
actual: data.length,
|
||||
}, 'Incomplete OHLC data detected');
|
||||
return [[start_time, end_time]]; // Request full range
|
||||
}
|
||||
|
||||
// Data appears complete
|
||||
return [];
|
||||
} catch (error: any) {
|
||||
this.logger.error({ error: error.message }, 'Failed to find missing OHLC ranges');
|
||||
// Return full range on error (safe default)
|
||||
return [[start_time, end_time]];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the DuckDB connection
|
||||
*/
|
||||
async close(): Promise<void> {
|
||||
if (this.conn) {
|
||||
const close = promisify(this.conn.close.bind(this.conn));
|
||||
await close();
|
||||
this.conn = null;
|
||||
}
|
||||
if (this.db) {
|
||||
const close = promisify(this.db.close.bind(this.db));
|
||||
await close();
|
||||
this.db = null;
|
||||
}
|
||||
this.initialized = false;
|
||||
this.logger.info('DuckDB client closed');
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,32 @@
|
||||
import { IcebergRestCatalog } from 'iceberg-js';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { DuckDBClient } from './duckdb-client.js';
|
||||
|
||||
/**
|
||||
* Iceberg client configuration
|
||||
*
|
||||
* Supports separate catalog/warehouse configurations for:
|
||||
* 1. Conversation data (catalogUri + namespace, typically 'gateway')
|
||||
* 2. OHLC/Trading data (ohlcCatalogUri + ohlcNamespace, typically 'trading')
|
||||
*
|
||||
* This allows for:
|
||||
* - Different S3 buckets/warehouses per data type
|
||||
* - Different retention policies
|
||||
* - Independent scaling and management
|
||||
* - Cost optimization (e.g., cheaper storage class for old conversations)
|
||||
*/
|
||||
export interface IcebergConfig {
|
||||
// Conversation/Gateway data catalog
|
||||
catalogUri: string;
|
||||
namespace: string;
|
||||
|
||||
// S3 configuration for conversation data
|
||||
s3Endpoint?: string;
|
||||
s3AccessKey?: string;
|
||||
s3SecretKey?: string;
|
||||
|
||||
// OHLC/Trading data catalog (can be same or different from conversation catalog)
|
||||
ohlcCatalogUri?: string;
|
||||
ohlcNamespace?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -40,79 +57,73 @@ export interface IcebergCheckpoint {
|
||||
/**
|
||||
* Iceberg REST client wrapper for durable storage
|
||||
*
|
||||
* Uses Iceberg REST Catalog API to:
|
||||
* - Query conversation history from gateway.conversations
|
||||
* - Query checkpoints from gateway.checkpoints
|
||||
* - Note: Writes are handled by Flink; this is read-only
|
||||
* Architecture:
|
||||
* - Uses DuckDB with Iceberg extension for querying Parquet data
|
||||
* - Supports SEPARATE catalogs/warehouses for conversation vs OHLC data
|
||||
* - Writes are handled by Flink via Kafka; this client is READ-ONLY
|
||||
*
|
||||
* For writes, we'll send to a Kafka topic that Flink consumes
|
||||
* (or implement direct REST catalog write if needed)
|
||||
* Data separation:
|
||||
* 1. Conversation data: catalogUri + namespace (e.g., http://catalog:8181 + 'gateway')
|
||||
* - Tables: conversations, checkpoints
|
||||
* - Can use different warehouse/S3 bucket in the future
|
||||
*
|
||||
* 2. OHLC/Trading data: ohlcCatalogUri + ohlcNamespace (e.g., http://catalog:8181 + 'trading')
|
||||
* - Tables: ohlc, symbol_metadata
|
||||
* - Can use different warehouse/S3 bucket for cost optimization
|
||||
*
|
||||
* To use separate warehouses in production:
|
||||
* 1. Deploy two Iceberg REST catalog instances (or configure multi-warehouse)
|
||||
* 2. Point catalogUri to conversations warehouse
|
||||
* 3. Point ohlcCatalogUri to trading warehouse
|
||||
* 4. Update Flink configuration to write to the correct catalogs
|
||||
*/
|
||||
export class IcebergClient {
|
||||
private namespace: string;
|
||||
private duckdb: DuckDBClient;
|
||||
private logger: FastifyBaseLogger;
|
||||
private namespace: string;
|
||||
private ohlcNamespace: string;
|
||||
|
||||
constructor(config: IcebergConfig, logger: FastifyBaseLogger) {
|
||||
this.logger = logger;
|
||||
this.namespace = config.namespace;
|
||||
this.ohlcNamespace = config.ohlcNamespace || 'trading';
|
||||
|
||||
// Initialize Iceberg REST client
|
||||
const clientConfig: any = {
|
||||
uri: config.catalogUri,
|
||||
};
|
||||
|
||||
if (config.s3Endpoint) {
|
||||
clientConfig.s3 = {
|
||||
endpoint: config.s3Endpoint,
|
||||
'access-key-id': config.s3AccessKey,
|
||||
'secret-access-key': config.s3SecretKey,
|
||||
'path-style-access': 'true',
|
||||
};
|
||||
}
|
||||
|
||||
// TODO: Store client for queries when needed
|
||||
new IcebergRestCatalog(clientConfig);
|
||||
// Initialize DuckDB client for querying Iceberg tables
|
||||
// DuckDB will query tables from the appropriate catalog based on the data type
|
||||
this.duckdb = new DuckDBClient(
|
||||
{
|
||||
catalogUri: config.catalogUri,
|
||||
namespace: config.namespace,
|
||||
ohlcCatalogUri: config.ohlcCatalogUri,
|
||||
ohlcNamespace: config.ohlcNamespace,
|
||||
s3Endpoint: config.s3Endpoint,
|
||||
s3AccessKey: config.s3AccessKey,
|
||||
s3SecretKey: config.s3SecretKey,
|
||||
},
|
||||
logger
|
||||
);
|
||||
|
||||
this.logger.info({
|
||||
catalogUri: config.catalogUri,
|
||||
namespace: this.namespace,
|
||||
}, 'Iceberg client initialized');
|
||||
ohlcCatalogUri: config.ohlcCatalogUri || config.catalogUri,
|
||||
ohlcNamespace: this.ohlcNamespace,
|
||||
}, 'Iceberg client initialized with separate conversation and OHLC catalogs');
|
||||
}
|
||||
|
||||
/**
|
||||
* Query messages from gateway.conversations table
|
||||
*
|
||||
* Note: This is a simplified interface. The actual Iceberg REST API
|
||||
* returns table metadata, and you'd need to query the underlying
|
||||
* Parquet files via S3 or use a query engine like DuckDB/Trino.
|
||||
*
|
||||
* For now, we'll document the expected schema and leave actual
|
||||
* implementation as TODO since Flink handles writes.
|
||||
*/
|
||||
async queryMessages(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
_options?: {
|
||||
options?: {
|
||||
startTime?: number;
|
||||
endTime?: number;
|
||||
limit?: number;
|
||||
}
|
||||
): Promise<IcebergMessage[]> {
|
||||
this.logger.debug({
|
||||
userId,
|
||||
sessionId,
|
||||
table: `${this.namespace}.conversations`,
|
||||
}, 'Querying messages from Iceberg');
|
||||
|
||||
// TODO: Implement actual Iceberg query
|
||||
// Options:
|
||||
// 1. Use iceberg-js to get table metadata and Parquet file locations
|
||||
// 2. Query Parquet files directly via S3 + parquet-wasm
|
||||
// 3. Use external query engine (DuckDB, Trino, Presto)
|
||||
// 4. Use Flink SQL REST endpoint for queries
|
||||
|
||||
this.logger.warn('Iceberg query not yet implemented - returning empty array');
|
||||
return [];
|
||||
return this.duckdb.queryMessages(userId, sessionId, options);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -123,16 +134,7 @@ export class IcebergClient {
|
||||
sessionId: string,
|
||||
checkpointId?: string
|
||||
): Promise<IcebergCheckpoint | null> {
|
||||
this.logger.debug({
|
||||
userId,
|
||||
sessionId,
|
||||
checkpointId,
|
||||
table: `${this.namespace}.checkpoints`,
|
||||
}, 'Querying checkpoint from Iceberg');
|
||||
|
||||
// TODO: Implement actual Iceberg query
|
||||
this.logger.warn('Iceberg query not yet implemented - returning null');
|
||||
return null;
|
||||
return this.duckdb.queryCheckpoint(userId, sessionId, checkpointId);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -188,6 +190,49 @@ export class IcebergClient {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Query OHLC data from trading.ohlc table
|
||||
*/
|
||||
async queryOHLC(
|
||||
ticker: string,
|
||||
period_seconds: number,
|
||||
start_time: bigint, // microseconds
|
||||
end_time: bigint // microseconds
|
||||
): Promise<any[]> {
|
||||
return this.duckdb.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if OHLC data exists for the given parameters
|
||||
*/
|
||||
async hasOHLCData(
|
||||
ticker: string,
|
||||
period_seconds: number,
|
||||
start_time: bigint,
|
||||
end_time: bigint
|
||||
): Promise<boolean> {
|
||||
return this.duckdb.hasOHLCData(ticker, period_seconds, start_time, end_time);
|
||||
}
|
||||
|
||||
/**
|
||||
* Find missing OHLC data ranges
|
||||
*/
|
||||
async findMissingOHLCRanges(
|
||||
ticker: string,
|
||||
period_seconds: number,
|
||||
start_time: bigint,
|
||||
end_time: bigint
|
||||
): Promise<Array<[bigint, bigint]>> {
|
||||
return this.duckdb.findMissingOHLCRanges(ticker, period_seconds, start_time, end_time);
|
||||
}
|
||||
|
||||
/**
|
||||
* Query all symbols from symbol_metadata table
|
||||
*/
|
||||
async queryAllSymbols(): Promise<any[]> {
|
||||
return this.duckdb.queryAllSymbols();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
180
gateway/src/clients/zmq-protocol.ts
Normal file
180
gateway/src/clients/zmq-protocol.ts
Normal file
@@ -0,0 +1,180 @@
|
||||
/**
|
||||
* ZMQ Protocol encoding/decoding using Protobuf
|
||||
*
|
||||
* Protocol format (as defined in protobuf/ingestor.proto):
|
||||
* Frame 1: [1 byte: protocol version]
|
||||
* Frame 2: [1 byte: message type ID][N bytes: protobuf message]
|
||||
*
|
||||
* For PUB/SUB: [topic frame][version frame][message frame]
|
||||
*/
|
||||
|
||||
import protobuf from 'protobufjs';
|
||||
import { readFileSync } from 'fs';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { dirname, join } from 'path';
|
||||
import type {
|
||||
SubmitHistoricalRequest,
|
||||
SubmitResponse,
|
||||
HistoryReadyNotification,
|
||||
SubmitStatus,
|
||||
NotificationStatus,
|
||||
} from '../types/ohlc.js';
|
||||
|
||||
// Recreate CommonJS-style __filename/__dirname in this ES module so the
// protobuf directory can be resolved relative to the compiled file.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

/**
 * Protocol constants
 */
// Single-byte protocol version carried in the first ZMQ frame of every message.
export const PROTOCOL_VERSION = 0x01;

// One-byte message type IDs carried as the first byte of the message frame.
export enum MessageType {
  SUBMIT_HISTORICAL_REQUEST = 0x10,
  SUBMIT_RESPONSE = 0x11,
  HISTORY_READY_NOTIFICATION = 0x12,
}

// Load protobuf types at runtime (same pattern as ingestor)
// Proto files are copied to /app/protobuf/ in the Docker image
const protoDir = join(__dirname, '../..', 'protobuf');
const root = new protobuf.Root();

// Load proto file and parse it
// NOTE: synchronous read at module load time — a missing/unreadable proto
// file makes importing this module throw, failing fast at startup.
const ingestorProto = readFileSync(join(protoDir, 'ingestor.proto'), 'utf8');
protobuf.parse(ingestorProto, root);

// Export message types
// Reflection-based message types resolved from the parsed definitions;
// lookupType throws if a message name is absent from the proto.
const SubmitHistoricalRequestType = root.lookupType('SubmitHistoricalRequest');
const SubmitResponseType = root.lookupType('SubmitResponse');
const HistoryReadyNotificationType = root.lookupType('HistoryReadyNotification');
|
||||
|
||||
/**
|
||||
* Encode SubmitHistoricalRequest to ZMQ frames
|
||||
*
|
||||
* Returns: [version_frame, message_frame]
|
||||
*/
|
||||
export function encodeSubmitHistoricalRequest(req: SubmitHistoricalRequest): Buffer[] {
|
||||
const versionFrame = Buffer.from([PROTOCOL_VERSION]);
|
||||
|
||||
// Convert to protobuf-compatible format (pbjs uses camelCase)
|
||||
// Note: protobufjs handles bigint/number conversion automatically for uint64
|
||||
const protoMessage = {
|
||||
requestId: req.request_id,
|
||||
ticker: req.ticker,
|
||||
startTime: Number(req.start_time), // Convert bigint to number for protobuf
|
||||
endTime: Number(req.end_time),
|
||||
periodSeconds: req.period_seconds,
|
||||
limit: req.limit,
|
||||
clientId: req.client_id,
|
||||
};
|
||||
|
||||
// Encode as protobuf
|
||||
const message = SubmitHistoricalRequestType.create(protoMessage);
|
||||
const payloadBuffer = SubmitHistoricalRequestType.encode(message).finish();
|
||||
|
||||
const messageFrame = Buffer.concat([
|
||||
Buffer.from([MessageType.SUBMIT_HISTORICAL_REQUEST]),
|
||||
Buffer.from(payloadBuffer),
|
||||
]);
|
||||
|
||||
return [versionFrame, messageFrame];
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode SubmitResponse from ZMQ frames
|
||||
*
|
||||
* Input: [version_frame, message_frame]
|
||||
*/
|
||||
export function decodeSubmitResponse(frames: Buffer[]): SubmitResponse {
|
||||
try {
|
||||
if (frames.length < 2) {
|
||||
throw new Error(`Expected 2 frames, got ${frames.length}`);
|
||||
}
|
||||
|
||||
const versionFrame = frames[0];
|
||||
const messageFrame = frames[1];
|
||||
|
||||
// Validate version
|
||||
if (versionFrame[0] !== PROTOCOL_VERSION) {
|
||||
throw new Error(`Unsupported protocol version: ${versionFrame[0]}`);
|
||||
}
|
||||
|
||||
// Validate message type
|
||||
const messageType = messageFrame[0];
|
||||
if (messageType !== MessageType.SUBMIT_RESPONSE) {
|
||||
throw new Error(`Expected SUBMIT_RESPONSE (0x11), got 0x${messageType.toString(16)}`);
|
||||
}
|
||||
|
||||
// Decode protobuf payload
|
||||
const payloadBuffer = messageFrame.slice(1);
|
||||
const decoded = SubmitResponseType.decode(payloadBuffer);
|
||||
const payload = SubmitResponseType.toObject(decoded, {
|
||||
longs: String,
|
||||
enums: Number, // Keep enums as numbers for comparison
|
||||
defaults: true,
|
||||
});
|
||||
|
||||
return {
|
||||
request_id: payload.requestId,
|
||||
status: payload.status as SubmitStatus,
|
||||
error_message: payload.errorMessage || undefined,
|
||||
notification_topic: payload.notificationTopic,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error decoding SubmitResponse:', error);
|
||||
console.error('Frame count:', frames.length);
|
||||
if (frames.length >= 2) {
|
||||
console.error('Version frame:', frames[0].toString('hex'));
|
||||
console.error('Message frame (first 100 bytes):', frames[1].slice(0, 100).toString('hex'));
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Decode HistoryReadyNotification from ZMQ frames
 *
 * Input: [topic_frame, version_frame, message_frame] (for SUB socket)
 * The topic frame (frames[0]) is the subscription topic and is skipped; the
 * version and type bytes are validated before the protobuf payload is
 * decoded. 64-bit timestamp fields are restored as BigInt.
 *
 * @throws Error on short frame lists, version mismatch, or wrong type byte
 */
export function decodeHistoryReadyNotification(frames: Buffer[]): HistoryReadyNotification {
  if (frames.length < 3) {
    throw new Error(`Expected 3 frames (topic, version, message), got ${frames.length}`);
  }

  // frames[0] is the subscription topic; payload starts at frames[1].
  const versionFrame = frames[1];
  const messageFrame = frames[2];

  // Validate version
  if (versionFrame[0] !== PROTOCOL_VERSION) {
    throw new Error(`Unsupported protocol version: ${versionFrame[0]}`);
  }

  // Validate message type
  const messageType = messageFrame[0];
  if (messageType !== MessageType.HISTORY_READY_NOTIFICATION) {
    throw new Error(`Expected HISTORY_READY_NOTIFICATION (0x12), got 0x${messageType.toString(16)}`);
  }

  // Decode protobuf payload
  const payloadBuffer = messageFrame.slice(1);
  const decoded = HistoryReadyNotificationType.decode(payloadBuffer);
  // longs: String keeps 64-bit fields lossless; they're widened to BigInt below.
  const payload = HistoryReadyNotificationType.toObject(decoded, {
    longs: String,
    enums: Number, // Keep enums as numbers for comparison
    defaults: true,
  });

  // Map pbjs camelCase fields to the snake_case wire type, restoring
  // microsecond timestamps as BigInt.
  return {
    request_id: payload.requestId,
    ticker: payload.ticker,
    period_seconds: payload.periodSeconds,
    start_time: BigInt(payload.startTime),
    end_time: BigInt(payload.endTime),
    status: payload.status as NotificationStatus,
    error_message: payload.errorMessage || undefined,
    iceberg_namespace: payload.icebergNamespace,
    iceberg_table: payload.icebergTable,
    row_count: payload.rowCount,
    completed_at: BigInt(payload.completedAt),
  };
}
|
||||
356
gateway/src/clients/zmq-relay-client.ts
Normal file
356
gateway/src/clients/zmq-relay-client.ts
Normal file
@@ -0,0 +1,356 @@
|
||||
/**
|
||||
* ZMQ Relay Client for historical data requests
|
||||
*
|
||||
* IMPORTANT: Implements race-condition-free notification subscription
|
||||
* by subscribing to RESPONSE:{client_id} topic BEFORE sending requests.
|
||||
*
|
||||
* Architecture:
|
||||
* - REQ socket to relay (port 5559) for SubmitHistoricalRequest
|
||||
* - SUB socket to relay (port 5558) for HistoryReadyNotification
|
||||
* - Notification topic: RESPONSE:{client_id} (deterministic, client-generated)
|
||||
*/
|
||||
|
||||
import * as zmq from 'zeromq';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { randomUUID } from 'crypto';
|
||||
import {
|
||||
encodeSubmitHistoricalRequest,
|
||||
decodeSubmitResponse,
|
||||
decodeHistoryReadyNotification,
|
||||
} from './zmq-protocol.js';
|
||||
import type {
|
||||
SubmitHistoricalRequest,
|
||||
HistoryReadyNotification,
|
||||
} from '../types/ohlc.js';
|
||||
import {
|
||||
SubmitStatus,
|
||||
NotificationStatus,
|
||||
} from '../types/ohlc.js';
|
||||
|
||||
/**
 * Configuration for {@link ZMQRelayClient}.
 */
export interface ZMQRelayConfig {
  relayRequestEndpoint: string; // e.g., "tcp://relay:5559"
  relayNotificationEndpoint: string; // e.g., "tcp://relay:5558"
  clientId?: string; // Optional client ID, will generate if not provided
  requestTimeout?: number; // Request timeout in ms (default: 30000)
  onMetadataUpdate?: () => Promise<void>; // Callback when symbol metadata updates
}
|
||||
|
||||
/**
 * Book-keeping for one in-flight request awaiting its HistoryReadyNotification.
 */
interface PendingRequest {
  resolve: (notification: HistoryReadyNotification) => void;
  reject: (error: Error) => void;
  timeoutHandle: NodeJS.Timeout; // cleared when the notification arrives or the request fails
}
|
||||
|
||||
/**
|
||||
* ZMQ Relay Client
|
||||
*
|
||||
* Provides async API for submitting historical data requests and waiting for
|
||||
* completion notifications.
|
||||
*/
|
||||
export class ZMQRelayClient {
|
||||
private config: Required<ZMQRelayConfig>;
|
||||
private logger: FastifyBaseLogger;
|
||||
|
||||
private reqSocket?: zmq.Request;
|
||||
private subSocket?: zmq.Subscriber;
|
||||
|
||||
private notificationTopic: string;
|
||||
private pendingRequests: Map<string, PendingRequest> = new Map();
|
||||
|
||||
private connected = false;
|
||||
private notificationListenerRunning = false;
|
||||
|
||||
constructor(config: ZMQRelayConfig, logger: FastifyBaseLogger) {
|
||||
this.config = {
|
||||
relayRequestEndpoint: config.relayRequestEndpoint,
|
||||
relayNotificationEndpoint: config.relayNotificationEndpoint,
|
||||
clientId: config.clientId || `gateway-${randomUUID().slice(0, 8)}`,
|
||||
requestTimeout: config.requestTimeout || 30000,
|
||||
};
|
||||
this.logger = logger;
|
||||
this.notificationTopic = `RESPONSE:${this.config.clientId}`;
|
||||
}
|
||||
|
||||
  /**
   * Connect to relay and start notification listener
   *
   * CRITICAL: This MUST be called before making any requests.
   * The notification listener subscribes to RESPONSE:{client_id} topic
   * BEFORE any requests are sent, preventing race conditions.
   *
   * Idempotent: returns immediately if already connected.
   */
  async connect(): Promise<void> {
    if (this.connected) {
      return;
    }

    this.logger.info({
      requestEndpoint: this.config.relayRequestEndpoint,
      notificationEndpoint: this.config.relayNotificationEndpoint,
      clientId: this.config.clientId,
      notificationTopic: this.notificationTopic,
    }, 'Connecting to ZMQ relay');

    // Create REQ socket for requests
    this.reqSocket = new zmq.Request();
    this.reqSocket.connect(this.config.relayRequestEndpoint);

    // Create SUB socket for notifications
    this.subSocket = new zmq.Subscriber();
    this.subSocket.connect(this.config.relayNotificationEndpoint);

    // Subscribe to our notification topic BEFORE sending any requests
    this.subSocket.subscribe(this.notificationTopic);

    // Subscribe to system metadata update notifications
    this.subSocket.subscribe('METADATA_UPDATE');

    this.logger.info({
      topics: [this.notificationTopic, 'METADATA_UPDATE']
    }, 'Subscribed to notification topics');

    // Start notification listener
    this.startNotificationListener();

    // Give sockets a moment to connect
    // NOTE(review): fixed 100ms grace period — messages published before the
    // SUB connection completes could still be missed; confirm acceptable.
    await new Promise(resolve => setTimeout(resolve, 100));

    this.connected = true;
    this.logger.info('ZMQ relay client connected');
  }
|
||||
|
||||
  /**
   * Request historical OHLC data
   *
   * IMPORTANT: Call connect() before using this method.
   *
   * Flow: register a pending-request entry (with timeout) BEFORE sending,
   * send SubmitHistoricalRequest over the REQ socket, check the immediate
   * SubmitResponse acknowledgment, then await the HistoryReadyNotification
   * delivered by the notification listener.
   *
   * @param ticker Market identifier (e.g., "BINANCE:BTC/USDT")
   * @param period_seconds OHLC period in seconds
   * @param start_time Start timestamp in MICROSECONDS
   * @param end_time End timestamp in MICROSECONDS
   * @param limit Optional limit on number of candles
   * @returns Promise that resolves when data is ready in Iceberg
   * @throws Error if not connected, the relay rejects the request, the
   *         request times out, or the notification reports failure
   */
  async requestHistoricalOHLC(
    ticker: string,
    period_seconds: number,
    start_time: bigint,
    end_time: bigint,
    limit?: number
  ): Promise<HistoryReadyNotification> {
    if (!this.connected || !this.reqSocket) {
      throw new Error('Client not connected. Call connect() first.');
    }

    // Client-generated ID correlates the request with its notification.
    const request_id = randomUUID();

    this.logger.debug({
      request_id,
      ticker,
      period_seconds,
      start_time: start_time.toString(),
      end_time: end_time.toString(),
    }, 'Submitting historical OHLC request');

    const request: SubmitHistoricalRequest = {
      request_id,
      ticker,
      start_time,
      end_time,
      period_seconds,
      limit,
      client_id: this.config.clientId,
    };

    // Register pending request BEFORE sending (notification listener is already running)
    const resultPromise = new Promise<HistoryReadyNotification>((resolve, reject) => {
      const timeoutHandle = setTimeout(() => {
        this.pendingRequests.delete(request_id);
        reject(new Error(`Request ${request_id} timed out after ${this.config.requestTimeout}ms`));
      }, this.config.requestTimeout);

      this.pendingRequests.set(request_id, { resolve, reject, timeoutHandle });
    });

    // Encode and send request
    const frames = encodeSubmitHistoricalRequest(request);

    try {
      // Send two frames: version, then message
      await this.reqSocket.send(frames);

      // Wait for immediate acknowledgment
      const responseFrames = await this.reqSocket.receive();

      this.logger.debug({
        frameCount: responseFrames.length,
        frameLengths: Array.from(responseFrames).map(f => f.length),
      }, 'Received response frames from relay');

      const response = decodeSubmitResponse(Array.from(responseFrames));

      this.logger.debug({
        request_id,
        response,
      }, 'Decoded SubmitResponse');

      if (response.status !== SubmitStatus.QUEUED) {
        // Request was rejected - clean up pending request
        const pending = this.pendingRequests.get(request_id);
        if (pending) {
          clearTimeout(pending.timeoutHandle);
          this.pendingRequests.delete(request_id);
        }
        throw new Error(`Request rejected: ${response.error_message || 'Unknown error'}`);
      }

      this.logger.debug({ request_id }, 'Request queued, waiting for notification');

      // Wait for notification (already subscribed to topic)
      return await resultPromise;

    } catch (error) {
      // Clean up pending request on error
      // NOTE(review): resultPromise is abandoned unsettled on this path — the
      // timeout was cleared and the entry removed; confirm no unhandled-
      // rejection warning is possible.
      const pending = this.pendingRequests.get(request_id);
      if (pending) {
        clearTimeout(pending.timeoutHandle);
        this.pendingRequests.delete(request_id);
      }

      this.logger.error({
        error,
        request_id,
        ticker,
        errorMessage: error instanceof Error ? error.message : String(error),
        errorStack: error instanceof Error ? error.stack : undefined,
      }, 'Failed to submit historical OHLC request');

      throw error;
    }
  }
|
||||
|
||||
  /**
   * Start notification listener
   *
   * CRITICAL: This runs BEFORE any requests are submitted to prevent race condition.
   * We're already subscribed to RESPONSE:{client_id} and METADATA_UPDATE, so we'll receive all notifications.
   *
   * Spawns a detached async loop over the SUB socket. Per-message failures are
   * logged and do not stop the loop; a loop-level failure clears the running
   * flag so a later call can restart the listener. No-op if already running
   * or the SUB socket is absent.
   */
  private startNotificationListener(): void {
    if (this.notificationListenerRunning || !this.subSocket) {
      return;
    }

    this.notificationListenerRunning = true;

    // Listen for notifications asynchronously
    // (fire-and-forget IIFE; errors are handled inside the loop)
    (async () => {
      try {
        for await (const frames of this.subSocket!) {
          try {
            // First frame is the topic
            const topic = frames[0].toString();

            // Handle metadata update notifications
            if (topic === 'METADATA_UPDATE') {
              this.logger.info('Received METADATA_UPDATE notification');

              // Call the onMetadataUpdate callback if configured
              if (this.config.onMetadataUpdate) {
                try {
                  await this.config.onMetadataUpdate();
                } catch (error) {
                  // Callback failures must not kill the listener loop.
                  this.logger.error({ error }, 'Failed to handle metadata update');
                }
              }
              continue;
            }

            // Handle history ready notifications
            const notification = decodeHistoryReadyNotification(Array.from(frames));

            this.logger.debug({
              request_id: notification.request_id,
              status: NotificationStatus[notification.status],
              row_count: notification.row_count,
            }, 'Received history ready notification');

            // Check if we're waiting for this request
            const pending = this.pendingRequests.get(notification.request_id);
            if (pending) {
              // Settle the waiting promise and cancel its timeout.
              clearTimeout(pending.timeoutHandle);
              this.pendingRequests.delete(notification.request_id);

              if (notification.status === NotificationStatus.OK) {
                pending.resolve(notification);
              } else {
                pending.reject(new Error(
                  `Historical data request failed: ${notification.error_message || NotificationStatus[notification.status]}`
                ));
              }
            } else {
              // Either already timed out or addressed to a stale client ID.
              this.logger.warn({
                request_id: notification.request_id,
              }, 'Received notification for unknown request');
            }
          } catch (error) {
            this.logger.error({ error }, 'Failed to process notification');
          }
        }
      } catch (error) {
        // Suppress the expected iteration error raised when close() tears
        // down the socket (running flag already cleared at that point).
        if (this.notificationListenerRunning) {
          this.logger.error({ error }, 'Notification listener error');
        }
      } finally {
        this.notificationListenerRunning = false;
      }
    })();

    this.logger.debug('Notification listener started');
  }
|
||||
|
||||
/**
|
||||
* Close the client and cleanup resources
|
||||
*/
|
||||
async close(): Promise<void> {
|
||||
if (!this.connected) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.logger.info('Closing ZMQ relay client');
|
||||
|
||||
this.notificationListenerRunning = false;
|
||||
|
||||
// Reject all pending requests
|
||||
for (const [, pending] of this.pendingRequests) {
|
||||
clearTimeout(pending.timeoutHandle);
|
||||
pending.reject(new Error('Client closed'));
|
||||
}
|
||||
this.pendingRequests.clear();
|
||||
|
||||
// Close sockets
|
||||
if (this.subSocket) {
|
||||
this.subSocket.close();
|
||||
this.subSocket = undefined;
|
||||
}
|
||||
if (this.reqSocket) {
|
||||
this.reqSocket.close();
|
||||
this.reqSocket = undefined;
|
||||
}
|
||||
|
||||
this.connected = false;
|
||||
this.logger.info('ZMQ relay client closed');
|
||||
}
|
||||
|
||||
  /**
   * Check if client is connected.
   *
   * Reports the internal `connected` flag; close() clears it after teardown
   * (presumably connect() sets it — the setter is outside this excerpt).
   */
  isConnected(): boolean {
    return this.connected;
  }
|
||||
|
||||
  /**
   * Get the client ID used for notifications.
   *
   * @returns the `clientId` supplied in the client configuration (used as the
   *          RESPONSE:{client_id} subscription topic).
   */
  getClientId(): string {
    return this.config.clientId;
  }
|
||||
}
|
||||
@@ -94,7 +94,8 @@ export class KubernetesClient {
|
||||
});
|
||||
return true;
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode === 404) {
|
||||
// @kubernetes/client-node v1.x throws errors with either .code or .response.statusCode
|
||||
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
|
||||
return false;
|
||||
}
|
||||
throw error;
|
||||
@@ -171,7 +172,8 @@ export class KubernetesClient {
|
||||
}
|
||||
} catch (error: any) {
|
||||
// If resource already exists, log warning but continue
|
||||
if (error.response?.statusCode === 409) {
|
||||
const is409 = error.code === 409 || error.response?.statusCode === 409 || error.statusCode === 409;
|
||||
if (is409) {
|
||||
this.config.logger.warn(
|
||||
{ kind: doc.kind, name: doc.metadata?.name },
|
||||
'Resource already exists, skipping'
|
||||
@@ -246,7 +248,7 @@ export class KubernetesClient {
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, pollInterval));
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode === 404) {
|
||||
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
|
||||
this.config.logger.warn({ deploymentName }, 'Deployment not found');
|
||||
return false;
|
||||
}
|
||||
@@ -281,7 +283,7 @@ export class KubernetesClient {
|
||||
);
|
||||
return null;
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode === 404) {
|
||||
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
|
||||
this.config.logger.warn({ serviceName }, 'Service not found');
|
||||
return null;
|
||||
}
|
||||
@@ -308,7 +310,8 @@ export class KubernetesClient {
|
||||
});
|
||||
this.config.logger.info({ deploymentName }, 'Deleted deployment');
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode !== 404) {
|
||||
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
|
||||
if (!is404) {
|
||||
this.config.logger.warn({ deploymentName, error }, 'Failed to delete deployment');
|
||||
}
|
||||
}
|
||||
@@ -321,7 +324,8 @@ export class KubernetesClient {
|
||||
});
|
||||
this.config.logger.info({ serviceName }, 'Deleted service');
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode !== 404) {
|
||||
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
|
||||
if (!is404) {
|
||||
this.config.logger.warn({ serviceName, error }, 'Failed to delete service');
|
||||
}
|
||||
}
|
||||
@@ -334,7 +338,8 @@ export class KubernetesClient {
|
||||
});
|
||||
this.config.logger.info({ pvcName }, 'Deleted PVC');
|
||||
} catch (error: any) {
|
||||
if (error.response?.statusCode !== 404) {
|
||||
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
|
||||
if (!is404) {
|
||||
this.config.logger.warn({ pvcName, error }, 'Failed to delete PVC');
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,12 @@ export interface ContainerStatus {
|
||||
mcpEndpoint: string;
|
||||
}
|
||||
|
||||
export interface EnsureContainerResult {
|
||||
mcpEndpoint: string;
|
||||
wasCreated: boolean;
|
||||
isSpinningUp: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Container manager orchestrates agent container lifecycle
|
||||
*/
|
||||
@@ -30,11 +36,13 @@ export class ContainerManager {
|
||||
/**
|
||||
* Ensure user's container is running and ready
|
||||
* Returns the MCP endpoint URL
|
||||
* If waitForReady is false, will return immediately after creating the deployment
|
||||
*/
|
||||
async ensureContainerRunning(
|
||||
userId: string,
|
||||
license: UserLicense
|
||||
): Promise<{ mcpEndpoint: string; wasCreated: boolean }> {
|
||||
license: UserLicense,
|
||||
waitForReady: boolean = true
|
||||
): Promise<EnsureContainerResult> {
|
||||
const deploymentName = KubernetesClient.getDeploymentName(userId);
|
||||
const mcpEndpoint = KubernetesClient.getMcpEndpoint(userId, this.config.namespace);
|
||||
|
||||
@@ -49,6 +57,7 @@ export class ContainerManager {
|
||||
if (exists) {
|
||||
this.config.logger.info({ userId, deploymentName }, 'Container deployment already exists');
|
||||
|
||||
if (waitForReady) {
|
||||
// Wait for it to be ready (in case it's starting up)
|
||||
const ready = await this.config.k8sClient.waitForDeploymentReady(deploymentName, 30000);
|
||||
|
||||
@@ -59,8 +68,9 @@ export class ContainerManager {
|
||||
);
|
||||
// Continue anyway - might be an image pull or other transient issue
|
||||
}
|
||||
}
|
||||
|
||||
return { mcpEndpoint, wasCreated: false };
|
||||
return { mcpEndpoint, wasCreated: false, isSpinningUp: false };
|
||||
}
|
||||
|
||||
// Create new deployment
|
||||
@@ -76,6 +86,12 @@ export class ContainerManager {
|
||||
|
||||
await this.config.k8sClient.createAgentDeployment(spec);
|
||||
|
||||
// If not waiting for ready, return immediately with spinning up status
|
||||
if (!waitForReady) {
|
||||
this.config.logger.info({ userId, deploymentName }, 'Container created, spinning up...');
|
||||
return { mcpEndpoint, wasCreated: true, isSpinningUp: true };
|
||||
}
|
||||
|
||||
// Wait for deployment to be ready
|
||||
const ready = await this.config.k8sClient.waitForDeploymentReady(deploymentName, 120000);
|
||||
|
||||
@@ -87,7 +103,16 @@ export class ContainerManager {
|
||||
|
||||
this.config.logger.info({ userId, mcpEndpoint }, 'Container is ready');
|
||||
|
||||
return { mcpEndpoint, wasCreated: true };
|
||||
return { mcpEndpoint, wasCreated: true, isSpinningUp: false };
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for a deployment to become ready
|
||||
* Used for background polling after initial creation
|
||||
*/
|
||||
async waitForContainerReady(userId: string, timeoutMs: number = 120000): Promise<boolean> {
|
||||
const deploymentName = KubernetesClient.getDeploymentName(userId);
|
||||
return await this.config.k8sClient.waitForDeploymentReady(deploymentName, timeoutMs);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -84,6 +84,9 @@ spec:
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
@@ -148,6 +151,9 @@ spec:
|
||||
- name: agent-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
|
||||
@@ -83,6 +83,9 @@ spec:
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
@@ -147,6 +150,9 @@ spec:
|
||||
- name: agent-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
|
||||
@@ -83,6 +83,9 @@ spec:
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
@@ -147,6 +150,9 @@ spec:
|
||||
- name: agent-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
|
||||
@@ -13,6 +13,11 @@ import { WebSocketHandler } from './channels/websocket-handler.js';
|
||||
import { TelegramHandler } from './channels/telegram-handler.js';
|
||||
import { KubernetesClient } from './k8s/client.js';
|
||||
import { ContainerManager } from './k8s/container-manager.js';
|
||||
import { ZMQRelayClient } from './clients/zmq-relay-client.js';
|
||||
import { IcebergClient } from './clients/iceberg-client.js';
|
||||
import { OHLCService } from './services/ohlc-service.js';
|
||||
import { SymbolIndexService } from './services/symbol-index-service.js';
|
||||
import { SymbolRoutes } from './routes/symbol-routes.js';
|
||||
|
||||
// Catch unhandled promise rejections for better debugging
|
||||
process.on('unhandledRejection', (reason: any, promise) => {
|
||||
@@ -114,11 +119,19 @@ function loadConfig() {
|
||||
iceberg: {
|
||||
catalogUri: configData.iceberg?.catalog_uri || process.env.ICEBERG_CATALOG_URI || 'http://iceberg-catalog:8181',
|
||||
namespace: configData.iceberg?.namespace || process.env.ICEBERG_NAMESPACE || 'gateway',
|
||||
ohlcCatalogUri: configData.iceberg?.ohlc_catalog_uri || process.env.ICEBERG_OHLC_CATALOG_URI,
|
||||
ohlcNamespace: configData.iceberg?.ohlc_namespace || process.env.ICEBERG_OHLC_NAMESPACE || 'trading',
|
||||
s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT,
|
||||
s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY,
|
||||
s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY,
|
||||
},
|
||||
|
||||
// Relay configuration (for historical data)
|
||||
relay: {
|
||||
requestEndpoint: configData.relay?.request_endpoint || process.env.RELAY_REQUEST_ENDPOINT || 'tcp://relay:5559',
|
||||
notificationEndpoint: configData.relay?.notification_endpoint || process.env.RELAY_NOTIFICATION_ENDPOINT || 'tcp://relay:5558',
|
||||
},
|
||||
|
||||
// Embedding configuration (for RAG)
|
||||
embedding: {
|
||||
provider: (configData.embedding?.provider || process.env.EMBEDDING_PROVIDER || 'ollama') as 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none',
|
||||
@@ -224,10 +237,18 @@ const qdrantClient = new QdrantClient(config.qdrant, app.log);
|
||||
// Initialize Iceberg client (for durable storage)
|
||||
// const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
|
||||
// Initialize ZMQ Relay client (for historical data)
|
||||
// Note: onMetadataUpdate callback will be set after symbolIndexService is initialized
|
||||
const zmqRelayClient = new ZMQRelayClient({
|
||||
relayRequestEndpoint: config.relay.requestEndpoint,
|
||||
relayNotificationEndpoint: config.relay.notificationEndpoint,
|
||||
}, app.log);
|
||||
|
||||
app.log.info({
|
||||
redis: config.redisUrl,
|
||||
qdrant: config.qdrant.url,
|
||||
iceberg: config.iceberg.catalogUri,
|
||||
relay: config.relay.requestEndpoint,
|
||||
embeddingProvider: config.embedding.provider,
|
||||
}, 'Harness storage clients configured');
|
||||
|
||||
@@ -280,12 +301,32 @@ const eventRouter = new EventRouter({
|
||||
});
|
||||
app.log.debug('Event router initialized');
|
||||
|
||||
// Initialize OHLC service (optional - only if relay is available)
|
||||
let ohlcService: OHLCService | undefined;
|
||||
try {
|
||||
const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
ohlcService = new OHLCService({
|
||||
icebergClient,
|
||||
relayClient: zmqRelayClient,
|
||||
logger: app.log,
|
||||
});
|
||||
app.log.info('OHLC service initialized');
|
||||
} catch (error) {
|
||||
app.log.warn({ error }, 'Failed to initialize OHLC service - historical data will not be available');
|
||||
}
|
||||
|
||||
// Initialize Symbol Index Service (deferred to after server starts)
|
||||
let symbolIndexService: SymbolIndexService | undefined;
|
||||
|
||||
// Initialize channel handlers
|
||||
const websocketHandler = new WebSocketHandler({
|
||||
authenticator,
|
||||
containerManager,
|
||||
providerConfig: config.providerConfig,
|
||||
sessionRegistry,
|
||||
eventSubscriber,
|
||||
ohlcService, // Optional
|
||||
symbolIndexService, // Optional
|
||||
});
|
||||
app.log.debug('WebSocket handler initialized');
|
||||
|
||||
@@ -317,6 +358,13 @@ app.log.debug('Registering websocket handler...');
|
||||
websocketHandler.register(app);
|
||||
app.log.debug('Registering telegram handler...');
|
||||
telegramHandler.register(app);
|
||||
|
||||
// Register symbol routes (service may not be ready yet, but routes will handle this)
|
||||
app.log.debug('Registering symbol routes...');
|
||||
const getSymbolService = () => symbolIndexService;
|
||||
const symbolRoutes = new SymbolRoutes({ getSymbolIndexService: getSymbolService });
|
||||
symbolRoutes.register(app);
|
||||
|
||||
app.log.debug('All routes registered');
|
||||
|
||||
// Health check
|
||||
@@ -408,6 +456,11 @@ const shutdown = async () => {
|
||||
await eventSubscriber.stop();
|
||||
await eventRouter.stop();
|
||||
|
||||
// Close ZMQ relay client
|
||||
if (zmqRelayClient.isConnected()) {
|
||||
await zmqRelayClient.close();
|
||||
}
|
||||
|
||||
// Disconnect Redis
|
||||
redis.disconnect();
|
||||
|
||||
@@ -433,6 +486,15 @@ try {
|
||||
await redis.connect();
|
||||
app.log.info('Redis connected');
|
||||
|
||||
// Connect to ZMQ Relay
|
||||
app.log.debug('Connecting to ZMQ Relay...');
|
||||
try {
|
||||
await zmqRelayClient.connect();
|
||||
app.log.info('ZMQ Relay connected');
|
||||
} catch (error) {
|
||||
app.log.warn({ error }, 'ZMQ Relay connection failed - historical data will not be available');
|
||||
}
|
||||
|
||||
// Initialize Qdrant collection
|
||||
app.log.debug('Initializing Qdrant...');
|
||||
try {
|
||||
@@ -496,6 +558,34 @@ try {
|
||||
},
|
||||
'Gateway server started'
|
||||
);
|
||||
|
||||
// Initialize Symbol Index Service (after server is running)
|
||||
// This is done asynchronously to not block server startup
|
||||
(async () => {
|
||||
try {
|
||||
const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
const indexService = new SymbolIndexService({
|
||||
icebergClient,
|
||||
logger: app.log,
|
||||
});
|
||||
await indexService.initialize();
|
||||
symbolIndexService = indexService;
|
||||
|
||||
// Update websocket handler's config so it can use the service
|
||||
(websocketHandler as any).config.symbolIndexService = indexService;
|
||||
|
||||
// Configure ZMQ relay to reload symbol metadata on updates
|
||||
(zmqRelayClient as any).config.onMetadataUpdate = async () => {
|
||||
app.log.info('Reloading symbol metadata from Iceberg');
|
||||
await indexService.initialize();
|
||||
app.log.info({ stats: indexService.getStats() }, 'Symbol metadata reloaded');
|
||||
};
|
||||
|
||||
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol index service initialized');
|
||||
} catch (error) {
|
||||
app.log.warn({ error }, 'Failed to initialize symbol index service - symbol search will not be available');
|
||||
}
|
||||
})();
|
||||
} catch (error) {
|
||||
app.log.error({ error }, 'Failed to start server');
|
||||
process.exit(1);
|
||||
|
||||
115
gateway/src/routes/symbol-routes.ts
Normal file
115
gateway/src/routes/symbol-routes.ts
Normal file
@@ -0,0 +1,115 @@
|
||||
/**
|
||||
* Symbol routes for HTTP API
|
||||
*
|
||||
* Provides REST endpoints for symbol search and resolution.
|
||||
*/
|
||||
|
||||
import type { FastifyInstance } from 'fastify';
|
||||
import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
|
||||
export interface SymbolRoutesConfig {
|
||||
getSymbolIndexService: () => SymbolIndexService | undefined;
|
||||
}
|
||||
|
||||
export class SymbolRoutes {
|
||||
private getSymbolIndexService: () => SymbolIndexService | undefined;
|
||||
|
||||
constructor(config: SymbolRoutesConfig) {
|
||||
this.getSymbolIndexService = config.getSymbolIndexService;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register symbol routes with Fastify
|
||||
*/
|
||||
register(app: FastifyInstance): void {
|
||||
// Search symbols
|
||||
app.get('/symbols/search', async (request, reply) => {
|
||||
const symbolIndexService = this.getSymbolIndexService();
|
||||
|
||||
if (!symbolIndexService) {
|
||||
return reply.code(503).send({
|
||||
error: 'Symbol index service not ready',
|
||||
message: 'Service is still initializing, please try again in a moment',
|
||||
});
|
||||
}
|
||||
|
||||
const { q, limit } = request.query as { q?: string; limit?: string };
|
||||
|
||||
if (!q) {
|
||||
return reply.code(400).send({
|
||||
error: 'Query parameter "q" is required',
|
||||
});
|
||||
}
|
||||
|
||||
const limitNum = limit ? parseInt(limit, 10) : 30;
|
||||
|
||||
try {
|
||||
const results = symbolIndexService.search(q, limitNum);
|
||||
return { results };
|
||||
} catch (error: any) {
|
||||
app.log.error({ error: error.message }, 'Symbol search failed');
|
||||
return reply.code(500).send({
|
||||
error: 'Symbol search failed',
|
||||
message: error.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Resolve symbol (use wildcard to capture ticker with slashes like BINANCE:BTC/USDT)
|
||||
app.get('/symbols/*', async (request, reply) => {
|
||||
const symbolIndexService = this.getSymbolIndexService();
|
||||
|
||||
if (!symbolIndexService) {
|
||||
return reply.code(503).send({
|
||||
error: 'Symbol index service not ready',
|
||||
message: 'Service is still initializing, please try again in a moment',
|
||||
});
|
||||
}
|
||||
|
||||
// Extract ticker from wildcard path (everything after /symbols/)
|
||||
const ticker = (request.params as any)['*'];
|
||||
|
||||
try {
|
||||
const symbolInfo = symbolIndexService.resolveSymbol(ticker);
|
||||
|
||||
if (!symbolInfo) {
|
||||
return reply.code(404).send({
|
||||
error: 'Symbol not found',
|
||||
ticker,
|
||||
});
|
||||
}
|
||||
|
||||
return symbolInfo;
|
||||
} catch (error: any) {
|
||||
app.log.error({ error: error.message, ticker }, 'Symbol resolution failed');
|
||||
return reply.code(500).send({
|
||||
error: 'Symbol resolution failed',
|
||||
message: error.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Get symbol index stats
|
||||
app.get('/symbols/stats', async (_request, reply) => {
|
||||
const symbolIndexService = this.getSymbolIndexService();
|
||||
|
||||
if (!symbolIndexService) {
|
||||
return reply.code(503).send({
|
||||
error: 'Symbol index service not ready',
|
||||
message: 'Service is still initializing, please try again in a moment',
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const stats = symbolIndexService.getStats();
|
||||
return stats;
|
||||
} catch (error: any) {
|
||||
app.log.error({ error: error.message }, 'Failed to get symbol stats');
|
||||
return reply.code(500).send({
|
||||
error: 'Failed to get symbol stats',
|
||||
message: error.message,
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
249
gateway/src/services/ohlc-service.ts
Normal file
249
gateway/src/services/ohlc-service.ts
Normal file
@@ -0,0 +1,249 @@
|
||||
/**
|
||||
* OHLC Service - High-level API for historical market data
|
||||
*
|
||||
* Workflow (mirroring client-py/dexorder/ohlc_client.py):
|
||||
* 1. Check Iceberg for existing data
|
||||
* 2. Identify missing ranges
|
||||
* 3. If complete, return immediately
|
||||
* 4. Otherwise, request missing data via relay
|
||||
* 5. Wait for completion notification
|
||||
* 6. Query Iceberg again for complete dataset
|
||||
* 7. Return results
|
||||
*
|
||||
* This provides transparent caching - clients don't need to know
|
||||
* whether data came from cache or was fetched on-demand.
|
||||
*/
|
||||
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { IcebergClient } from '../clients/iceberg-client.js';
|
||||
import type { ZMQRelayClient } from '../clients/zmq-relay-client.js';
|
||||
import type {
|
||||
HistoryResult,
|
||||
SymbolInfo,
|
||||
SearchResult,
|
||||
DatafeedConfig,
|
||||
TradingViewBar,
|
||||
} from '../types/ohlc.js';
|
||||
import {
|
||||
secondsToMicros,
|
||||
backendToTradingView,
|
||||
resolutionToSeconds,
|
||||
DEFAULT_SUPPORTED_RESOLUTIONS,
|
||||
} from '../types/ohlc.js';
|
||||
|
||||
export interface OHLCServiceConfig {
|
||||
icebergClient: IcebergClient;
|
||||
relayClient: ZMQRelayClient;
|
||||
logger: FastifyBaseLogger;
|
||||
requestTimeout?: number; // Request timeout in ms (default: 30000)
|
||||
}
|
||||
|
||||
/**
|
||||
* OHLC Service
|
||||
*
|
||||
* Provides high-level API for fetching OHLC data with smart caching.
|
||||
*/
|
||||
export class OHLCService {
|
||||
private icebergClient: IcebergClient;
|
||||
private relayClient: ZMQRelayClient;
|
||||
private logger: FastifyBaseLogger;
|
||||
|
||||
constructor(config: OHLCServiceConfig) {
|
||||
this.icebergClient = config.icebergClient;
|
||||
this.relayClient = config.relayClient;
|
||||
this.logger = config.logger;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch OHLC data with smart caching
|
||||
*
|
||||
* Steps:
|
||||
* 1. Query Iceberg for existing data
|
||||
* 2. If complete, return immediately
|
||||
* 3. If missing data, request via relay
|
||||
* 4. Wait for completion notification
|
||||
* 5. Query Iceberg again for complete dataset
|
||||
* 6. Return results
|
||||
*/
|
||||
async fetchOHLC(
|
||||
ticker: string,
|
||||
resolution: string,
|
||||
from_time: number, // Unix timestamp in SECONDS
|
||||
to_time: number, // Unix timestamp in SECONDS
|
||||
countback?: number
|
||||
): Promise<HistoryResult> {
|
||||
this.logger.debug({
|
||||
ticker,
|
||||
resolution,
|
||||
from_time,
|
||||
to_time,
|
||||
countback,
|
||||
}, 'Fetching OHLC data');
|
||||
|
||||
// Convert resolution to period_seconds
|
||||
const period_seconds = resolutionToSeconds(resolution);
|
||||
|
||||
// Convert times to microseconds
|
||||
const start_time = secondsToMicros(from_time);
|
||||
const end_time = secondsToMicros(to_time);
|
||||
|
||||
// Step 1: Check Iceberg for existing data
|
||||
let data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
|
||||
// Step 2: Identify missing ranges
|
||||
const missingRanges = await this.icebergClient.findMissingOHLCRanges(
|
||||
ticker,
|
||||
period_seconds,
|
||||
start_time,
|
||||
end_time
|
||||
);
|
||||
|
||||
if (missingRanges.length === 0 && data.length > 0) {
|
||||
// All data exists in Iceberg
|
||||
this.logger.debug({ ticker, resolution, cached: true }, 'OHLC data found in cache');
|
||||
return this.formatHistoryResult(data, countback);
|
||||
}
|
||||
|
||||
// Step 3: Request missing data via relay
|
||||
this.logger.debug({ ticker, resolution, missingRanges: missingRanges.length }, 'Requesting missing OHLC data');
|
||||
|
||||
try {
|
||||
const notification = await this.relayClient.requestHistoricalOHLC(
|
||||
ticker,
|
||||
period_seconds,
|
||||
start_time,
|
||||
end_time,
|
||||
countback
|
||||
);
|
||||
|
||||
this.logger.info({
|
||||
ticker,
|
||||
resolution,
|
||||
row_count: notification.row_count,
|
||||
status: notification.status,
|
||||
}, 'Historical data request completed');
|
||||
|
||||
// Step 4: Query Iceberg again for complete dataset
|
||||
data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
|
||||
return this.formatHistoryResult(data, countback);
|
||||
|
||||
} catch (error: any) {
|
||||
this.logger.error({
|
||||
error,
|
||||
ticker,
|
||||
resolution,
|
||||
}, 'Failed to fetch historical data');
|
||||
|
||||
// Return empty result on error
|
||||
return {
|
||||
bars: [],
|
||||
noData: true,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Format OHLC data as TradingView history result
|
||||
*/
|
||||
private formatHistoryResult(data: any[], countback?: number): HistoryResult {
|
||||
if (data.length === 0) {
|
||||
return {
|
||||
bars: [],
|
||||
noData: true,
|
||||
};
|
||||
}
|
||||
|
||||
// Convert to TradingView format
|
||||
let bars: TradingViewBar[] = data.map(backendToTradingView);
|
||||
|
||||
// Sort by time
|
||||
bars.sort((a, b) => a.time - b.time);
|
||||
|
||||
// Apply countback limit if specified
|
||||
if (countback && bars.length > countback) {
|
||||
bars = bars.slice(-countback);
|
||||
}
|
||||
|
||||
return {
|
||||
bars,
|
||||
noData: false,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get datafeed configuration
|
||||
*/
|
||||
async getConfig(): Promise<DatafeedConfig> {
|
||||
return {
|
||||
supported_resolutions: DEFAULT_SUPPORTED_RESOLUTIONS,
|
||||
supports_search: true,
|
||||
supports_group_request: false,
|
||||
supports_marks: false,
|
||||
supports_timescale_marks: false,
|
||||
supports_time: false,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Search symbols
|
||||
*
|
||||
* For now, stub with default symbol
|
||||
*/
|
||||
async searchSymbols(
|
||||
query: string,
|
||||
type?: string,
|
||||
exchange?: string,
|
||||
limit: number = 30
|
||||
): Promise<SearchResult[]> {
|
||||
this.logger.debug({ query, type, exchange, limit }, 'Searching symbols');
|
||||
|
||||
// TODO: Implement central symbol registry
|
||||
// For now, return default symbol if query matches
|
||||
if (query.toLowerCase().includes('btc') || query.toLowerCase().includes('binance')) {
|
||||
return [{
|
||||
symbol: 'BINANCE:BTC/USDT',
|
||||
full_name: 'BINANCE:BTC/USDT',
|
||||
description: 'Bitcoin / Tether USD',
|
||||
exchange: 'BINANCE',
|
||||
ticker: 'BINANCE:BTC/USDT',
|
||||
type: 'crypto',
|
||||
}];
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve symbol metadata
|
||||
*
|
||||
* For now, stub with default symbol
|
||||
*/
|
||||
async resolveSymbol(symbol: string): Promise<SymbolInfo> {
|
||||
this.logger.debug({ symbol }, 'Resolving symbol');
|
||||
|
||||
// TODO: Implement central symbol registry
|
||||
// For now, return default symbol info for BINANCE:BTC/USDT
|
||||
if (symbol === 'BINANCE:BTC/USDT' || symbol === 'BTC/USDT') {
|
||||
return {
|
||||
symbol: 'BINANCE:BTC/USDT',
|
||||
name: 'BINANCE:BTC/USDT',
|
||||
ticker: 'BINANCE:BTC/USDT',
|
||||
description: 'Bitcoin / Tether USD',
|
||||
type: 'crypto',
|
||||
session: '24x7',
|
||||
timezone: 'Etc/UTC',
|
||||
exchange: 'BINANCE',
|
||||
minmov: 1,
|
||||
pricescale: 100,
|
||||
has_intraday: true,
|
||||
has_daily: true,
|
||||
has_weekly_and_monthly: false,
|
||||
supported_resolutions: DEFAULT_SUPPORTED_RESOLUTIONS,
|
||||
data_status: 'streaming',
|
||||
};
|
||||
}
|
||||
|
||||
throw new Error(`Symbol not found: ${symbol}`);
|
||||
}
|
||||
}
|
||||
266
gateway/src/services/symbol-index-service.ts
Normal file
266
gateway/src/services/symbol-index-service.ts
Normal file
@@ -0,0 +1,266 @@
|
||||
/**
|
||||
* Symbol Index Service
|
||||
*
|
||||
* Provides fast in-memory search for symbol metadata.
|
||||
* Loads initial data from Iceberg and stays synced via Kafka subscription.
|
||||
*/
|
||||
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { IcebergClient } from '../clients/iceberg-client.js';
|
||||
import type { SearchResult, SymbolInfo, SymbolMetadata } from '../types/ohlc.js';
|
||||
import { DEFAULT_SUPPORTED_RESOLUTIONS } from '../types/ohlc.js';
|
||||
|
||||
export interface SymbolIndexServiceConfig {
|
||||
icebergClient: IcebergClient;
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
/**
|
||||
* Symbol Index Service
|
||||
*
|
||||
* Maintains an in-memory index of all available symbols for fast search.
|
||||
*/
|
||||
export class SymbolIndexService {
|
||||
private icebergClient: IcebergClient;
|
||||
private logger: FastifyBaseLogger;
|
||||
private symbols: Map<string, SymbolMetadata> = new Map(); // key: "EXCHANGE:MARKET_ID"
|
||||
private initialized: boolean = false;
|
||||
|
||||
constructor(config: SymbolIndexServiceConfig) {
|
||||
this.icebergClient = config.icebergClient;
|
||||
this.logger = config.logger;
|
||||
}
|
||||
|
||||
  /**
   * Initialize the index by loading symbols from Iceberg.
   *
   * Populates the in-memory map keyed by "EXCHANGE:MARKET_ID" and sets the
   * `initialized` flag on success. Failures are logged as warnings and
   * swallowed so startup is not blocked; the index will lazy-load on the
   * first request instead.
   */
  async initialize(): Promise<void> {
    this.logger.info('Initializing symbol index from Iceberg');

    try {
      // Load all symbols from Iceberg symbol_metadata table
      const symbols = await this.icebergClient.queryAllSymbols();

      // Diagnostic logging: record the raw shape of the result before
      // indexing it, for troubleshooting the Iceberg query path
      this.logger.info({
        symbolsType: typeof symbols,
        symbolsIsArray: Array.isArray(symbols),
        symbolsLength: symbols?.length,
        firstSymbol: symbols[0]
      }, 'Loaded symbols debug info');

      // Track unique keys for debugging
      const uniqueKeys = new Set<string>();

      for (const symbol of symbols) {
        // Later rows with the same key overwrite earlier ones (last write wins)
        const key = `${symbol.exchange_id}:${symbol.market_id}`;
        uniqueKeys.add(key);
        this.symbols.set(key, symbol);
      }

      this.initialized = true;
      this.logger.info({
        count: this.symbols.size,
        totalRows: symbols.length,
        uniqueKeys: uniqueKeys.size,
        sampleKeys: Array.from(uniqueKeys).slice(0, 5)
      }, 'Symbol index initialized');
    } catch (error: any) {
      this.logger.warn({ error: error.message }, 'Failed to initialize symbol index (will retry on first request)');
      // Don't throw - allow lazy loading so a transient Iceberg outage at
      // boot does not permanently disable symbol search
    }
  }
|
||||
|
||||
/**
|
||||
* Ensure index is initialized (with retry on failure)
|
||||
*/
|
||||
private async ensureInitialized(): Promise<void> {
|
||||
if (this.initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.logger.info('Lazy-loading symbol index');
|
||||
await this.initialize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Update or add a symbol to the index
|
||||
*/
|
||||
updateSymbol(symbol: SymbolMetadata): void {
|
||||
const key = `${symbol.exchange_id}:${symbol.market_id}`;
|
||||
this.symbols.set(key, symbol);
|
||||
this.logger.debug({ key }, 'Updated symbol in index');
|
||||
}
|
||||
|
||||
/**
|
||||
* Search for symbols matching a query
|
||||
*
|
||||
* Simple case-insensitive substring matching across:
|
||||
* - Ticker (EXCHANGE:MARKET_ID)
|
||||
* - Base asset
|
||||
* - Quote asset
|
||||
* - Description
|
||||
*/
|
||||
async search(query: string, limit: number = 30): Promise<SearchResult[]> {
|
||||
await this.ensureInitialized();
|
||||
|
||||
if (!this.initialized) {
|
||||
this.logger.warn('Symbol index not initialized, returning empty results');
|
||||
return [];
|
||||
}
|
||||
|
||||
const queryLower = query.toLowerCase();
|
||||
const results: SearchResult[] = [];
|
||||
|
||||
for (const [key, metadata] of this.symbols) {
|
||||
// Match against various fields
|
||||
const ticker = key;
|
||||
const base = metadata.base_asset || '';
|
||||
const quote = metadata.quote_asset || '';
|
||||
const desc = metadata.description || '';
|
||||
const marketId = metadata.market_id || '';
|
||||
|
||||
if (
|
||||
ticker.toLowerCase().includes(queryLower) ||
|
||||
base.toLowerCase().includes(queryLower) ||
|
||||
quote.toLowerCase().includes(queryLower) ||
|
||||
desc.toLowerCase().includes(queryLower) ||
|
||||
marketId.toLowerCase().includes(queryLower)
|
||||
) {
|
||||
results.push(this.metadataToSearchResult(metadata));
|
||||
|
||||
if (results.length >= limit) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.debug({ query, count: results.length }, 'Symbol search completed');
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve symbol metadata by ticker
|
||||
*/
|
||||
async resolveSymbol(ticker: string): Promise<SymbolInfo | null> {
|
||||
await this.ensureInitialized();
|
||||
|
||||
if (!this.initialized) {
|
||||
this.logger.warn('Symbol index not initialized after retry');
|
||||
return null;
|
||||
}
|
||||
|
||||
// ticker format: "EXCHANGE:MARKET_ID" or just "MARKET_ID"
|
||||
let key = ticker;
|
||||
|
||||
// If no exchange prefix, search for first match
|
||||
if (!ticker.includes(':')) {
|
||||
for (const [k, metadata] of this.symbols) {
|
||||
if (metadata.market_id === ticker) {
|
||||
key = k;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const metadata = this.symbols.get(key);
|
||||
if (!metadata) {
|
||||
this.logger.debug({ ticker }, 'Symbol not found');
|
||||
return null;
|
||||
}
|
||||
|
||||
return this.metadataToSymbolInfo(metadata);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert SymbolMetadata to SearchResult
|
||||
*/
|
||||
private metadataToSearchResult(metadata: SymbolMetadata): SearchResult {
|
||||
const symbol = metadata.market_id; // Clean format: "BTC/USDT"
|
||||
const ticker = `${metadata.exchange_id}:${metadata.market_id}`; // "BINANCE:BTC/USDT"
|
||||
const fullName = `${metadata.market_id} (${metadata.exchange_id})`;
|
||||
|
||||
return {
|
||||
symbol,
|
||||
ticker,
|
||||
full_name: fullName,
|
||||
description: metadata.description || `${metadata.base_asset}/${metadata.quote_asset} ${metadata.market_type || 'spot'} pair on ${metadata.exchange_id}`,
|
||||
exchange: metadata.exchange_id,
|
||||
type: metadata.market_type || 'spot',
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert SymbolMetadata to SymbolInfo
|
||||
*/
|
||||
private metadataToSymbolInfo(metadata: SymbolMetadata): SymbolInfo {
|
||||
const symbol = metadata.market_id;
|
||||
const ticker = `${metadata.exchange_id}:${metadata.market_id}`;
|
||||
|
||||
// Convert supported_period_seconds to resolution strings
|
||||
const supportedResolutions = this.periodSecondsToResolutions(metadata.supported_period_seconds || []);
|
||||
|
||||
// Calculate pricescale from tick_denom
|
||||
// tick_denom is 10^n where n is the number of decimal places
|
||||
// pricescale is the same value
|
||||
const pricescale = metadata.tick_denom ? Number(metadata.tick_denom) : 100;
|
||||
|
||||
return {
|
||||
symbol,
|
||||
ticker,
|
||||
name: symbol,
|
||||
description: metadata.description || `${metadata.base_asset}/${metadata.quote_asset} ${metadata.market_type || 'spot'} pair on ${metadata.exchange_id}`,
|
||||
type: metadata.market_type || 'spot',
|
||||
exchange: metadata.exchange_id,
|
||||
timezone: 'Etc/UTC',
|
||||
session: '24x7',
|
||||
supported_resolutions: supportedResolutions.length > 0 ? supportedResolutions : DEFAULT_SUPPORTED_RESOLUTIONS,
|
||||
has_intraday: true,
|
||||
has_daily: true,
|
||||
has_weekly_and_monthly: false,
|
||||
pricescale,
|
||||
minmov: 1,
|
||||
base_currency: metadata.base_asset,
|
||||
quote_currency: metadata.quote_asset,
|
||||
data_status: 'streaming',
|
||||
tick_denominator: metadata.tick_denom ? Number(metadata.tick_denom) : undefined,
|
||||
base_denominator: metadata.base_denom ? Number(metadata.base_denom) : undefined,
|
||||
quote_denominator: metadata.quote_denom ? Number(metadata.quote_denom) : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert period_seconds array to TradingView resolution strings
|
||||
*/
|
||||
private periodSecondsToResolutions(periods: number[]): string[] {
|
||||
const resolutions: string[] = [];
|
||||
|
||||
for (const seconds of periods) {
|
||||
if (seconds < 3600) {
|
||||
// Minutes
|
||||
resolutions.push(String(seconds / 60));
|
||||
} else if (seconds === 3600) {
|
||||
resolutions.push('60');
|
||||
} else if (seconds === 14400) {
|
||||
resolutions.push('240');
|
||||
} else if (seconds === 86400) {
|
||||
resolutions.push('1D');
|
||||
} else if (seconds === 604800) {
|
||||
resolutions.push('1W');
|
||||
} else if (seconds === 2592000) {
|
||||
resolutions.push('1M');
|
||||
}
|
||||
}
|
||||
|
||||
return resolutions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get statistics about the symbol index
|
||||
*/
|
||||
getStats() {
|
||||
return {
|
||||
symbolCount: this.symbols.size,
|
||||
initialized: this.initialized,
|
||||
};
|
||||
}
|
||||
}
|
||||
221
gateway/src/types/ohlc.ts
Normal file
221
gateway/src/types/ohlc.ts
Normal file
@@ -0,0 +1,221 @@
|
||||
/**
|
||||
* OHLC data types and utilities
|
||||
*
|
||||
* Handles conversion between:
|
||||
* - TradingView datafeed format (seconds, OHLCV structure)
|
||||
* - Backend/Iceberg format (microseconds, ticker prefix)
|
||||
* - ZMQ protocol format (protobuf messages)
|
||||
*/
|
||||
|
||||
/**
 * TradingView bar format (used by web frontend).
 *
 * NOTE: `time` is in SECONDS, unlike BackendOHLC whose timestamp is in
 * microseconds — see microsToSeconds/secondsToMicros for conversion.
 */
export interface TradingViewBar {
  time: number; // Unix timestamp in SECONDS
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
}
|
||||
|
||||
/**
 * Backend OHLC format (from Iceberg).
 *
 * One candle row: `ticker` + `period_seconds` identify the series,
 * `timestamp` is the bar's time in MICROSECONDS (convert with
 * microsToSeconds before handing to TradingView).
 */
export interface BackendOHLC {
  timestamp: number; // Unix timestamp in MICROSECONDS
  ticker: string; // series identifier, e.g. "BINANCE:BTC/USDT" — TODO confirm format
  period_seconds: number; // candle duration in seconds
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
}
|
||||
|
||||
/**
 * Datafeed configuration (TradingView format).
 *
 * Capability flags returned to the charting library's /config request.
 */
export interface DatafeedConfig {
  supported_resolutions: string[]; // e.g. DEFAULT_SUPPORTED_RESOLUTIONS
  supports_search: boolean;
  supports_group_request: boolean;
  supports_marks: boolean;
  supports_timescale_marks: boolean;
  supports_time: boolean;
}
|
||||
|
||||
/**
 * Symbol info (TradingView format).
 * Matches backend.old/src/datasource/schema.py SymbolInfo.
 *
 * The tick/base/quote denominator fields are gateway extensions beyond
 * the stock TradingView shape (see metadataToSymbolInfo).
 */
export interface SymbolInfo {
  symbol: string; // Clean format (e.g., "BTC/USDT")
  ticker: string; // With exchange prefix (e.g., "BINANCE:BTC/USDT")
  name: string; // Display name
  description: string; // Human-readable description
  type: string; // "crypto", "spot", "futures", etc.
  exchange: string; // Exchange identifier
  timezone: string; // IANA timezone
  session: string; // Trading session (e.g., "24x7")
  supported_resolutions: string[]; // Supported time resolutions
  has_intraday: boolean;
  has_daily: boolean;
  has_weekly_and_monthly: boolean;
  pricescale: number; // Price scale factor (10^n for n decimal places)
  minmov: number; // Minimum price movement
  base_currency?: string; // Base asset (e.g., "BTC")
  quote_currency?: string; // Quote asset (e.g., "USDT")
  data_status?: string; // "streaming", "delayed", etc.
  tick_denominator?: number; // Denominator for price scaling (e.g., 1e6)
  base_denominator?: number; // Denominator for base asset
  quote_denominator?: number; // Denominator for quote asset
}
|
||||
|
||||
/**
 * History result (TradingView format).
 *
 * `noData: true` tells the chart there is nothing (further) in the
 * requested range; `nextTime` hints where older data resumes.
 */
export interface HistoryResult {
  bars: TradingViewBar[];
  noData: boolean;
  nextTime?: number; // Unix timestamp in SECONDS for pagination
}
|
||||
|
||||
/**
 * Search result (TradingView format).
 * Matches backend.old/src/datasource/schema.py SearchResult.
 */
export interface SearchResult {
  symbol: string; // Clean format (e.g., "BTC/USDT")
  ticker: string; // With exchange prefix for routing (e.g., "BINANCE:BTC/USDT")
  full_name: string; // Full display name (e.g., "BTC/USDT (BINANCE)")
  description: string; // Human-readable description
  exchange: string; // Exchange identifier
  type: string; // Instrument type ("spot", "futures", etc.)
}
|
||||
|
||||
/**
|
||||
* ZMQ Protocol Messages (simplified TypeScript representations)
|
||||
*/
|
||||
|
||||
// Status of a historical-data submit request.
// NOTE(review): numeric values presumably mirror the protobuf enum on the
// wire — confirm against the .proto before renumbering.
export enum SubmitStatus {
  QUEUED = 0,
  DUPLICATE = 1,
  INVALID = 2,
  ERROR = 3,
}
|
||||
|
||||
// Outcome of an asynchronous history request, delivered via notification.
// NOTE(review): numeric values presumably mirror the protobuf enum on the
// wire — confirm against the .proto before renumbering.
export enum NotificationStatus {
  OK = 0,
  NOT_FOUND = 1,
  ERROR = 2,
  TIMEOUT = 3,
}
|
||||
|
||||
// Request to queue a historical OHLC backfill over ZMQ.
export interface SubmitHistoricalRequest {
  request_id: string; // caller-chosen id, echoed in SubmitResponse
  ticker: string;
  start_time: bigint; // microseconds
  end_time: bigint; // microseconds
  period_seconds: number; // candle duration
  limit?: number; // max rows — TODO confirm semantics with the .proto
  client_id?: string;
}
|
||||
|
||||
// Immediate reply to a SubmitHistoricalRequest; completion arrives later
// on `notification_topic` as a HistoryReadyNotification.
export interface SubmitResponse {
  request_id: string; // echoes the request's id
  status: SubmitStatus;
  error_message?: string; // populated for INVALID/ERROR statuses
  notification_topic: string; // topic to subscribe to for completion
}
|
||||
|
||||
// Async completion notice for a historical request: where in Iceberg the
// produced rows landed, and how the request finished.
export interface HistoryReadyNotification {
  request_id: string;
  ticker: string;
  period_seconds: number;
  start_time: bigint; // microseconds
  end_time: bigint; // microseconds
  status: NotificationStatus;
  error_message?: string; // populated for ERROR/TIMEOUT statuses
  iceberg_namespace: string; // where the result rows were written
  iceberg_table: string;
  row_count: number;
  completed_at: bigint; // microseconds
}
|
||||
|
||||
/**
|
||||
* Conversion utilities
|
||||
*/
|
||||
|
||||
export function secondsToMicros(seconds: number): bigint {
|
||||
return BigInt(Math.floor(seconds)) * 1000000n;
|
||||
}
|
||||
|
||||
export function microsToSeconds(micros: bigint | number): number {
|
||||
// Integer division: convert microseconds to seconds (truncates to integer)
|
||||
return Number(BigInt(micros) / 1000000n);
|
||||
}
|
||||
|
||||
export function backendToTradingView(backend: BackendOHLC): TradingViewBar {
|
||||
return {
|
||||
time: microsToSeconds(backend.timestamp),
|
||||
open: backend.open,
|
||||
high: backend.high,
|
||||
low: backend.low,
|
||||
close: backend.close,
|
||||
volume: backend.volume,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert TradingView resolution to seconds
|
||||
* Examples: "1" -> 60, "5" -> 300, "60" -> 3600, "1D" -> 86400
|
||||
*/
|
||||
export function resolutionToSeconds(resolution: string): number {
|
||||
// Handle numeric resolutions (minutes)
|
||||
if (/^\d+$/.test(resolution)) {
|
||||
return parseInt(resolution, 10) * 60;
|
||||
}
|
||||
|
||||
// Handle day/week/month resolutions
|
||||
if (resolution.endsWith('D')) {
|
||||
const days = parseInt(resolution.slice(0, -1), 10);
|
||||
return days * 86400;
|
||||
}
|
||||
if (resolution.endsWith('W')) {
|
||||
const weeks = parseInt(resolution.slice(0, -1), 10);
|
||||
return weeks * 7 * 86400;
|
||||
}
|
||||
if (resolution.endsWith('M')) {
|
||||
const months = parseInt(resolution.slice(0, -1), 10);
|
||||
return months * 30 * 86400; // Approximate
|
||||
}
|
||||
|
||||
throw new Error(`Unsupported resolution: ${resolution}`);
|
||||
}
|
||||
|
||||
/**
 * Default supported resolutions, used when a symbol's metadata lists no
 * supported_period_seconds (see metadataToSymbolInfo). TradingView
 * convention: plain numbers are minutes; "1D"/"1W" are daily/weekly.
 */
export const DEFAULT_SUPPORTED_RESOLUTIONS = [
  '1', '5', '15', '30', '60', '240', '1D', '1W'
];
|
||||
|
||||
/**
 * Symbol metadata from Iceberg (backend format).
 * Maps to Market protobuf and Iceberg symbol_metadata table.
 */
export interface SymbolMetadata {
  exchange_id: string; // e.g. "BINANCE"
  market_id: string; // e.g. "BTC/USDT"
  market_type?: string; // "spot", "futures", ... (defaults to "spot" downstream)
  description?: string;
  base_asset?: string; // e.g. "BTC"
  quote_asset?: string; // e.g. "USDT"
  tick_denom?: bigint; // price denominator, 10^n for n decimal places
  base_denom?: bigint; // base-amount denominator
  quote_denom?: bigint; // quote-amount denominator
  supported_period_seconds?: number[]; // available candle periods
  earliest_time?: bigint; // presumably microseconds, earliest data — TODO confirm
  updated_at: bigint; // presumably microseconds, last refresh — TODO confirm
}
|
||||
@@ -32,11 +32,18 @@ export const UserLicenseSchema = z.object({
|
||||
maxTokensPerMessage: z.number(),
|
||||
rateLimitPerMinute: z.number(),
|
||||
}),
|
||||
mcpServerUrl: z.string().url(),
|
||||
mcpServerUrl: z.string(), // Allow any string including 'pending', URL validation happens later
|
||||
preferredModel: ModelPreferenceSchema.optional(),
|
||||
expiresAt: z.date().optional(),
|
||||
createdAt: z.date(),
|
||||
updatedAt: z.date(),
|
||||
expiresAt: z.union([z.date(), z.string(), z.null()]).optional().transform(val => {
|
||||
if (!val || val === null) return undefined;
|
||||
return val instanceof Date ? val : new Date(val);
|
||||
}),
|
||||
createdAt: z.union([z.date(), z.string()]).transform(val =>
|
||||
val instanceof Date ? val : new Date(val)
|
||||
),
|
||||
updatedAt: z.union([z.date(), z.string()]).transform(val =>
|
||||
val instanceof Date ? val : new Date(val)
|
||||
),
|
||||
});
|
||||
|
||||
export type UserLicense = z.infer<typeof UserLicenseSchema>;
|
||||
|
||||
190
gateway/src/workspace/container-sync.ts
Normal file
190
gateway/src/workspace/container-sync.ts
Normal file
@@ -0,0 +1,190 @@
|
||||
/**
|
||||
* Container Sync
|
||||
*
|
||||
* Handles synchronization of persistent workspace stores with the user container
|
||||
* via MCP tools. Persistent stores (chartStore, userPreferences, etc.) are
|
||||
* stored in the container and loaded/saved via MCP tool calls.
|
||||
*
|
||||
* Container-side storage: /data/workspace/{store_name}.json
|
||||
*
|
||||
* MCP Tools used:
|
||||
* - workspace_read(store_name) -> dict
|
||||
* - workspace_write(store_name, data) -> None
|
||||
* - workspace_patch(store_name, patch) -> dict (new state)
|
||||
*/
|
||||
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { Operation as JsonPatchOp } from 'fast-json-patch';
|
||||
import type { MCPClientConnector } from '../harness/mcp-client.js';
|
||||
|
||||
/**
 * Result of loading a store from the container.
 * `exists: false` covers "not found", "disconnected", and errors;
 * inspect `error` to distinguish a genuine miss from a failure.
 */
export interface LoadResult {
  exists: boolean;
  state?: unknown; // present only when exists is true
  error?: string; // present when the load failed (not merely missing)
}
|
||||
|
||||
/**
 * Result of saving a store to the container.
 */
export interface SaveResult {
  success: boolean;
  error?: string; // populated when success is false
}
|
||||
|
||||
/**
 * Result of patching a store in the container.
 */
export interface PatchResult {
  success: boolean;
  newState?: unknown; // container's state after the patch, on success
  error?: string; // populated when success is false
}
|
||||
|
||||
/**
|
||||
* Handles synchronization with the user's container via MCP.
|
||||
*/
|
||||
export class ContainerSync {
|
||||
private mcpClient: MCPClientConnector;
|
||||
private logger: FastifyBaseLogger;
|
||||
|
||||
constructor(mcpClient: MCPClientConnector, logger: FastifyBaseLogger) {
|
||||
this.mcpClient = mcpClient;
|
||||
this.logger = logger.child({ component: 'ContainerSync' });
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a workspace store from the container.
|
||||
* Returns the stored state or indicates the store doesn't exist.
|
||||
*/
|
||||
async loadStore(storeName: string): Promise<LoadResult> {
|
||||
if (!this.mcpClient.isConnected()) {
|
||||
this.logger.warn({ store: storeName }, 'MCP client not connected, cannot load store');
|
||||
return { exists: false, error: 'MCP client not connected' };
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.debug({ store: storeName }, 'Loading store from container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_read', {
|
||||
store_name: storeName,
|
||||
})) as { exists: boolean; data?: unknown; error?: string };
|
||||
|
||||
if (result.error) {
|
||||
this.logger.warn({ store: storeName, error: result.error }, 'Container returned error');
|
||||
return { exists: false, error: result.error };
|
||||
}
|
||||
|
||||
if (!result.exists) {
|
||||
this.logger.debug({ store: storeName }, 'Store does not exist in container');
|
||||
return { exists: false };
|
||||
}
|
||||
|
||||
this.logger.debug({ store: storeName }, 'Loaded store from container');
|
||||
return { exists: true, state: result.data };
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||
this.logger.error({ store: storeName, error: message }, 'Failed to load store from container');
|
||||
return { exists: false, error: message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save a workspace store to the container.
|
||||
* Overwrites any existing state.
|
||||
*/
|
||||
async saveStore(storeName: string, state: unknown): Promise<SaveResult> {
|
||||
if (!this.mcpClient.isConnected()) {
|
||||
this.logger.warn({ store: storeName }, 'MCP client not connected, cannot save store');
|
||||
return { success: false, error: 'MCP client not connected' };
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.debug({ store: storeName }, 'Saving store to container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_write', {
|
||||
store_name: storeName,
|
||||
data: state,
|
||||
})) as { success: boolean; error?: string };
|
||||
|
||||
if (result.error || !result.success) {
|
||||
this.logger.warn({ store: storeName, error: result.error }, 'Failed to save store');
|
||||
return { success: false, error: result.error || 'Unknown error' };
|
||||
}
|
||||
|
||||
this.logger.debug({ store: storeName }, 'Saved store to container');
|
||||
return { success: true };
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||
this.logger.error({ store: storeName, error: message }, 'Failed to save store to container');
|
||||
return { success: false, error: message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply a JSON patch to a store in the container.
|
||||
* Returns the new state after applying the patch.
|
||||
*/
|
||||
async patchStore(storeName: string, patch: JsonPatchOp[]): Promise<PatchResult> {
|
||||
if (!this.mcpClient.isConnected()) {
|
||||
this.logger.warn({ store: storeName }, 'MCP client not connected, cannot patch store');
|
||||
return { success: false, error: 'MCP client not connected' };
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_patch', {
|
||||
store_name: storeName,
|
||||
patch,
|
||||
})) as { success: boolean; data?: unknown; error?: string };
|
||||
|
||||
if (result.error || !result.success) {
|
||||
this.logger.warn({ store: storeName, error: result.error }, 'Failed to patch store');
|
||||
return { success: false, error: result.error || 'Unknown error' };
|
||||
}
|
||||
|
||||
this.logger.debug({ store: storeName }, 'Patched store in container');
|
||||
return { success: true, newState: result.data };
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||
this.logger.error({ store: storeName, error: message }, 'Failed to patch store in container');
|
||||
return { success: false, error: message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load all persistent stores from the container.
|
||||
* Returns a map of store name -> state.
|
||||
*/
|
||||
async loadAllStores(storeNames: string[]): Promise<Map<string, unknown>> {
|
||||
const states = new Map<string, unknown>();
|
||||
|
||||
for (const storeName of storeNames) {
|
||||
const result = await this.loadStore(storeName);
|
||||
if (result.exists && result.state !== undefined) {
|
||||
states.set(storeName, result.state);
|
||||
}
|
||||
}
|
||||
|
||||
return states;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save all persistent stores to the container.
|
||||
*/
|
||||
async saveAllStores(stores: Map<string, unknown>): Promise<void> {
|
||||
for (const [storeName, state] of stores) {
|
||||
await this.saveStore(storeName, state);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if MCP client is connected.
|
||||
*/
|
||||
isConnected(): boolean {
|
||||
return this.mcpClient.isConnected();
|
||||
}
|
||||
}
|
||||
86
gateway/src/workspace/index.ts
Normal file
86
gateway/src/workspace/index.ts
Normal file
@@ -0,0 +1,86 @@
|
||||
/**
|
||||
* Workspace Module
|
||||
*
|
||||
* Provides two-way state synchronization between web clients, gateway, and user containers.
|
||||
*
|
||||
* Key components:
|
||||
* - WorkspaceManager: Per-session state manager with channel-agnostic interface
|
||||
* - SyncRegistry: Handles JSON patch sync protocol
|
||||
* - ContainerSync: Persists state to user containers via MCP
|
||||
*
|
||||
* Usage:
|
||||
* ```typescript
|
||||
* import { WorkspaceManager, ContainerSync, DEFAULT_STORES } from './workspace/index.js';
|
||||
*
|
||||
* // Create container sync (optional, for persistent stores)
|
||||
* const containerSync = new ContainerSync(mcpClient, logger);
|
||||
*
|
||||
* // Create workspace manager for session
|
||||
* const workspace = new WorkspaceManager({
|
||||
* userId: 'user-123',
|
||||
* sessionId: 'session-456',
|
||||
* stores: DEFAULT_STORES,
|
||||
* containerSync,
|
||||
* logger,
|
||||
* });
|
||||
*
|
||||
* // Initialize (loads persistent stores from container)
|
||||
* await workspace.initialize();
|
||||
*
|
||||
* // Attach channel adapter
|
||||
* workspace.setAdapter({
|
||||
* sendSnapshot: (msg) => socket.send(JSON.stringify(msg)),
|
||||
* sendPatch: (msg) => socket.send(JSON.stringify(msg)),
|
||||
* getCapabilities: () => ({ supportsSync: true, ... }),
|
||||
* });
|
||||
*
|
||||
* // Handle sync messages from client
|
||||
* workspace.handleHello(clientSeqs);
|
||||
* workspace.handlePatch(storeName, seq, patch);
|
||||
*
|
||||
* // Access state
|
||||
* const chartState = workspace.getState('chartState');
|
||||
* await workspace.setState('chartState', newState);
|
||||
*
|
||||
* // Register triggers (future use)
|
||||
* const unsub = workspace.onPathChange('/chartState/symbol', (old, new, ctx) => {
|
||||
* console.log('Symbol changed:', old, '->', new);
|
||||
* });
|
||||
*
|
||||
* // Cleanup
|
||||
* await workspace.shutdown();
|
||||
* ```
|
||||
*/
|
||||
|
||||
// Types
|
||||
export type {
|
||||
SnapshotMessage,
|
||||
PatchMessage,
|
||||
HelloMessage,
|
||||
InboundSyncMessage,
|
||||
OutboundSyncMessage,
|
||||
StoreConfig,
|
||||
ChannelAdapter,
|
||||
ChannelCapabilities,
|
||||
PathTrigger,
|
||||
PathTriggerHandler,
|
||||
PathTriggerContext,
|
||||
ChartState,
|
||||
ChartStore,
|
||||
ChannelState,
|
||||
ChannelInfo,
|
||||
WorkspaceStores,
|
||||
} from './types.js';
|
||||
|
||||
export { DEFAULT_STORES } from './types.js';
|
||||
|
||||
// Sync registry
|
||||
export { SyncRegistry } from './sync-registry.js';
|
||||
|
||||
// Container sync
|
||||
export { ContainerSync } from './container-sync.js';
|
||||
export type { LoadResult, SaveResult, PatchResult } from './container-sync.js';
|
||||
|
||||
// Workspace manager
|
||||
export { WorkspaceManager } from './workspace-manager.js';
|
||||
export type { WorkspaceManagerConfig } from './workspace-manager.js';
|
||||
407
gateway/src/workspace/sync-registry.ts
Normal file
407
gateway/src/workspace/sync-registry.ts
Normal file
@@ -0,0 +1,407 @@
|
||||
/**
|
||||
* Sync Registry
|
||||
*
|
||||
* Manages synchronized state stores with JSON patch-based updates.
|
||||
* Ported from backend.old/src/sync/registry.py.
|
||||
*
|
||||
* Key features:
|
||||
* - Sequence-numbered patches for reliable sync
|
||||
* - History buffer for catchup patches
|
||||
* - Conflict resolution (frontend wins)
|
||||
* - Optimistic updates with rollback on conflict
|
||||
*/
|
||||
|
||||
import type { Operation as JsonPatchOp } from 'fast-json-patch';
|
||||
import fastJsonPatch from 'fast-json-patch';
|
||||
const { applyPatch, compare: computePatch, deepClone } = fastJsonPatch;
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { SnapshotMessage, PatchMessage, StoreConfig } from './types.js';
|
||||
|
||||
/**
 * History entry: sequence number and the patch that produced it.
 * Retained in SyncEntry's bounded history so lagging clients can be
 * caught up with patches instead of a full snapshot.
 */
interface HistoryEntry {
  seq: number; // sequence number after applying `patch`
  patch: JsonPatchOp[]; // RFC 6902 operations that produced this seq
}
|
||||
|
||||
/**
 * Entry for a single synchronized store.
 *
 * Tracks the authoritative state, a monotonically increasing sequence
 * number (bumped once per committed patch), the snapshot the last patch
 * was computed against, and a bounded patch history used to build
 * catchup patches for reconnecting clients.
 */
class SyncEntry {
  readonly storeName: string;
  // Authoritative current state; deep-cloned on the way in and out.
  private state: unknown;
  // Sequence number of the last committed patch; 0 means "fresh state".
  private seq: number = 0;
  // State as of the last commit; diffs are computed against this.
  private lastSnapshot: unknown;
  // Recent patches, oldest first, trimmed to historySize entries.
  private history: HistoryEntry[] = [];
  private readonly historySize: number;

  constructor(storeName: string, initialState: unknown, historySize: number = 50) {
    this.storeName = storeName;
    // Clone so later mutation of the caller's object cannot leak in.
    this.state = deepClone(initialState);
    this.lastSnapshot = deepClone(initialState);
    this.historySize = historySize;
  }

  /**
   * Get current state (deep clone to prevent mutation).
   */
  getState(): unknown {
    return deepClone(this.state);
  }

  /**
   * Get current sequence number.
   */
  getSeq(): number {
    return this.seq;
  }

  /**
   * Set state directly (used for loading from container).
   * Resets sequence to 0 and discards history, so connected clients
   * will require a fresh snapshot to resync.
   */
  setState(newState: unknown): void {
    this.state = deepClone(newState);
    this.lastSnapshot = deepClone(newState);
    this.seq = 0;
    this.history = [];
  }

  /**
   * Compute patch from last snapshot to current state.
   * Returns null if no changes.
   * Note: the bare `computePatch(...)` call below resolves to the
   * module-level fast-json-patch `compare` alias, not to this method.
   */
  computePatch(): JsonPatchOp[] | null {
    const currentState = deepClone(this.state);
    const patch = computePatch(this.lastSnapshot as any, currentState as any);
    return patch.length > 0 ? patch : null;
  }

  /**
   * Commit a patch to history, bump the sequence, and refresh the
   * snapshot so subsequent diffs start from the committed state.
   */
  commitPatch(patch: JsonPatchOp[]): void {
    this.seq += 1;
    this.history.push({ seq: this.seq, patch });

    // Trim history if needed (oldest entries drop first).
    while (this.history.length > this.historySize) {
      this.history.shift();
    }

    this.lastSnapshot = deepClone(this.state);
  }

  /**
   * Get catchup patches since a given sequence.
   * Returns [] when the client is already current, and null when the
   * gap is no longer covered by history (need full snapshot).
   */
  getCatchupPatches(sinceSeq: number): HistoryEntry[] | null {
    if (sinceSeq === this.seq) {
      return [];
    }

    // Check if we have all patches needed: the oldest retained entry
    // must be sinceSeq + 1 or earlier to cover the gap.
    if (this.history.length === 0 || this.history[0].seq > sinceSeq + 1) {
      return null; // Need full snapshot
    }

    return this.history.filter((entry) => entry.seq > sinceSeq);
  }

  /**
   * Apply a patch to state (used when applying local changes).
   * Does NOT commit — callers pair this with commitPatch().
   */
  applyPatch(patch: JsonPatchOp[]): void {
    const result = applyPatch(deepClone(this.state), patch, false, false);
    this.state = result.newDocument;
  }

  /**
   * Apply client patch with conflict resolution.
   * Returns the resolved state and any patches to send back.
   *
   * Three cases by the client's base sequence:
   *  - equal to ours: apply directly, no snapshot needed;
   *  - behind ours: conflict — apply the client's change over our state
   *    (frontend wins on overlapping paths), re-apply non-overlapping
   *    backend patches, then ask the caller to send a snapshot so both
   *    sides converge;
   *  - ahead of ours (shouldn't happen): send a snapshot.
   * Never throws; unexpected errors degrade to "send snapshot".
   */
  applyClientPatch(
    clientBaseSeq: number,
    patch: JsonPatchOp[],
    logger?: FastifyBaseLogger
  ): { needsSnapshot: boolean; resolvedState?: unknown } {
    try {
      if (clientBaseSeq === this.seq) {
        // No conflict - apply directly
        const currentState = deepClone(this.state);
        const result = applyPatch(currentState, patch, false, false);
        this.state = result.newDocument;
        this.commitPatch(patch);
        logger?.debug(
          { store: this.storeName, seq: this.seq },
          'Applied client patch without conflict'
        );
        return { needsSnapshot: false };
      }

      if (clientBaseSeq < this.seq) {
        // Conflict! Frontend wins.
        logger?.debug(
          { store: this.storeName, clientSeq: clientBaseSeq, serverSeq: this.seq },
          'Conflict detected, frontend wins'
        );

        // Get backend patches since client's base
        const backendPatches: JsonPatchOp[][] = [];
        for (const entry of this.history) {
          if (entry.seq > clientBaseSeq) {
            backendPatches.push(entry.patch);
          }
        }

        // Get paths modified by frontend (these take precedence below)
        const frontendPaths = new Set(patch.map((op) => op.path));

        // Apply frontend patch first
        const currentState = deepClone(this.state);
        let newState: unknown;
        try {
          const result = applyPatch(currentState, patch, false, false);
          newState = result.newDocument;
        } catch (e) {
          logger?.warn(
            { store: this.storeName, error: e },
            'Failed to apply client patch during conflict resolution'
          );
          return { needsSnapshot: true, resolvedState: this.state };
        }

        // Re-apply backend patches that don't overlap with frontend
        for (const bPatch of backendPatches) {
          const filteredPatch = bPatch.filter((op) => !frontendPaths.has(op.path));
          if (filteredPatch.length > 0) {
            try {
              const result = applyPatch(deepClone(newState), filteredPatch, false, false);
              newState = result.newDocument;
            } catch (e) {
              // A backend patch may no longer apply cleanly on top of the
              // frontend's change; best-effort: skip it.
              logger?.debug(
                { store: this.storeName, error: e },
                'Skipping backend patch during conflict resolution'
              );
            }
          }
        }

        this.state = newState;

        // Compute final patch from last snapshot
        const finalPatch = computePatch(this.lastSnapshot as any, newState as any);
        if (finalPatch.length > 0) {
          this.commitPatch(finalPatch);
        }

        // Send snapshot to converge
        return { needsSnapshot: true, resolvedState: this.state };
      }

      // clientBaseSeq > this.seq - client is ahead, shouldn't happen
      logger?.warn(
        { store: this.storeName, clientSeq: clientBaseSeq, serverSeq: this.seq },
        'Client ahead of server, sending snapshot'
      );
      return { needsSnapshot: true, resolvedState: this.state };
    } catch (e) {
      logger?.error(
        { store: this.storeName, error: e },
        'Unexpected error applying client patch'
      );
      return { needsSnapshot: true, resolvedState: this.state };
    }
  }
}
|
||||
|
||||
/**
|
||||
* Registry managing multiple synchronized stores.
|
||||
*/
|
||||
export class SyncRegistry {
|
||||
private entries = new Map<string, SyncEntry>();
|
||||
private logger?: FastifyBaseLogger;
|
||||
|
||||
  constructor(logger?: FastifyBaseLogger) {
    // Child logger scopes all registry output under component=SyncRegistry;
    // logging is optional throughout (logger may be undefined).
    this.logger = logger?.child({ component: 'SyncRegistry' });
  }
|
||||
|
||||
/**
|
||||
* Register a store with initial state.
|
||||
*/
|
||||
register(config: StoreConfig): void {
|
||||
const entry = new SyncEntry(config.name, config.initialState());
|
||||
this.entries.set(config.name, entry);
|
||||
this.logger?.debug({ store: config.name }, 'Registered store');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a store is registered.
|
||||
*/
|
||||
has(storeName: string): boolean {
|
||||
return this.entries.has(storeName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current state of a store.
|
||||
*/
|
||||
getState<T = unknown>(storeName: string): T | undefined {
|
||||
const entry = this.entries.get(storeName);
|
||||
return entry?.getState() as T | undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current sequence number for a store.
|
||||
*/
|
||||
getSeq(storeName: string): number {
|
||||
const entry = this.entries.get(storeName);
|
||||
return entry?.getSeq() ?? 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set state directly (used for loading from container).
|
||||
*/
|
||||
setState(storeName: string, state: unknown): void {
|
||||
const entry = this.entries.get(storeName);
|
||||
if (entry) {
|
||||
entry.setState(state);
|
||||
this.logger?.debug({ store: storeName }, 'Set store state');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update state locally and compute patch.
|
||||
* Returns the patch if state changed, null otherwise.
|
||||
*/
|
||||
updateState(storeName: string, updater: (state: unknown) => unknown): JsonPatchOp[] | null {
|
||||
const entry = this.entries.get(storeName);
|
||||
if (!entry) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const currentState = entry.getState();
|
||||
const newState = updater(currentState);
|
||||
|
||||
// Compute patch
|
||||
const patch = computePatch(currentState as any, newState as any);
|
||||
if (patch.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Apply and commit
|
||||
entry.applyPatch(patch);
|
||||
entry.commitPatch(patch);
|
||||
|
||||
this.logger?.debug(
|
||||
{ store: storeName, seq: entry.getSeq(), patchOps: patch.length },
|
||||
'Updated store state'
|
||||
);
|
||||
|
||||
return patch;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync client based on their reported sequences.
|
||||
* Returns messages to send (snapshots or patches).
|
||||
*/
|
||||
syncClient(clientSeqs: Record<string, number>): (SnapshotMessage | PatchMessage)[] {
|
||||
const messages: (SnapshotMessage | PatchMessage)[] = [];
|
||||
|
||||
for (const [storeName, entry] of this.entries) {
|
||||
const clientSeq = clientSeqs[storeName] ?? -1;
|
||||
const catchupPatches = entry.getCatchupPatches(clientSeq);
|
||||
|
||||
if (catchupPatches === null) {
|
||||
// Need full snapshot
|
||||
messages.push({
|
||||
type: 'snapshot',
|
||||
store: storeName,
|
||||
seq: entry.getSeq(),
|
||||
state: entry.getState(),
|
||||
});
|
||||
this.logger?.debug(
|
||||
{ store: storeName, clientSeq, serverSeq: entry.getSeq() },
|
||||
'Sending snapshot'
|
||||
);
|
||||
} else {
|
||||
// Send catchup patches
|
||||
for (const { seq, patch } of catchupPatches) {
|
||||
messages.push({
|
||||
type: 'patch',
|
||||
store: storeName,
|
||||
seq,
|
||||
patch,
|
||||
});
|
||||
}
|
||||
if (catchupPatches.length > 0) {
|
||||
this.logger?.debug(
|
||||
{ store: storeName, patchCount: catchupPatches.length },
|
||||
'Sending catchup patches'
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return messages;
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply a patch from the client.
|
||||
* Returns message to send back (snapshot if conflict, null otherwise).
|
||||
*/
|
||||
applyClientPatch(
|
||||
storeName: string,
|
||||
clientBaseSeq: number,
|
||||
patch: JsonPatchOp[]
|
||||
): SnapshotMessage | null {
|
||||
const entry = this.entries.get(storeName);
|
||||
if (!entry) {
|
||||
this.logger?.warn({ store: storeName }, 'Store not found');
|
||||
return null;
|
||||
}
|
||||
|
||||
const result = entry.applyClientPatch(clientBaseSeq, patch, this.logger);
|
||||
|
||||
if (result.needsSnapshot) {
|
||||
return {
|
||||
type: 'snapshot',
|
||||
store: storeName,
|
||||
seq: entry.getSeq(),
|
||||
state: result.resolvedState ?? entry.getState(),
|
||||
};
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered store names.
|
||||
*/
|
||||
getStoreNames(): string[] {
|
||||
return Array.from(this.entries.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all current sequences (for persistence).
|
||||
*/
|
||||
getAllSeqs(): Record<string, number> {
|
||||
const seqs: Record<string, number> = {};
|
||||
for (const [name, entry] of this.entries) {
|
||||
seqs[name] = entry.getSeq();
|
||||
}
|
||||
return seqs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all current states (for persistence).
|
||||
*/
|
||||
getAllStates(): Record<string, unknown> {
|
||||
const states: Record<string, unknown> = {};
|
||||
for (const [name, entry] of this.entries) {
|
||||
states[name] = entry.getState();
|
||||
}
|
||||
return states;
|
||||
}
|
||||
}
|
||||
239
gateway/src/workspace/types.ts
Normal file
239
gateway/src/workspace/types.ts
Normal file
@@ -0,0 +1,239 @@
|
||||
/**
 * Workspace Sync Types
 *
 * Defines the protocol messages and abstractions for two-way state sync
 * between web clients, gateway, and user containers.
 *
 * The workspace is a unified namespace that:
 * - Syncs transient state (chartState) between client and gateway
 * - Syncs persistent state (chartStore) between client, gateway, and container
 * - Provides triggers for path changes (future use)
 * - Is channel-agnostic (works with WebSocket, Telegram, Slack, etc.)
 */

import type { Operation as JsonPatchOp } from 'fast-json-patch';

// =============================================================================
// Protocol Messages
// =============================================================================

/**
 * Full state snapshot for a store.
 * Sent when client connects or when catchup patches are unavailable.
 */
export interface SnapshotMessage {
  type: 'snapshot';
  /** Store name the snapshot belongs to. */
  store: string;
  /** Server sequence number this snapshot reflects. */
  seq: number;
  /** Complete store state; shape depends on the store. */
  state: unknown;
}

/**
 * Incremental patch for a store.
 * Uses JSON Patch (RFC 6902) format.
 */
export interface PatchMessage {
  type: 'patch';
  /** Store name the patch applies to. */
  store: string;
  /** Server sequence number after applying this patch. */
  seq: number;
  /** RFC 6902 operations to apply in order. */
  patch: JsonPatchOp[];
}

/**
 * Client hello message with current sequence numbers.
 * Sent on connect to request catchup patches or snapshots.
 */
export interface HelloMessage {
  type: 'hello';
  /** Map of store name -> last sequence the client has applied. */
  seqs: Record<string, number>;
}

/** Messages from client to gateway */
export type InboundSyncMessage = HelloMessage | PatchMessage;

/** Messages from gateway to client */
export type OutboundSyncMessage = SnapshotMessage | PatchMessage;

// =============================================================================
// Store Configuration
// =============================================================================

/**
 * Configuration for a workspace store.
 */
export interface StoreConfig {
  /** Unique store name (e.g., 'chartState', 'chartStore') */
  name: string;

  /** If true, store is persisted to user container via MCP */
  persistent: boolean;

  /** Factory function returning initial state for new sessions */
  initialState: () => unknown;
}
|
||||
|
||||
/**
|
||||
* Default store configurations.
|
||||
* Additional stores can be registered at runtime.
|
||||
*/
|
||||
export const DEFAULT_STORES: StoreConfig[] = [
|
||||
{
|
||||
name: 'chartState',
|
||||
persistent: false,
|
||||
initialState: () => ({
|
||||
symbol: 'BINANCE:BTC/USDT',
|
||||
start_time: null,
|
||||
end_time: null,
|
||||
interval: '15',
|
||||
selected_shapes: [],
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: 'chartStore',
|
||||
persistent: true,
|
||||
initialState: () => ({
|
||||
drawings: {},
|
||||
templates: {},
|
||||
}),
|
||||
},
|
||||
{
|
||||
name: 'channelState',
|
||||
persistent: false,
|
||||
initialState: () => ({
|
||||
connected: {},
|
||||
}),
|
||||
},
|
||||
];
|
||||
|
||||
// =============================================================================
// Channel Adapter Interface
// =============================================================================

/**
 * Capabilities that a channel may support.
 */
export interface ChannelCapabilities {
  /** Channel supports sync protocol (snapshot/patch messages) */
  supportsSync: boolean;

  /** Channel supports sending images */
  supportsImages: boolean;

  /** Channel supports markdown formatting */
  supportsMarkdown: boolean;

  /** Channel supports streaming responses */
  supportsStreaming: boolean;

  /** Channel supports TradingView chart embeds */
  supportsTradingViewEmbed: boolean;
}

/**
 * Adapter interface for communication channels.
 * Implemented by WebSocket handler, Telegram handler, etc.
 */
export interface ChannelAdapter {
  /** Send a full state snapshot to the client */
  sendSnapshot(msg: SnapshotMessage): void;

  /** Send an incremental patch to the client */
  sendPatch(msg: PatchMessage): void;

  /** Get channel capabilities */
  getCapabilities(): ChannelCapabilities;
}

// =============================================================================
// Path Triggers (Future Use)
// =============================================================================

/**
 * Trigger handler function type.
 * Called when a watched path changes.
 */
export type PathTriggerHandler = (
  oldValue: unknown,
  newValue: unknown,
  context: PathTriggerContext
) => void | Promise<void>;

/**
 * Context passed to trigger handlers.
 */
export interface PathTriggerContext {
  /** Store name where change occurred */
  store: string;

  /** Full path that changed (JSON pointer) */
  path: string;

  /** Current sequence number after change */
  seq: number;

  /** User ID for this workspace */
  userId: string;

  /** Session ID for this workspace */
  sessionId: string;
}

/**
 * Registered path trigger.
 */
export interface PathTrigger {
  /** JSON pointer path to watch (e.g., '/chartState/symbol') */
  path: string;

  /** Handler called when path changes */
  handler: PathTriggerHandler;
}

// =============================================================================
// Store State Types (for type-safe access)
// =============================================================================

/**
 * Chart state - transient, tracks current view.
 */
export interface ChartState {
  /** Ticker in EXCHANGE:BASE/QUOTE form (e.g. 'BINANCE:BTC/USDT'). */
  symbol: string;
  // NOTE(review): units of start_time/end_time (ms vs s) are not visible
  // from this file — confirm against the chart client before relying on them.
  start_time: number | null;
  end_time: number | null;
  interval: string;
  selected_shapes: string[];
}

/**
 * Chart store - persistent, stores drawings and templates.
 */
export interface ChartStore {
  drawings: Record<string, unknown>;
  templates: Record<string, unknown>;
}

/**
 * Channel state - transient, tracks connected channels.
 */
export interface ChannelState {
  /** Keyed by channel identifier; values describe each connection. */
  connected: Record<string, ChannelInfo>;
}

/**
 * Information about a connected channel.
 */
export interface ChannelInfo {
  type: string;
  connectedAt: number;
  capabilities: ChannelCapabilities;
}

/**
 * Map of store names to their state types.
 */
export interface WorkspaceStores {
  chartState: ChartState;
  chartStore: ChartStore;
  channelState: ChannelState;
  [key: string]: unknown;
}
|
||||
460
gateway/src/workspace/workspace-manager.ts
Normal file
460
gateway/src/workspace/workspace-manager.ts
Normal file
@@ -0,0 +1,460 @@
|
||||
/**
|
||||
* Workspace Manager
|
||||
*
|
||||
* Central manager for workspace state synchronization across channels.
|
||||
* Provides a channel-agnostic interface for:
|
||||
* - Two-way sync of transient state (client ↔ gateway)
|
||||
* - Two-way sync of persistent state (client ↔ gateway ↔ container)
|
||||
* - Path-based change triggers (future use)
|
||||
*
|
||||
* Each user session gets one WorkspaceManager instance.
|
||||
* Multiple channels (WebSocket, Telegram, etc.) can attach to the same workspace.
|
||||
*/
|
||||
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { Operation as JsonPatchOp } from 'fast-json-patch';
|
||||
import { SyncRegistry } from './sync-registry.js';
|
||||
import type { ContainerSync } from './container-sync.js';
|
||||
import type {
|
||||
StoreConfig,
|
||||
ChannelAdapter,
|
||||
PathTrigger,
|
||||
PathTriggerHandler,
|
||||
PathTriggerContext,
|
||||
WorkspaceStores,
|
||||
} from './types.js';
|
||||
import { DEFAULT_STORES } from './types.js';
|
||||
|
||||
/** Construction options for a per-session WorkspaceManager. */
export interface WorkspaceManagerConfig {
  /** User id; recorded in trigger contexts. */
  userId: string;
  /** Session id; bound into the child logger and trigger contexts. */
  sessionId: string;
  /** Stores to register at construction time. */
  stores: StoreConfig[];
  /** Optional persistence bridge to the user container; when absent, persistent stores use their initial state. */
  containerSync?: ContainerSync;
  /** Parent logger; a child logger is derived from it. */
  logger: FastifyBaseLogger;
}
|
||||
|
||||
/**
|
||||
* Manages workspace state for a user session.
|
||||
*/
|
||||
export class WorkspaceManager {
|
||||
private userId: string;
|
||||
private sessionId: string;
|
||||
private registry: SyncRegistry;
|
||||
private containerSync?: ContainerSync;
|
||||
private logger: FastifyBaseLogger;
|
||||
private stores: StoreConfig[];
|
||||
|
||||
// Current channel adapter (WebSocket, Telegram, etc.)
|
||||
private adapter: ChannelAdapter | null = null;
|
||||
|
||||
// Path triggers for change notifications
|
||||
private triggers: PathTrigger[] = [];
|
||||
|
||||
// Track which stores are dirty (changed since last container sync)
|
||||
private dirtyStores = new Set<string>();
|
||||
|
||||
// Track initialization state
|
||||
private initialized = false;
|
||||
|
||||
constructor(config: WorkspaceManagerConfig) {
|
||||
this.userId = config.userId;
|
||||
this.sessionId = config.sessionId;
|
||||
this.stores = config.stores;
|
||||
this.containerSync = config.containerSync;
|
||||
this.logger = config.logger.child({ component: 'WorkspaceManager', sessionId: config.sessionId });
|
||||
|
||||
this.registry = new SyncRegistry(this.logger);
|
||||
|
||||
// Register all stores
|
||||
for (const store of this.stores) {
|
||||
this.registry.register(store);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize workspace - load persistent stores from container.
|
||||
*/
|
||||
async initialize(): Promise<void> {
|
||||
if (this.initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.logger.info('Initializing workspace');
|
||||
|
||||
// Load persistent stores from container
|
||||
if (this.containerSync?.isConnected()) {
|
||||
const persistentStores = this.stores.filter((s) => s.persistent).map((s) => s.name);
|
||||
|
||||
if (persistentStores.length > 0) {
|
||||
this.logger.debug({ stores: persistentStores }, 'Loading persistent stores from container');
|
||||
|
||||
const states = await this.containerSync.loadAllStores(persistentStores);
|
||||
for (const [storeName, state] of states) {
|
||||
this.registry.setState(storeName, state);
|
||||
this.logger.debug({ store: storeName }, 'Loaded persistent store');
|
||||
}
|
||||
}
|
||||
} else {
|
||||
this.logger.debug('Container sync not available, using initial state for persistent stores');
|
||||
}
|
||||
|
||||
this.initialized = true;
|
||||
this.logger.info('Workspace initialized');
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown workspace - save dirty persistent stores to container.
|
||||
*/
|
||||
async shutdown(): Promise<void> {
|
||||
if (!this.initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.logger.info('Shutting down workspace');
|
||||
|
||||
// Save dirty persistent stores
|
||||
await this.saveDirtyStores();
|
||||
|
||||
this.adapter = null;
|
||||
this.initialized = false;
|
||||
this.logger.info('Workspace shut down');
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Channel Adapter Management
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Set the channel adapter for sending messages.
|
||||
* Only one adapter can be active at a time.
|
||||
*/
|
||||
setAdapter(adapter: ChannelAdapter): void {
|
||||
this.adapter = adapter;
|
||||
this.logger.debug('Channel adapter set');
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the channel adapter.
|
||||
*/
|
||||
clearAdapter(): void {
|
||||
this.adapter = null;
|
||||
this.logger.debug('Channel adapter cleared');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an adapter is connected.
|
||||
*/
|
||||
hasAdapter(): boolean {
|
||||
return this.adapter !== null;
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Sync Protocol Handlers (called by channel adapters)
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Handle hello message from client.
|
||||
* Sends snapshots or catchup patches for all stores.
|
||||
*/
|
||||
async handleHello(clientSeqs: Record<string, number>): Promise<void> {
|
||||
if (!this.adapter) {
|
||||
this.logger.warn('No adapter connected, cannot respond to hello');
|
||||
return;
|
||||
}
|
||||
|
||||
this.logger.debug({ clientSeqs }, 'Handling hello');
|
||||
|
||||
const messages = this.registry.syncClient(clientSeqs);
|
||||
|
||||
for (const msg of messages) {
|
||||
if (msg.type === 'snapshot') {
|
||||
this.adapter.sendSnapshot(msg);
|
||||
} else {
|
||||
this.adapter.sendPatch(msg);
|
||||
}
|
||||
}
|
||||
|
||||
this.logger.debug({ messageCount: messages.length }, 'Sent sync messages');
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle patch message from client.
|
||||
* Applies patch and may send snapshot back on conflict.
|
||||
*/
|
||||
async handlePatch(storeName: string, clientSeq: number, patch: JsonPatchOp[]): Promise<void> {
|
||||
this.logger.debug({ store: storeName, clientSeq, patchOps: patch.length }, 'Handling client patch');
|
||||
|
||||
// Get old state for triggers
|
||||
const oldState = this.registry.getState(storeName);
|
||||
|
||||
// Apply patch
|
||||
const response = this.registry.applyClientPatch(storeName, clientSeq, patch);
|
||||
|
||||
// Mark as dirty if persistent
|
||||
const storeConfig = this.stores.find((s) => s.name === storeName);
|
||||
if (storeConfig?.persistent) {
|
||||
this.dirtyStores.add(storeName);
|
||||
}
|
||||
|
||||
// Send response if needed
|
||||
if (response && this.adapter) {
|
||||
this.adapter.sendSnapshot(response);
|
||||
}
|
||||
|
||||
// Fire triggers
|
||||
const newState = this.registry.getState(storeName);
|
||||
await this.fireTriggers(storeName, oldState, newState, patch);
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// State Access (for gateway code)
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Get current state of a store.
|
||||
*/
|
||||
getState<K extends keyof WorkspaceStores>(storeName: K): WorkspaceStores[K] | undefined;
|
||||
getState<T = unknown>(storeName: string): T | undefined;
|
||||
getState<T = unknown>(storeName: string): T | undefined {
|
||||
return this.registry.getState<T>(storeName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update state of a store and notify client.
|
||||
*/
|
||||
async setState(storeName: string, state: unknown): Promise<void> {
|
||||
// Get old state for triggers
|
||||
const oldState = this.registry.getState(storeName);
|
||||
|
||||
// Update state (this computes and commits a patch)
|
||||
const patch = this.registry.updateState(storeName, () => state);
|
||||
|
||||
if (patch) {
|
||||
// Mark as dirty if persistent
|
||||
const storeConfig = this.stores.find((s) => s.name === storeName);
|
||||
if (storeConfig?.persistent) {
|
||||
this.dirtyStores.add(storeName);
|
||||
}
|
||||
|
||||
// Send patch to client
|
||||
if (this.adapter) {
|
||||
this.adapter.sendPatch({
|
||||
type: 'patch',
|
||||
store: storeName,
|
||||
seq: this.registry.getSeq(storeName),
|
||||
patch,
|
||||
});
|
||||
}
|
||||
|
||||
// Fire triggers
|
||||
await this.fireTriggers(storeName, oldState, state, patch);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update state with a partial merge.
|
||||
*/
|
||||
async updateState<T extends Record<string, unknown>>(
|
||||
storeName: string,
|
||||
updates: Partial<T>
|
||||
): Promise<void> {
|
||||
const current = this.registry.getState<T>(storeName);
|
||||
if (current && typeof current === 'object') {
|
||||
await this.setState(storeName, { ...current, ...updates });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all store names.
|
||||
*/
|
||||
getStoreNames(): string[] {
|
||||
return this.registry.getStoreNames();
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Path Triggers
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Register a trigger for path changes.
|
||||
* Returns unsubscribe function.
|
||||
*/
|
||||
onPathChange(path: string, handler: PathTriggerHandler): () => void {
|
||||
const trigger: PathTrigger = { path, handler };
|
||||
this.triggers.push(trigger);
|
||||
|
||||
this.logger.debug({ path }, 'Registered path trigger');
|
||||
|
||||
return () => {
|
||||
const index = this.triggers.indexOf(trigger);
|
||||
if (index >= 0) {
|
||||
this.triggers.splice(index, 1);
|
||||
this.logger.debug({ path }, 'Unregistered path trigger');
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fire triggers for paths affected by a patch.
|
||||
*/
|
||||
private async fireTriggers(
|
||||
storeName: string,
|
||||
oldState: unknown,
|
||||
newState: unknown,
|
||||
patch: JsonPatchOp[]
|
||||
): Promise<void> {
|
||||
if (this.triggers.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const context: PathTriggerContext = {
|
||||
store: storeName,
|
||||
path: '',
|
||||
seq: this.registry.getSeq(storeName),
|
||||
userId: this.userId,
|
||||
sessionId: this.sessionId,
|
||||
};
|
||||
|
||||
// Check each patch operation against triggers
|
||||
for (const op of patch) {
|
||||
const fullPath = `/${storeName}${op.path}`;
|
||||
|
||||
for (const trigger of this.triggers) {
|
||||
if (this.pathMatches(fullPath, trigger.path)) {
|
||||
context.path = fullPath;
|
||||
|
||||
// Extract old and new values at the path
|
||||
const oldValue = this.getValueAtPath(oldState, op.path);
|
||||
const newValue = this.getValueAtPath(newState, op.path);
|
||||
|
||||
try {
|
||||
await trigger.handler(oldValue, newValue, context);
|
||||
} catch (error) {
|
||||
this.logger.error(
|
||||
{ path: trigger.path, error },
|
||||
'Error in path trigger handler'
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a path matches a trigger path pattern.
|
||||
* Currently supports exact match and prefix match with wildcard.
|
||||
*/
|
||||
private pathMatches(path: string, pattern: string): boolean {
|
||||
// Exact match
|
||||
if (path === pattern) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Prefix match (e.g., /chartState/* matches /chartState/symbol)
|
||||
if (pattern.endsWith('/*')) {
|
||||
const prefix = pattern.slice(0, -2);
|
||||
return path.startsWith(prefix + '/');
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get value at a JSON pointer path.
|
||||
*/
|
||||
private getValueAtPath(obj: unknown, path: string): unknown {
|
||||
if (!path || path === '/') {
|
||||
return obj;
|
||||
}
|
||||
|
||||
const parts = path.split('/').filter(Boolean);
|
||||
let current: any = obj;
|
||||
|
||||
for (const part of parts) {
|
||||
if (current === null || current === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
current = current[part];
|
||||
}
|
||||
|
||||
return current;
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Container Persistence
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Save dirty persistent stores to container.
|
||||
*/
|
||||
async saveDirtyStores(): Promise<void> {
|
||||
if (!this.containerSync?.isConnected()) {
|
||||
this.logger.debug('Container sync not available, skipping save');
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.dirtyStores.size === 0) {
|
||||
this.logger.debug('No dirty stores to save');
|
||||
return;
|
||||
}
|
||||
|
||||
const toSave = new Map<string, unknown>();
|
||||
|
||||
for (const storeName of this.dirtyStores) {
|
||||
const storeConfig = this.stores.find((s) => s.name === storeName);
|
||||
if (storeConfig?.persistent) {
|
||||
const state = this.registry.getState(storeName);
|
||||
if (state !== undefined) {
|
||||
toSave.set(storeName, state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (toSave.size > 0) {
|
||||
this.logger.debug({ stores: Array.from(toSave.keys()) }, 'Saving dirty stores to container');
|
||||
await this.containerSync.saveAllStores(toSave);
|
||||
this.dirtyStores.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Force save a specific store to container.
|
||||
*/
|
||||
async saveStore(storeName: string): Promise<void> {
|
||||
if (!this.containerSync?.isConnected()) {
|
||||
this.logger.warn({ store: storeName }, 'Container sync not available');
|
||||
return;
|
||||
}
|
||||
|
||||
const storeConfig = this.stores.find((s) => s.name === storeName);
|
||||
if (!storeConfig?.persistent) {
|
||||
this.logger.warn({ store: storeName }, 'Store is not persistent');
|
||||
return;
|
||||
}
|
||||
|
||||
const state = this.registry.getState(storeName);
|
||||
if (state !== undefined) {
|
||||
await this.containerSync.saveStore(storeName, state);
|
||||
this.dirtyStores.delete(storeName);
|
||||
}
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Accessors
|
||||
// ===========================================================================
|
||||
|
||||
getUserId(): string {
|
||||
return this.userId;
|
||||
}
|
||||
|
||||
getSessionId(): string {
|
||||
return this.sessionId;
|
||||
}
|
||||
|
||||
isInitialized(): boolean {
|
||||
return this.initialized;
|
||||
}
|
||||
}
|
||||
|
||||
// Re-export DEFAULT_STORES for convenience
|
||||
export { DEFAULT_STORES };
|
||||
@@ -2,10 +2,12 @@
|
||||
import ccxt from 'ccxt';
|
||||
|
||||
export class CCXTFetcher {
|
||||
constructor(config, logger) {
|
||||
constructor(config, logger, metadataGenerator = null) {
|
||||
this.config = config;
|
||||
this.logger = logger;
|
||||
this.exchanges = new Map();
|
||||
this.metadataGenerator = metadataGenerator;
|
||||
this.metadataCache = new Map(); // Cache metadata by ticker
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -24,6 +26,41 @@ export class CCXTFetcher {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get metadata for a ticker (cached or generate on-the-fly)
|
||||
*/
|
||||
async getMetadata(ticker) {
|
||||
// Check cache first
|
||||
if (this.metadataCache.has(ticker)) {
|
||||
return this.metadataCache.get(ticker);
|
||||
}
|
||||
|
||||
// Generate metadata on-the-fly
|
||||
if (!this.metadataGenerator) {
|
||||
throw new Error('Metadata generator not available');
|
||||
}
|
||||
|
||||
const { exchange: exchangeName, symbol } = this.parseTicker(ticker);
|
||||
const exchangeUpper = exchangeName.toUpperCase();
|
||||
const exchange = this.getExchange(exchangeName);
|
||||
|
||||
// Load market info from CCXT
|
||||
await exchange.loadMarkets();
|
||||
const market = exchange.market(symbol);
|
||||
|
||||
if (!market) {
|
||||
throw new Error(`Market not found: ${symbol} on ${exchangeUpper}`);
|
||||
}
|
||||
|
||||
// Convert to our metadata format
|
||||
const metadata = this.metadataGenerator.convertMarketToMetadata(exchangeUpper, symbol, market);
|
||||
|
||||
// Cache it
|
||||
this.metadataCache.set(ticker, metadata);
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create CCXT exchange instance
|
||||
*/
|
||||
@@ -123,8 +160,11 @@ export class CCXTFetcher {
|
||||
}
|
||||
}
|
||||
|
||||
// Get metadata for proper denomination
|
||||
const metadata = await this.getMetadata(ticker);
|
||||
|
||||
// Convert to our OHLC format
|
||||
return allCandles.map(candle => this.convertToOHLC(candle, ticker, periodSeconds));
|
||||
return allCandles.map(candle => this.convertToOHLC(candle, ticker, periodSeconds, metadata));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -148,8 +188,11 @@ export class CCXTFetcher {
|
||||
'Fetched recent trades'
|
||||
);
|
||||
|
||||
// Get metadata for proper denomination
|
||||
const metadata = await this.getMetadata(ticker);
|
||||
|
||||
// Convert to our Tick format
|
||||
return trades.map(trade => this.convertToTick(trade, ticker));
|
||||
return trades.map(trade => this.convertToTick(trade, ticker, metadata));
|
||||
} catch (error) {
|
||||
this.logger.error(
|
||||
{ error: error.message, ticker },
|
||||
@@ -162,21 +205,23 @@ export class CCXTFetcher {
|
||||
/**
|
||||
* Convert CCXT OHLCV array to our OHLC format
|
||||
* CCXT format: [timestamp, open, high, low, close, volume]
|
||||
* Uses denominators from market metadata for proper integer representation
|
||||
*/
|
||||
convertToOHLC(candle, ticker, periodSeconds) {
|
||||
convertToOHLC(candle, ticker, periodSeconds, metadata) {
|
||||
const [timestamp, open, high, low, close, volume] = candle;
|
||||
|
||||
// Convert to fixed-point integers (using 8 decimal places = 10^8)
|
||||
const DENOM = 100000000;
|
||||
// Use denominators from metadata
|
||||
const tickDenom = metadata.tickDenom || 100;
|
||||
const baseDenom = metadata.baseDenom || 100000000;
|
||||
|
||||
return {
|
||||
ticker,
|
||||
timestamp: (timestamp * 1000).toString(), // Convert ms to microseconds
|
||||
open: Math.round(open * DENOM).toString(),
|
||||
high: Math.round(high * DENOM).toString(),
|
||||
low: Math.round(low * DENOM).toString(),
|
||||
close: Math.round(close * DENOM).toString(),
|
||||
volume: Math.round(volume * DENOM).toString(),
|
||||
open: Math.round(open * tickDenom).toString(),
|
||||
high: Math.round(high * tickDenom).toString(),
|
||||
low: Math.round(low * tickDenom).toString(),
|
||||
close: Math.round(close * tickDenom).toString(),
|
||||
volume: Math.round(volume * baseDenom).toString(),
|
||||
open_time: (timestamp * 1000).toString(),
|
||||
close_time: ((timestamp + periodSeconds * 1000) * 1000).toString()
|
||||
};
|
||||
@@ -184,14 +229,17 @@ export class CCXTFetcher {
|
||||
|
||||
/**
|
||||
* Convert CCXT trade to our Tick format
|
||||
* Uses denominators from market metadata for proper integer representation
|
||||
*/
|
||||
convertToTick(trade, ticker) {
|
||||
// Convert to fixed-point integers (using 8 decimal places = 10^8)
|
||||
const DENOM = 100000000;
|
||||
convertToTick(trade, ticker, metadata) {
|
||||
// Use denominators from metadata
|
||||
const tickDenom = metadata.tickDenom || 100;
|
||||
const baseDenom = metadata.baseDenom || 100000000;
|
||||
const quoteDenom = metadata.quoteDenom || tickDenom;
|
||||
|
||||
const price = Math.round(trade.price * DENOM);
|
||||
const amount = Math.round(trade.amount * DENOM);
|
||||
const quoteAmount = Math.round((trade.price * trade.amount) * DENOM);
|
||||
const price = Math.round(trade.price * tickDenom);
|
||||
const amount = Math.round(trade.amount * baseDenom);
|
||||
const quoteAmount = Math.round((trade.price * trade.amount) * quoteDenom);
|
||||
|
||||
return {
|
||||
trade_id: trade.id || `${trade.timestamp}`,
|
||||
|
||||
@@ -8,6 +8,7 @@ import { ZmqClient } from './zmq-client.js';
|
||||
import { KafkaProducer } from './kafka-producer.js';
|
||||
import { CCXTFetcher } from './ccxt-fetcher.js';
|
||||
import { RealtimePoller } from './realtime-poller.js';
|
||||
import { SymbolMetadataGenerator } from './symbol-metadata-generator.js';
|
||||
|
||||
// Logger setup
|
||||
const logger = pino({
|
||||
@@ -61,6 +62,10 @@ function loadConfig() {
|
||||
max_concurrent: config.max_concurrent || 10,
|
||||
poll_interval_ms: config.poll_interval_ms || 10000,
|
||||
|
||||
// Symbol metadata configuration
|
||||
supported_exchanges: config.supported_exchanges || ['binance', 'coinbase', 'kraken'],
|
||||
symbol_metadata_interval_ms: config.symbol_metadata_interval_ms || 6 * 60 * 60 * 1000, // 6 hours
|
||||
|
||||
...secrets
|
||||
};
|
||||
}
|
||||
@@ -75,9 +80,16 @@ class IngestorWorker {
|
||||
config,
|
||||
logger.child({ component: 'kafka' })
|
||||
);
|
||||
// Create metadata generator first so ccxtFetcher can use it
|
||||
this.metadataGenerator = new SymbolMetadataGenerator(
|
||||
config,
|
||||
this.kafkaProducer,
|
||||
logger.child({ component: 'metadata' })
|
||||
);
|
||||
this.ccxtFetcher = new CCXTFetcher(
|
||||
config,
|
||||
logger.child({ component: 'ccxt' })
|
||||
logger.child({ component: 'ccxt' }),
|
||||
this.metadataGenerator
|
||||
);
|
||||
this.realtimePoller = new RealtimePoller(
|
||||
this.ccxtFetcher,
|
||||
@@ -88,6 +100,10 @@ class IngestorWorker {
|
||||
// Track active requests
|
||||
this.activeRequests = new Map();
|
||||
this.isShutdown = false;
|
||||
|
||||
// Metadata generation interval
|
||||
this.metadataIntervalMs = config.symbol_metadata_interval_ms;
|
||||
this.metadataInterval = null;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -106,6 +122,26 @@ class IngestorWorker {
|
||||
// Start work loop
|
||||
this.workLoop();
|
||||
|
||||
// Generate symbol metadata on startup
|
||||
this.logger.info('Generating initial symbol metadata');
|
||||
try {
|
||||
const results = await this.metadataGenerator.generateAll();
|
||||
this.logger.info({ results }, 'Initial symbol metadata generated');
|
||||
} catch (error) {
|
||||
this.logger.error({ error: error.message }, 'Failed to generate initial symbol metadata');
|
||||
}
|
||||
|
||||
// Schedule periodic metadata generation
|
||||
this.metadataInterval = setInterval(async () => {
|
||||
this.logger.info('Periodic symbol metadata generation');
|
||||
try {
|
||||
const results = await this.metadataGenerator.generateAll();
|
||||
this.logger.info({ results }, 'Periodic symbol metadata generated');
|
||||
} catch (error) {
|
||||
this.logger.error({ error: error.message }, 'Failed to generate periodic symbol metadata');
|
||||
}
|
||||
}, this.metadataIntervalMs);
|
||||
|
||||
this.logger.info('Ingestor worker started successfully');
|
||||
}
|
||||
|
||||
@@ -347,7 +383,8 @@ class IngestorWorker {
|
||||
return {
|
||||
activeRequests: this.activeRequests.size,
|
||||
maxConcurrent: this.config.max_concurrent,
|
||||
pollerStats: this.realtimePoller.getStats()
|
||||
pollerStats: this.realtimePoller.getStats(),
|
||||
metadataStatus: this.metadataGenerator.getStatus()
|
||||
};
|
||||
}
|
||||
|
||||
@@ -362,11 +399,17 @@ class IngestorWorker {
|
||||
this.isShutdown = true;
|
||||
this.logger.info('Shutting down ingestor worker');
|
||||
|
||||
// Stop metadata generation interval
|
||||
if (this.metadataInterval) {
|
||||
clearInterval(this.metadataInterval);
|
||||
}
|
||||
|
||||
// Stop polling
|
||||
this.realtimePoller.shutdown();
|
||||
|
||||
// Close connections
|
||||
await this.ccxtFetcher.close();
|
||||
await this.metadataGenerator.close();
|
||||
await this.kafkaProducer.disconnect();
|
||||
await this.zmqClient.shutdown();
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Kafka producer for writing market data
|
||||
import { Kafka } from 'kafkajs';
|
||||
import { encodeMessage, MessageTypeId, Tick, OHLC, OHLCBatch } from './proto/messages.js';
|
||||
import { encodeMessage, MessageTypeId, Tick, OHLC, OHLCBatch, Market } from './proto/messages.js';
|
||||
|
||||
export class KafkaProducer {
|
||||
constructor(config, logger) {
|
||||
@@ -257,6 +257,41 @@ export class KafkaProducer {
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write market metadata messages to Kafka
|
||||
* @param {string} topic - Kafka topic name
|
||||
* @param {Array<object>} messages - Array of {key, value} objects where value is Market metadata
|
||||
*/
|
||||
async writeMarketMetadata(topic, messages) {
|
||||
if (!this.isConnected) {
|
||||
throw new Error('Kafka producer not connected');
|
||||
}
|
||||
|
||||
if (messages.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const kafkaMessages = messages.map(({ key, value }) => {
|
||||
const [frame1, frame2] = encodeMessage(MessageTypeId.MARKET, value, Market);
|
||||
const encodedValue = Buffer.concat([frame1, frame2]);
|
||||
|
||||
return {
|
||||
key,
|
||||
value: encodedValue
|
||||
};
|
||||
});
|
||||
|
||||
await this.producer.send({
|
||||
topic,
|
||||
messages: kafkaMessages
|
||||
});
|
||||
|
||||
this.logger.debug(
|
||||
{ count: messages.length, topic },
|
||||
'Wrote market metadata to Kafka'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Disconnect from Kafka
|
||||
*/
|
||||
|
||||
313
ingestor/src/symbol-metadata-generator.js
Normal file
313
ingestor/src/symbol-metadata-generator.js
Normal file
@@ -0,0 +1,313 @@
|
||||
// Symbol Metadata Generator - extracts market metadata from CCXT exchanges
|
||||
import ccxt from 'ccxt';
|
||||
|
||||
export class SymbolMetadataGenerator {
|
||||
constructor(config, kafkaProducer, logger) {
|
||||
this.config = config;
|
||||
this.kafkaProducer = kafkaProducer;
|
||||
this.logger = logger;
|
||||
this.exchanges = new Map();
|
||||
this.lastGenerated = new Map(); // Track last generation time per exchange
|
||||
this.publishedSymbols = new Set(); // Track published symbols to prevent duplicates
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create CCXT exchange instance
|
||||
*/
|
||||
getExchange(exchangeName) {
|
||||
if (this.exchanges.has(exchangeName)) {
|
||||
return this.exchanges.get(exchangeName);
|
||||
}
|
||||
|
||||
const ExchangeClass = ccxt[exchangeName];
|
||||
if (!ExchangeClass) {
|
||||
throw new Error(`Unsupported exchange: ${exchangeName}`);
|
||||
}
|
||||
|
||||
const exchange = new ExchangeClass({
|
||||
enableRateLimit: true,
|
||||
options: {
|
||||
defaultType: 'spot'
|
||||
}
|
||||
});
|
||||
|
||||
this.exchanges.set(exchangeName, exchange);
|
||||
this.logger.info({ exchange: exchangeName }, 'Created CCXT exchange instance for metadata');
|
||||
|
||||
return exchange;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate symbol metadata for all configured exchanges
|
||||
*/
|
||||
async generateAll() {
|
||||
const supportedExchanges = this.config.supported_exchanges || ['binance'];
|
||||
|
||||
this.logger.info({ exchanges: supportedExchanges }, 'Generating symbol metadata for all exchanges');
|
||||
|
||||
const results = [];
|
||||
for (const exchangeName of supportedExchanges) {
|
||||
try {
|
||||
const metadata = await this.generateForExchange(exchangeName);
|
||||
results.push({ exchange: exchangeName, count: metadata.length, success: true });
|
||||
} catch (error) {
|
||||
this.logger.error(
|
||||
{ error: error.message, exchange: exchangeName },
|
||||
'Failed to generate metadata for exchange'
|
||||
);
|
||||
results.push({ exchange: exchangeName, error: error.message, success: false });
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate symbol metadata for a single exchange
|
||||
*/
|
||||
async generateForExchange(exchangeName) {
|
||||
const exchangeLower = exchangeName.toLowerCase();
|
||||
const exchangeUpper = exchangeName.toUpperCase();
|
||||
|
||||
this.logger.info({ exchange: exchangeUpper }, 'Loading markets from exchange');
|
||||
|
||||
const exchange = this.getExchange(exchangeLower);
|
||||
|
||||
// Load all markets from the exchange
|
||||
const markets = await exchange.loadMarkets();
|
||||
|
||||
this.logger.info(
|
||||
{ exchange: exchangeUpper, marketCount: Object.keys(markets).length },
|
||||
'Loaded markets from exchange'
|
||||
);
|
||||
|
||||
const metadataList = [];
|
||||
|
||||
for (const [symbol, market] of Object.entries(markets)) {
|
||||
try {
|
||||
const metadata = this.convertMarketToMetadata(exchangeUpper, symbol, market);
|
||||
|
||||
// Debug log first few symbols
|
||||
if (metadataList.length < 5) {
|
||||
this.logger.info({
|
||||
exchange: exchangeUpper,
|
||||
symbol,
|
||||
metadata
|
||||
}, `Symbol metadata ${metadataList.length + 1} generated`);
|
||||
}
|
||||
|
||||
metadataList.push(metadata);
|
||||
} catch (error) {
|
||||
this.logger.warn(
|
||||
{ exchange: exchangeUpper, symbol, error: error.message },
|
||||
'Failed to convert market to metadata'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Write all metadata to Kafka
|
||||
if (metadataList.length > 0) {
|
||||
try {
|
||||
await this.writeMetadataToKafka(metadataList);
|
||||
} catch (error) {
|
||||
this.logger.error(
|
||||
{ exchange: exchangeUpper, error: error.message, stack: error.stack },
|
||||
'Failed to write metadata to Kafka'
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Update last generated timestamp
|
||||
this.lastGenerated.set(exchangeUpper, Date.now());
|
||||
|
||||
this.logger.info(
|
||||
{ exchange: exchangeUpper, count: metadataList.length },
|
||||
'Generated and published symbol metadata'
|
||||
);
|
||||
|
||||
return metadataList;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert CCXT market object to our Market metadata format
|
||||
*/
|
||||
convertMarketToMetadata(exchangeId, symbol, market) {
|
||||
const base = market.base || '';
|
||||
const quote = market.quote || '';
|
||||
const marketType = market.type || 'spot';
|
||||
|
||||
// Extract precision information
|
||||
const precision = market.precision || {};
|
||||
const limits = market.limits || {};
|
||||
|
||||
// Get tick_denom from price precision
|
||||
// This tells us the denominator for price values.
|
||||
// For example, if BTC/USDT trades with 2 decimals (0.01 precision), tick_denom = 100
|
||||
//
|
||||
// CCXT precision.price can be:
|
||||
// - Integer (decimal places): 2 means 0.01 tick size -> denominator 100
|
||||
// - Float (tick size): 0.01 -> invert to get denominator 100
|
||||
let tick_denom;
|
||||
if (precision.price !== undefined) {
|
||||
if (Number.isInteger(precision.price)) {
|
||||
// Integer: number of decimal places
|
||||
// e.g., precision.price = 2 means 2 decimal places = 0.01 tick = 100 denom
|
||||
tick_denom = Math.pow(10, precision.price);
|
||||
} else {
|
||||
// Float: actual tick size, need to invert and round
|
||||
// e.g., precision.price = 0.01 -> 1/0.01 = 100
|
||||
tick_denom = Math.round(1 / precision.price);
|
||||
}
|
||||
} else if (limits.price?.min !== undefined) {
|
||||
// Fallback: use minimum price as tick size
|
||||
tick_denom = Math.round(1 / limits.price.min);
|
||||
} else {
|
||||
// Default to 2 decimals (pennies)
|
||||
tick_denom = 100;
|
||||
}
|
||||
|
||||
// Get base_denom from amount precision (for volumes)
|
||||
let base_denom;
|
||||
if (precision.amount !== undefined) {
|
||||
if (Number.isInteger(precision.amount)) {
|
||||
base_denom = Math.pow(10, precision.amount);
|
||||
} else {
|
||||
base_denom = Math.round(1 / precision.amount);
|
||||
}
|
||||
} else if (limits.amount?.min !== undefined) {
|
||||
base_denom = Math.round(1 / limits.amount.min);
|
||||
} else {
|
||||
// Default to 8 decimals (standard for crypto)
|
||||
base_denom = 100000000;
|
||||
}
|
||||
|
||||
// Get quote_denom from cost precision (price * amount)
|
||||
let quote_denom;
|
||||
if (precision.cost !== undefined) {
|
||||
if (Number.isInteger(precision.cost)) {
|
||||
quote_denom = Math.pow(10, precision.cost);
|
||||
} else {
|
||||
quote_denom = Math.round(1 / precision.cost);
|
||||
}
|
||||
} else {
|
||||
// Default: typically tick_denom for most exchanges
|
||||
quote_denom = tick_denom;
|
||||
}
|
||||
|
||||
// Standard supported periods (in seconds)
|
||||
// Most exchanges support these timeframes
|
||||
const supported_period_seconds = [
|
||||
60, // 1m
|
||||
300, // 5m
|
||||
900, // 15m
|
||||
1800, // 30m
|
||||
3600, // 1h
|
||||
14400, // 4h
|
||||
86400, // 1d
|
||||
];
|
||||
|
||||
// Build description
|
||||
const description = `${base}/${quote} ${marketType} trading pair on ${exchangeId}`;
|
||||
|
||||
// NOTE: protobufjs expects camelCase field names, not snake_case!
|
||||
return {
|
||||
exchangeId: exchangeId,
|
||||
marketId: symbol,
|
||||
marketType: marketType,
|
||||
description,
|
||||
baseAsset: base,
|
||||
quoteAsset: quote,
|
||||
tickDenom: tick_denom,
|
||||
baseDenom: base_denom,
|
||||
quoteDenom: quote_denom,
|
||||
supportedPeriodSeconds: supported_period_seconds,
|
||||
// earliestTime can be added later if we track it
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Write metadata list to Kafka topic
|
||||
* Deduplicates symbols to prevent duplicate writes
|
||||
*/
|
||||
async writeMetadataToKafka(metadataList) {
|
||||
const topic = 'symbol-metadata';
|
||||
|
||||
// Filter out duplicates
|
||||
const uniqueMetadata = [];
|
||||
let duplicateCount = 0;
|
||||
|
||||
for (const metadata of metadataList) {
|
||||
const key = `${metadata.exchangeId}:${metadata.marketId}`;
|
||||
|
||||
// Debug first few to understand duplication
|
||||
if (uniqueMetadata.length < 3 || (uniqueMetadata.length === 0 && duplicateCount < 3)) {
|
||||
this.logger.info({
|
||||
key,
|
||||
exchangeId: metadata.exchangeId,
|
||||
marketId: metadata.marketId,
|
||||
isDuplicate: this.publishedSymbols.has(key),
|
||||
setSize: this.publishedSymbols.size
|
||||
}, 'Deduplication check');
|
||||
}
|
||||
|
||||
if (!this.publishedSymbols.has(key)) {
|
||||
uniqueMetadata.push(metadata);
|
||||
this.publishedSymbols.add(key);
|
||||
} else {
|
||||
duplicateCount++;
|
||||
}
|
||||
}
|
||||
|
||||
if (uniqueMetadata.length === 0) {
|
||||
this.logger.debug(
|
||||
{ duplicateCount, topic },
|
||||
'All symbols already published, skipping'
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Convert each metadata to protobuf Market message
|
||||
const messages = uniqueMetadata.map(metadata => {
|
||||
const key = `${metadata.exchangeId}:${metadata.marketId}`;
|
||||
|
||||
return {
|
||||
key,
|
||||
value: metadata,
|
||||
};
|
||||
});
|
||||
|
||||
await this.kafkaProducer.writeMarketMetadata(topic, messages);
|
||||
|
||||
this.logger.info(
|
||||
{ count: messages.length, duplicateCount, topic },
|
||||
'Wrote symbol metadata to Kafka'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get status of metadata generation
|
||||
*/
|
||||
getStatus() {
|
||||
const status = {};
|
||||
for (const [exchange, timestamp] of this.lastGenerated.entries()) {
|
||||
status[exchange] = {
|
||||
lastGenerated: new Date(timestamp).toISOString(),
|
||||
ageMinutes: Math.floor((Date.now() - timestamp) / 60000),
|
||||
};
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* Close all exchange connections
|
||||
*/
|
||||
async close() {
|
||||
for (const [name, exchange] of this.exchanges) {
|
||||
if (exchange.close) {
|
||||
await exchange.close();
|
||||
}
|
||||
}
|
||||
this.exchanges.clear();
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
syntax = "proto3";
|
||||
|
||||
option java_multiple_files = true;
|
||||
option java_package = "com.dexorder.proto";
|
||||
|
||||
message Market {
|
||||
// The prices and volumes must be adjusted by the rational denominator provided
|
||||
// by the market metadata
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
syntax = "proto3";
|
||||
|
||||
option java_multiple_files = true;
|
||||
option java_package = "com.dexorder.proto";
|
||||
|
||||
message Tick {
|
||||
// Unique identifier for the trade
|
||||
string trade_id = 1;
|
||||
|
||||
2
web/.gitignore
vendored
2
web/.gitignore
vendored
@@ -1,3 +1,5 @@
|
||||
/protobuf
|
||||
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
|
||||
@@ -40,11 +40,18 @@ watch(isMobile, (mobile) => {
|
||||
// Check if user is already authenticated on page load
|
||||
onMounted(async () => {
|
||||
// Try to restore session from stored token
|
||||
if (authService.getToken()) {
|
||||
const token = authService.getToken()
|
||||
if (token) {
|
||||
const sessionValid = await authService.checkAuth()
|
||||
if (sessionValid) {
|
||||
isAuthenticated.value = true
|
||||
// Connect WebSocket with existing token
|
||||
try {
|
||||
await wsManager.connect(token)
|
||||
await initializeApp()
|
||||
} catch (err) {
|
||||
console.error('Failed to connect WebSocket on session restore:', err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,9 +77,8 @@ const handleAuthenticate = async (email: string, password: string) => {
|
||||
return
|
||||
}
|
||||
|
||||
// Step 2: Connect WebSocket with JWT token (if WebSocket is used for real-time sync)
|
||||
// For now, we're not connecting WebSocket until it's implemented in the gateway
|
||||
// await wsManager.connect(result.token)
|
||||
// Step 2: Connect WebSocket with JWT token for real-time sync
|
||||
await wsManager.connect(result.token)
|
||||
|
||||
// Step 3: Initialize application
|
||||
await initializeApp()
|
||||
|
||||
@@ -27,8 +27,27 @@ class AuthService {
|
||||
private gatewayUrl: string
|
||||
|
||||
constructor() {
|
||||
// Use environment variable for gateway URL, fallback to localhost for dev
|
||||
this.gatewayUrl = import.meta.env.VITE_GATEWAY_URL || 'http://localhost:3000'
|
||||
// Use environment variable for gateway URL, fallback based on current location
|
||||
// VITE_GATEWAY_URL can be:
|
||||
// - Relative path: "/api" (for production with ingress)
|
||||
// - Absolute URL: "http://localhost:3000" (for local dev)
|
||||
// - Absolute URL: "http://dexorder.local" (for minikube)
|
||||
const envUrl = import.meta.env.VITE_GATEWAY_URL
|
||||
|
||||
if (envUrl) {
|
||||
// If it starts with /, it's a relative path - use current origin
|
||||
if (envUrl.startsWith('/')) {
|
||||
this.gatewayUrl = window.location.origin + envUrl
|
||||
} else {
|
||||
this.gatewayUrl = envUrl
|
||||
}
|
||||
} else if (window.location.hostname === 'localhost' || window.location.hostname === '127.0.0.1') {
|
||||
// Local development default
|
||||
this.gatewayUrl = 'http://localhost:3000'
|
||||
} else {
|
||||
// Production default: use current origin
|
||||
this.gatewayUrl = window.location.origin
|
||||
}
|
||||
|
||||
// Try to restore token from localStorage
|
||||
const storedToken = localStorage.getItem(TOKEN_STORAGE_KEY)
|
||||
@@ -51,6 +70,15 @@ class AuthService {
|
||||
body: JSON.stringify({ email, password }),
|
||||
})
|
||||
|
||||
// Check if response is JSON before parsing
|
||||
const contentType = response.headers.get('content-type')
|
||||
if (!contentType || !contentType.includes('application/json')) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Server error (${response.status}): ${response.statusText || 'Service unavailable'}`,
|
||||
}
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
|
||||
@@ -31,12 +31,18 @@ interface Subscription {
|
||||
subscriptionId: string
|
||||
}
|
||||
|
||||
interface SymbolDenominators {
|
||||
tick: number
|
||||
base: number
|
||||
}
|
||||
|
||||
export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
private pendingRequests: Map<string, PendingRequest> = new Map()
|
||||
private subscriptions: Map<string, Subscription> = new Map()
|
||||
private requestTimeout = 10000 // 10 seconds
|
||||
private configuration: DatafeedConfiguration | null = null
|
||||
private messageHandler: MessageHandler
|
||||
private symbolDenominators: Map<string, SymbolDenominators> = new Map() // Track denominators per symbol
|
||||
|
||||
constructor() {
|
||||
// Use the shared WebSocket connection (managed by App.vue authentication)
|
||||
@@ -53,19 +59,27 @@ export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
const requestId = message.request_id || this.generateRequestId()
|
||||
message.request_id = requestId
|
||||
|
||||
console.log('[TradingView Datafeed] Sending request:', requestId, message.type, message)
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const timeout = window.setTimeout(() => {
|
||||
console.error('[TradingView Datafeed] Request timeout:', requestId, message.type)
|
||||
this.pendingRequests.delete(requestId)
|
||||
reject(new Error('Request timeout'))
|
||||
}, this.requestTimeout)
|
||||
|
||||
this.pendingRequests.set(requestId, { resolve, reject, timeout })
|
||||
|
||||
const wsState = wsManager.getWebSocket()?.readyState
|
||||
console.log('[TradingView Datafeed] WebSocket state before send:', wsState, 'OPEN=' + WebSocket.OPEN)
|
||||
|
||||
wsManager.send(message)
|
||||
})
|
||||
}
|
||||
|
||||
private handleMessage(message: any): void {
|
||||
console.log('[TradingView Datafeed] Received message:', message)
|
||||
console.log('[TradingView Datafeed] Received message:', message.type, message)
|
||||
console.log('[TradingView Datafeed] Pending requests count:', this.pendingRequests.size)
|
||||
|
||||
// Handle responses to pending requests
|
||||
if (message.request_id && this.pendingRequests.has(message.request_id)) {
|
||||
@@ -75,27 +89,30 @@ export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
clearTimeout(pending.timeout)
|
||||
|
||||
if (message.type === 'error') {
|
||||
console.log('[TradingView Datafeed] Resolving with error:', message.error_message)
|
||||
console.error('[TradingView Datafeed] Resolving with error:', message.error_message)
|
||||
pending.reject(new Error(message.error_message || 'Unknown error'))
|
||||
} else {
|
||||
console.log('[TradingView Datafeed] Resolving with response')
|
||||
console.log('[TradingView Datafeed] Resolving with response:', message.type)
|
||||
pending.resolve(message)
|
||||
}
|
||||
} else if (message.request_id) {
|
||||
console.log('[TradingView Datafeed] No pending request found for:', message.request_id)
|
||||
console.warn('[TradingView Datafeed] No pending request found for:', message.request_id, 'Available:', Array.from(this.pendingRequests.keys()))
|
||||
}
|
||||
|
||||
// Handle real-time bar updates
|
||||
if (message.type === 'bar_update') {
|
||||
const subscription = this.subscriptions.get(message.subscription_id)
|
||||
if (subscription && message.bar) {
|
||||
const symbolKey = subscription.symbolInfo.ticker || subscription.symbolInfo.name
|
||||
const denoms = this.symbolDenominators.get(symbolKey) || { tick: 1, base: 1 }
|
||||
|
||||
const bar: Bar = {
|
||||
time: message.bar.time * 1000, // Convert to milliseconds
|
||||
open: parseFloat(message.bar.data.open),
|
||||
high: parseFloat(message.bar.data.high),
|
||||
low: parseFloat(message.bar.data.low),
|
||||
close: parseFloat(message.bar.data.close),
|
||||
volume: parseFloat(message.bar.data.volume)
|
||||
open: parseFloat(message.bar.open) / denoms.tick,
|
||||
high: parseFloat(message.bar.high) / denoms.tick,
|
||||
low: parseFloat(message.bar.low) / denoms.tick,
|
||||
close: parseFloat(message.bar.close) / denoms.tick,
|
||||
volume: parseFloat(message.bar.volume) / denoms.base
|
||||
}
|
||||
subscription.onTick(bar)
|
||||
}
|
||||
@@ -159,20 +176,42 @@ export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
onResolve: (symbolInfo: LibrarySymbolInfo) => void,
|
||||
onError: (reason: string) => void
|
||||
): void {
|
||||
console.log('[TradingView Datafeed] Resolving symbol:', symbolName)
|
||||
console.log('[TradingView Datafeed] WebSocket state:', wsManager.getWebSocket()?.readyState)
|
||||
|
||||
this.sendRequest<any>({
|
||||
type: 'resolve_symbol',
|
||||
symbol: symbolName
|
||||
})
|
||||
.then((response) => {
|
||||
console.log('[TradingView Datafeed] Received response:', response)
|
||||
if (response.symbol_info) {
|
||||
console.log('[TradingView Datafeed] Resolved symbol info:', response.symbol_info)
|
||||
|
||||
// Store the denominators for this symbol
|
||||
const symbolKey = response.symbol_info.ticker || response.symbol_info.name
|
||||
const tickDenom = response.symbol_info.tick_denominator || 1
|
||||
const baseDenom = response.symbol_info.base_denominator || 1
|
||||
|
||||
this.symbolDenominators.set(symbolKey, {
|
||||
tick: tickDenom,
|
||||
base: baseDenom
|
||||
})
|
||||
console.log('[TradingView Datafeed] Stored denominators:', symbolKey, { tick: tickDenom, base: baseDenom })
|
||||
|
||||
onResolve(response.symbol_info)
|
||||
} else {
|
||||
console.error('[TradingView Datafeed] No symbol_info in response')
|
||||
onError('Symbol not found')
|
||||
}
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error('Failed to resolve symbol:', error)
|
||||
console.error('[TradingView Datafeed] Failed to resolve symbol:', symbolName, error)
|
||||
console.error('[TradingView Datafeed] Error details:', {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
pendingRequests: this.pendingRequests.size
|
||||
})
|
||||
onError(error instanceof Error ? error.message : 'Unknown error')
|
||||
})
|
||||
}
|
||||
@@ -189,9 +228,12 @@ export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
onResult: (bars: Bar[], meta: HistoryMetadata) => void,
|
||||
onError: (reason: string) => void
|
||||
): void {
|
||||
const symbolKey = symbolInfo.ticker || symbolInfo.name
|
||||
const denoms = this.symbolDenominators.get(symbolKey) || { tick: 1, base: 1 }
|
||||
|
||||
this.sendRequest<any>({
|
||||
type: 'get_bars',
|
||||
symbol: symbolInfo.ticker || symbolInfo.name,
|
||||
symbol: symbolKey,
|
||||
resolution: resolution,
|
||||
from_time: periodParams.from,
|
||||
to_time: periodParams.to,
|
||||
@@ -199,15 +241,20 @@ export class WebSocketDatafeed implements IBasicDataFeed {
|
||||
})
|
||||
.then((response) => {
|
||||
if (response.history) {
|
||||
console.log('[TradingView Datafeed] Raw bar sample:', response.history.bars?.[0])
|
||||
console.log('[TradingView Datafeed] Denominators:', denoms)
|
||||
|
||||
const bars: Bar[] = (response.history.bars || []).map((bar: any) => ({
|
||||
time: bar.time * 1000, // Convert to milliseconds
|
||||
open: parseFloat(bar.data.open),
|
||||
high: parseFloat(bar.data.high),
|
||||
low: parseFloat(bar.data.low),
|
||||
close: parseFloat(bar.data.close),
|
||||
volume: parseFloat(bar.data.volume)
|
||||
open: parseFloat(bar.open) / denoms.tick,
|
||||
high: parseFloat(bar.high) / denoms.tick,
|
||||
low: parseFloat(bar.low) / denoms.tick,
|
||||
close: parseFloat(bar.close) / denoms.tick,
|
||||
volume: parseFloat(bar.volume) / denoms.base
|
||||
}))
|
||||
|
||||
console.log('[TradingView Datafeed] Scaled bar sample:', bars[0])
|
||||
|
||||
const meta: HistoryMetadata = {
|
||||
noData: bars.length === 0,
|
||||
nextTime: response.history.next_time
|
||||
|
||||
@@ -12,6 +12,7 @@ class WebSocketManager {
|
||||
private handlers: Set<MessageHandler> = new Set()
|
||||
private reconnectTimeout: number | null = null
|
||||
public isConnected = ref(false)
|
||||
public isAuthenticated = ref(false)
|
||||
private token: string | null = null
|
||||
|
||||
/**
|
||||
@@ -27,38 +28,73 @@ class WebSocketManager {
|
||||
}
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
// Use env variable for WebSocket URL, fallback to localhost for dev
|
||||
const wsUrl = import.meta.env.VITE_WS_URL || 'ws://localhost:8080/ws'
|
||||
// Build WebSocket URL based on environment
|
||||
// VITE_GATEWAY_URL can be:
|
||||
// - Relative path: "/api" (for production with ingress)
|
||||
// - Absolute URL: "http://localhost:3000" (for local dev)
|
||||
const envUrl = import.meta.env.VITE_GATEWAY_URL
|
||||
let baseUrl: string
|
||||
|
||||
// Append token as query parameter
|
||||
if (envUrl) {
|
||||
// If it starts with /, it's a relative path - use current origin
|
||||
if (envUrl.startsWith('/')) {
|
||||
baseUrl = window.location.origin + envUrl
|
||||
} else {
|
||||
baseUrl = envUrl
|
||||
}
|
||||
} else if (window.location.hostname === 'localhost' || window.location.hostname === '127.0.0.1') {
|
||||
baseUrl = 'http://localhost:3000'
|
||||
} else {
|
||||
// Production: use current origin
|
||||
baseUrl = window.location.origin
|
||||
}
|
||||
|
||||
// Gateway WebSocket endpoint is /ws/chat (baseUrl already includes /api if needed)
|
||||
const wsUrl = baseUrl.replace('http://', 'ws://').replace('https://', 'wss://') + '/ws/chat'
|
||||
|
||||
// WebSocket API doesn't support custom headers, so we'll send the token in the URL
|
||||
// The gateway will need to handle token extraction from query params
|
||||
const wsUrlWithToken = `${wsUrl}?token=${encodeURIComponent(token)}`
|
||||
console.log('[WebSocket] Connecting to:', wsUrl)
|
||||
console.log('[WebSocket] Full URL (token hidden):', wsUrl + '?token=***')
|
||||
this.ws = new WebSocket(wsUrlWithToken)
|
||||
|
||||
this.ws.onopen = () => {
|
||||
console.log('WebSocket connected')
|
||||
console.log('[WebSocket] Connected successfully')
|
||||
this.isConnected.value = true
|
||||
this.isAuthenticated.value = false // Wait for 'connected' message from server
|
||||
resolve()
|
||||
}
|
||||
|
||||
this.ws.onmessage = (event) => {
|
||||
console.log('[WebSocket] Raw message received:', event.data.substring(0, 200))
|
||||
try {
|
||||
const message = JSON.parse(event.data)
|
||||
console.log('[WebSocket] Parsed message type:', message.type)
|
||||
|
||||
// Mark as authenticated when we receive the 'connected' message
|
||||
if (message.type === 'connected') {
|
||||
console.log('[WebSocket] Received connected message, marking as authenticated')
|
||||
this.isAuthenticated.value = true
|
||||
}
|
||||
|
||||
// Pass to all handlers
|
||||
this.handlers.forEach(handler => handler(message))
|
||||
} catch (err) {
|
||||
console.error('Error parsing WebSocket message:', err)
|
||||
console.error('[WebSocket] Error parsing message:', err)
|
||||
console.error('[WebSocket] Raw data:', event.data.substring(0, 500))
|
||||
}
|
||||
}
|
||||
|
||||
this.ws.onerror = (error) => {
|
||||
console.error('WebSocket error:', error)
|
||||
console.error('[WebSocket] Connection error:', error)
|
||||
this.isConnected.value = false
|
||||
reject(error)
|
||||
}
|
||||
|
||||
this.ws.onclose = (event) => {
|
||||
this.isConnected.value = false
|
||||
this.isAuthenticated.value = false
|
||||
console.log('WebSocket disconnected:', event.code, event.reason)
|
||||
}
|
||||
|
||||
@@ -81,8 +117,12 @@ class WebSocketManager {
|
||||
}
|
||||
|
||||
send(message: WebSocketMessage) {
|
||||
console.log('[WebSocket] Attempting to send message:', message.type, 'readyState:', this.ws?.readyState)
|
||||
if (this.ws?.readyState === WebSocket.OPEN) {
|
||||
console.log('[WebSocket] Sending message:', JSON.stringify(message))
|
||||
this.ws.send(JSON.stringify(message))
|
||||
} else {
|
||||
console.error('[WebSocket] Cannot send message - WebSocket not open. State:', this.ws?.readyState)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,6 +134,7 @@ class WebSocketManager {
|
||||
this.ws.close()
|
||||
this.ws = null
|
||||
}
|
||||
this.isAuthenticated.value = false
|
||||
}
|
||||
|
||||
getWebSocket() {
|
||||
|
||||
Reference in New Issue
Block a user