sandbox connected and streaming

2026-03-30 23:29:03 -04:00
parent c3a8fae132
commit 998f69fa1a
130 changed files with 7416 additions and 2123 deletions

4
.gitignore vendored
View File

@@ -117,8 +117,8 @@ flink/protobuf/
relay/protobuf/
ingestor/protobuf/
gateway/protobuf/
-client-py/protobuf/
+sandbox/protobuf/
# Generated protobuf code
gateway/src/generated/
-client-py/dexorder/generated/
+sandbox/dexorder/generated/

View File

@@ -3,9 +3,9 @@
#REMOTE=northamerica-northeast2-docker.pkg.dev/dexorder-430504/dexorder
REMOTE=${REMOTE:-git.dxod.org/dexorder/dexorder}
-if [ "$1" != "flink" ] && [ "$1" != "relay" ] && [ "$1" != "ingestor" ] && [ "$1" != "web" ] && [ "$1" != "gateway" ] && [ "$1" != "lifecycle-sidecar" ] && [ "$1" != "client-py" ]; then
+if [ "$1" != "flink" ] && [ "$1" != "relay" ] && [ "$1" != "ingestor" ] && [ "$1" != "web" ] && [ "$1" != "gateway" ] && [ "$1" != "lifecycle-sidecar" ] && [ "$1" != "sandbox" ]; then
echo
-echo usage: "$0 "'{flink|relay|ingestor|web|gateway|lifecycle-sidecar|client-py} [''dev''] [config] [deployment] [kubernetes] [image_tag]'
+echo usage: "$0 "'{flink|relay|ingestor|web|gateway|lifecycle-sidecar|sandbox} [''dev''] [config] [deployment] [kubernetes] [image_tag]'
echo
echo ' [''dev''] if the literal string ''dev'' is not the second argument, then the build refuses to run if source code is not checked in. Otherwise, the git revision numbers are used in the image tag.'
echo
@@ -100,6 +100,72 @@ if [ "$PROJECT" != "lifecycle-sidecar" ]; then
rsync -a --checksum --delete protobuf/ $PROJECT/protobuf/
fi
# For gateway: copy Python API files for research subagent
if [ "$PROJECT" == "gateway" ]; then
echo "Copying Python API files for research subagent..."
# Create api-source directory
mkdir -p gateway/src/harness/subagents/research/api-source
# Copy all Python API files (for easy future expansion)
cp sandbox/dexorder/api/*.py gateway/src/harness/subagents/research/api-source/
# Generate api-reference.md with verbatim Python source code
API_REF="gateway/src/harness/subagents/research/memory/api-reference.md"
cat > "$API_REF" << 'HEADER'
# Dexorder Research API Reference
This file contains the complete Python API source code with full docstrings.
These files are copied verbatim from `sandbox/dexorder/api/`.
The API provides access to market data and charting capabilities for research scripts.
---
## Overview
Research scripts access the API via:
```python
from dexorder.api import get_api
api = get_api()
```
The API instance provides:
- `api.data` - DataAPI for fetching OHLC market data
- `api.charting` - ChartingAPI for creating financial charts
---
## Complete API Source Code
The following sections contain the verbatim Python source files with complete
type hints, docstrings, and examples.
HEADER
# Append each Python file
for py_file in api.py data_api.py charting_api.py __init__.py; do
if [ -f "sandbox/dexorder/api/$py_file" ]; then
echo "" >> "$API_REF"
echo "### $py_file" >> "$API_REF"
echo '```python' >> "$API_REF"
cat "sandbox/dexorder/api/$py_file" >> "$API_REF"
echo '```' >> "$API_REF"
echo "" >> "$API_REF"
fi
done
cat >> "$API_REF" << 'FOOTER'
---
For practical usage patterns and complete working examples, see `usage-examples.md`.
FOOTER
echo "Generated api-reference.md with Python API source code"
fi
docker build $NO_CACHE -f $PROJECT/Dockerfile --build-arg="CONFIG=$CONFIG" --build-arg="DEPLOYMENT=$DEPLOYMENT" -t dexorder/ai-$PROJECT:latest $PROJECT || exit 1
# Cleanup is handled by trap

133
bin/dev
View File

@@ -19,7 +19,7 @@ usage() {
echo "Commands:" echo "Commands:"
echo " start Start minikube and deploy all services" echo " start Start minikube and deploy all services"
echo " stop [--keep-data] Stop minikube (deletes PVCs by default)" echo " stop [--keep-data] Stop minikube (deletes PVCs by default)"
echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web|client-py)" echo " restart [svc] Rebuild and redeploy all services, or just one (relay|ingestor|flink|gateway|sidecar|web|sandbox)"
echo " deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)" echo " deep-restart [svc] Restart StatefulSet(s) and delete their PVCs (kafka|postgres|minio|qdrant|all)"
echo " rebuild [svc] Rebuild all custom images, or just one" echo " rebuild [svc] Rebuild all custom images, or just one"
echo " deploy [svc] Deploy/update all services, or just one" echo " deploy [svc] Deploy/update all services, or just one"
@@ -132,19 +132,16 @@ rebuild_images() {
if [ "$service" == "all" ] || [ "$service" == "relay" ]; then if [ "$service" == "all" ] || [ "$service" == "relay" ]; then
echo -e "${GREEN}→${NC} Building relay..." echo -e "${GREEN}→${NC} Building relay..."
RELAY_TAG=$(build_and_get_tag relay) || exit 1 RELAY_TAG=$(build_and_get_tag relay) || exit 1
docker tag "dexorder/ai-relay:$RELAY_TAG" "dexorder/relay:$RELAY_TAG"
fi fi
if [ "$service" == "all" ] || [ "$service" == "ingestor" ]; then if [ "$service" == "all" ] || [ "$service" == "ingestor" ]; then
echo -e "${GREEN}→${NC} Building ingestor..." echo -e "${GREEN}→${NC} Building ingestor..."
INGEST_TAG=$(build_and_get_tag ingestor) || exit 1 INGEST_TAG=$(build_and_get_tag ingestor) || exit 1
docker tag "dexorder/ai-ingestor:$INGEST_TAG" "dexorder/ingestor:$INGEST_TAG"
fi fi
if [ "$service" == "all" ] || [ "$service" == "flink" ]; then if [ "$service" == "all" ] || [ "$service" == "flink" ]; then
echo -e "${GREEN}→${NC} Building flink..." echo -e "${GREEN}→${NC} Building flink..."
FLINK_TAG=$(build_and_get_tag flink) || exit 1 FLINK_TAG=$(build_and_get_tag flink) || exit 1
docker tag "dexorder/ai-flink:$FLINK_TAG" "dexorder/flink:$FLINK_TAG"
fi fi
# Build gateway (Node.js application) # Build gateway (Node.js application)
@@ -165,10 +162,10 @@ rebuild_images() {
WEB_TAG=$(build_and_get_tag web) || exit 1
fi
-# Build client-py (Python client library)
-if [ "$service" == "all" ] || [ "$service" == "client-py" ]; then
-echo -e "${GREEN}→${NC} Building client-py..."
-CLIENT_PY_TAG=$(build_and_get_tag client-py) || exit 1
+# Build sandbox (Python client library)
+if [ "$service" == "all" ] || [ "$service" == "sandbox" ]; then
+echo -e "${GREEN}→${NC} Building sandbox..."
+SANDBOX_TAG=$(build_and_get_tag sandbox) || exit 1
fi
# Save the tags for deployment (all services, preserving any we didn't rebuild)
@@ -178,9 +175,9 @@ rebuild_images() {
echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag" echo "GATEWAY_TAG=$GATEWAY_TAG" >> "$ROOT_DIR/.dev-image-tag"
echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag" echo "SIDECAR_TAG=$SIDECAR_TAG" >> "$ROOT_DIR/.dev-image-tag"
echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag" echo "WEB_TAG=$WEB_TAG" >> "$ROOT_DIR/.dev-image-tag"
echo "CLIENT_PY_TAG=$CLIENT_PY_TAG" >> "$ROOT_DIR/.dev-image-tag" echo "SANDBOX_TAG=$SANDBOX_TAG" >> "$ROOT_DIR/.dev-image-tag"
echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG, client-py=$CLIENT_PY_TAG${NC}" echo -e "${GREEN}✓ Images built: relay=$RELAY_TAG, ingestor=$INGEST_TAG, flink=$FLINK_TAG, gateway=$GATEWAY_TAG, sidecar=$SIDECAR_TAG, web=$WEB_TAG, sandbox=$SANDBOX_TAG${NC}"
} }
deploy_services() { deploy_services() {
@@ -219,6 +216,11 @@ deploy_services() {
# Update configs
echo -e "${GREEN}→${NC} Updating configs..."
+# Template the gateway-config.yaml with actual image tags
+sed -i "s/SANDBOX_TAG_PLACEHOLDER/$SANDBOX_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
+sed -i "s/SIDECAR_TAG_PLACEHOLDER/$SIDECAR_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev
# Create a temporary kustomization overlay with image tags
@@ -227,25 +229,39 @@ deploy_services() {
# Image tags (added by bin/dev)
images:
-- name: dexorder/relay
+- name: dexorder/ai-relay
newTag: $RELAY_TAG
-- name: dexorder/ingestor
+- name: dexorder/ai-ingestor
newTag: $INGEST_TAG
-- name: dexorder/flink
+- name: dexorder/ai-flink
newTag: $FLINK_TAG
-- name: dexorder/gateway
+- name: dexorder/ai-gateway
newTag: $GATEWAY_TAG
- name: dexorder/ai-web
newTag: $WEB_TAG
+- name: dexorder/ai-sandbox
+newTag: $SANDBOX_TAG
+- name: dexorder/ai-lifecycle-sidecar
+newTag: $SIDECAR_TAG
EOF
# Apply kustomize
echo -e "${GREEN}→${NC} Applying Kubernetes manifests..."
kubectl apply -k .
+# Apply sandbox-namespace secrets (must be after kustomize creates the dexorder-sandboxes namespace)
+echo -e "${GREEN}→${NC} Applying sandbox secrets..."
+if [ -f "$ROOT_DIR/deploy/k8s/dev/secrets/sandbox-secrets.yaml" ]; then
+kubectl apply -f "$ROOT_DIR/deploy/k8s/dev/secrets/sandbox-secrets.yaml"
+fi
# Clean up the appended image tags from kustomization.yaml
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
+# Restore gateway-config.yaml placeholders
+sed -i "s/$SANDBOX_TAG/SANDBOX_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
+sed -i "s/$SIDECAR_TAG/SIDECAR_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
echo -e "${GREEN}✓ Services deployed${NC}"
echo ""
@@ -389,21 +405,15 @@ create_dev_user() {
# Create/update license for the user
echo -e "${GREEN}→${NC} Creating $LICENSE_TYPE license for dev user..."
kubectl exec "$pg_pod" -- psql -U postgres -d iceberg -c "
-INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
+INSERT INTO user_licenses (user_id, email, license, mcp_server_url)
VALUES (
'$user_id',
'$DEV_EMAIL',
-'$LICENSE_TYPE',
-'http://localhost:8080/mcp',
-'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
-'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
-'{\"provider\":\"anthropic\",\"model\":\"claude-sonnet-4-6\",\"temperature\":0.7}'
+'{\"licenseType\":\"$LICENSE_TYPE\",\"features\":{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true},\"resourceLimits\":{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60},\"k8sResources\":{\"memoryRequest\":\"512Mi\",\"memoryLimit\":\"2Gi\",\"cpuRequest\":\"250m\",\"cpuLimit\":\"2000m\",\"storage\":\"10Gi\",\"tmpSizeLimit\":\"256Mi\",\"enableIdleShutdown\":true,\"idleTimeoutMinutes\":60},\"preferredModel\":{\"provider\":\"anthropic\",\"model\":\"claude-sonnet-4-6\",\"temperature\":0.7}}',
+'http://localhost:8080/mcp'
)
ON CONFLICT (user_id) DO UPDATE SET
-license_type = EXCLUDED.license_type,
-features = EXCLUDED.features,
-resource_limits = EXCLUDED.resource_limits,
-preferred_model = EXCLUDED.preferred_model,
+license = EXCLUDED.license,
updated_at = NOW();
" > /dev/null 2>&1
echo -e "${GREEN}✓ Dev user ready ($DEV_EMAIL / $DEV_PASSWORD)${NC}"
@@ -595,21 +605,52 @@ deploy_service() {
# This ensures all patches (including imagePullPolicy) are properly applied
cd "$ROOT_DIR/deploy/k8s/dev"
-# Create a temporary kustomization overlay with image tags
+# Map service names to image names and tags
local image_name=""
local image_tag=""
case "$service" in
relay)
image_name="dexorder/ai-relay"
image_tag="$RELAY_TAG"
;;
ingestor)
image_name="dexorder/ai-ingestor"
image_tag="$INGEST_TAG"
;;
flink)
image_name="dexorder/ai-flink"
image_tag="$FLINK_TAG"
;;
gateway)
image_name="dexorder/ai-gateway"
image_tag="$GATEWAY_TAG"
# Also need to template gateway-config.yaml
sed -i "s/SANDBOX_TAG_PLACEHOLDER/$SANDBOX_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s/SIDECAR_TAG_PLACEHOLDER/$SIDECAR_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev
;;
web)
image_name="dexorder/ai-web"
image_tag="$WEB_TAG"
;;
lifecycle-sidecar|sidecar)
image_name="dexorder/ai-lifecycle-sidecar"
image_tag="$SIDECAR_TAG"
;;
*)
echo -e "${RED}Error: Unknown service '$service'${NC}"
return 1
;;
esac
# Create a temporary kustomization overlay with ONLY this service's image tag
cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev)
images:
-- name: dexorder/relay
-newTag: $RELAY_TAG
-- name: dexorder/ingestor
-newTag: $INGEST_TAG
-- name: dexorder/flink
-newTag: $FLINK_TAG
-- name: dexorder/gateway
-newTag: $GATEWAY_TAG
-- name: dexorder/ai-web
-newTag: $WEB_TAG
+- name: $image_name
+newTag: $image_tag
EOF
kubectl apply -k .
@@ -617,6 +658,12 @@ EOF
# Clean up the appended image tags from kustomization.yaml
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
# Restore gateway-config.yaml placeholders if we modified it
if [ "$service" == "gateway" ]; then
sed -i "s/$SANDBOX_TAG/SANDBOX_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s/$SIDECAR_TAG/SIDECAR_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
fi
echo -e "${GREEN}✓ $service deployed${NC}" echo -e "${GREEN}✓ $service deployed${NC}"
} }
@@ -649,9 +696,9 @@ case "$COMMAND" in
kubectl wait --for=delete pod -l app=qdrant --timeout=60s 2>/dev/null || true
# Now delete PVCs
delete_pvcs all
-# Delete dexorder-agents namespace
-echo -e "${GREEN}→${NC} Deleting dexorder-agents namespace..."
-kubectl delete namespace dexorder-agents 2>/dev/null || true
+# Delete dexorder-sandboxes namespace
+echo -e "${GREEN}→${NC} Deleting dexorder-sandboxes namespace..."
+kubectl delete namespace dexorder-sandboxes 2>/dev/null || true
minikube stop
echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"
@@ -667,7 +714,15 @@ case "$COMMAND" in
# Multiple services specified
for service in "$@"; do
rebuild_images "$service"
-deploy_service "$service"
+# Special handling for sandbox: delete sandbox deployments instead of applying kustomization
+if [ "$service" == "sandbox" ]; then
+echo -e "${GREEN}→${NC} Deleting user container deployments in dexorder-sandboxes namespace..."
+kubectl delete deployments --all -n dexorder-sandboxes 2>/dev/null || true
+echo -e "${GREEN}✓ User containers will be recreated by gateway on next login${NC}"
+else
+deploy_service "$service"
+fi
done
fi
;;

View File

@@ -14,7 +14,7 @@ NC='\033[0m' # No Color
usage() {
echo "Usage: $0 [COMMAND]"
echo ""
-echo "Test client-py against the development environment"
+echo "Test sandbox against the development environment"
echo ""
echo "Commands:"
echo " ohlc Test OHLCClient API (default)"
@@ -107,10 +107,10 @@ run_ohlc_test() {
cd "$ROOT_DIR" cd "$ROOT_DIR"
# Install client-py in development mode # Install sandbox in development mode
pip install -e client-py >/dev/null 2>&1 || { pip install -e sandbox >/dev/null 2>&1 || {
echo -e "${YELLOW}Installing client-py dependencies...${NC}" echo -e "${YELLOW}Installing sandbox dependencies...${NC}"
pip install -e client-py pip install -e sandbox
} }
# Run the test # Run the test
@@ -123,10 +123,10 @@ run_history_test() {
cd "$ROOT_DIR" cd "$ROOT_DIR"
# Install client-py in development mode # Install sandbox in development mode
pip install -e client-py >/dev/null 2>&1 || { pip install -e sandbox >/dev/null 2>&1 || {
echo -e "${YELLOW}Installing client-py dependencies...${NC}" echo -e "${YELLOW}Installing sandbox dependencies...${NC}"
pip install -e client-py pip install -e sandbox
} }
# Run the low-level test # Run the low-level test
@@ -139,10 +139,10 @@ open_shell() {
cd "$ROOT_DIR" cd "$ROOT_DIR"
# Install client-py in development mode # Install sandbox in development mode
pip install -e client-py >/dev/null 2>&1 || { pip install -e sandbox >/dev/null 2>&1 || {
echo -e "${YELLOW}Installing client-py dependencies...${NC}" echo -e "${YELLOW}Installing sandbox dependencies...${NC}"
pip install -e client-py pip install -e sandbox
} }
echo -e "${BLUE}Example usage:${NC}" echo -e "${BLUE}Example usage:${NC}"
@@ -156,7 +156,7 @@ open_shell() {
python3 -i -c " python3 -i -c "
import sys import sys
import os import os
sys.path.insert(0, os.path.join(os.getcwd(), 'client-py')) sys.path.insert(0, os.path.join(os.getcwd(), 'sandbox'))
from dexorder import OHLCClient, HistoryClient, IcebergClient from dexorder import OHLCClient, HistoryClient, IcebergClient
import asyncio import asyncio
print('✓ dexorder package imported') print('✓ dexorder package imported')

View File

@@ -94,6 +94,7 @@ else
"ingestor-secrets" "ingestor-secrets"
"flink-secrets" "flink-secrets"
"gateway-secrets" "gateway-secrets"
"sandbox-secrets"
) )
FAILED=0 FAILED=0

View File

@@ -1,40 +0,0 @@
import logging
from matplotlib import pyplot as plt
import pandas as pd
from abc import abstractmethod, ABC
log = logging.getLogger(__name__)
class ChartingAPI(ABC):
"""
User-facing pyplot charts. Start a Figure with plot_ohlc() or gca(), continue plotting indicators and other
time-series using plot_indicator(), add any ad-hoc axes you need, then call show() to send an image to the user.
"""
@abstractmethod
def plot_ohlc(self, ohlc: pd.DataFrame, axes: plt.Axes = None, **plot_args) -> plt.Figure:
"""
Plots a standard OHLC candlestick chart in the user's preferred style. Use this to overlay any price-series data
or to have a chart for reference above a time-series indicator or other value.
"""
@abstractmethod
def plot_indicator(self, indicator: pd.DataFrame, domain: tuple[float, float] = None, axes: plt.Axes = None,
**plot_args) -> None:
"""
Plots an indicator in the user's standard style. If axes is None then new axes will be created at the bottom
of the current figure.
:param indicator:
:param domain: The minimum and maximum possible values of the indicator. If None, the domain will be inferred from the data
"""
@abstractmethod
def gca(self) -> plt.Figure:
"""
Returns a generic pyplot gca() pre-configured with the user's preferred styling. Calling show() will
send the chart image to the user.
Use this only if it doesn't make sense to have a candlestick chart shown anywhere in the figure. Otherwise
for most indicators, price series, and other time-series values, it's better to start with plot_ohlc() to
at least give the user a chart for reference, even if the primary data you want to show has separate axes.
"""

View File

@@ -1,4 +1,4 @@
-# ValidatingAdmissionPolicy to restrict images in dexorder-agents namespace
+# ValidatingAdmissionPolicy to restrict images in dexorder-sandboxes namespace
# Requires Kubernetes 1.30+ (or 1.28+ with feature gate)
# This is the critical security control that prevents arbitrary image execution
# even if the gateway is compromised.
@@ -6,25 +6,28 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
-name: dexorder-agent-image-policy
+name: dexorder-sandbox-image-policy
spec:
failurePolicy: Fail
matchConstraints:
namespaceSelector:
matchLabels:
-dexorder.io/type: agents
+dexorder.io/type: sandboxes
resourceRules:
- apiGroups: ["apps"]
apiVersions: ["v1"]
resources: ["deployments"]
operations: ["CREATE", "UPDATE"]
validations:
-# Only allow images from our approved registry with agent prefix
+# Only allow images from our approved registry with sandbox prefix
- expression: |
object.spec.template.spec.containers.all(c,
-c.image.startsWith('ghcr.io/dexorder/agent:') ||
-c.image.startsWith('ghcr.io/dexorder/agent-'))
-message: "Only approved dexorder agent images are allowed in the agents namespace"
+c.image.startsWith('ghcr.io/dexorder/sandbox:') ||
+c.image.startsWith('ghcr.io/dexorder/sandbox-') ||
+c.image.startsWith('ghcr.io/dexorder/lifecycle-sidecar:') ||
+c.image.startsWith('dexorder/ai-sandbox:') ||
+c.image.startsWith('dexorder/ai-lifecycle-sidecar:'))
+message: "Only approved dexorder sandbox images are allowed in the sandboxes namespace"
reason: Forbidden
# No privileged containers
@@ -99,12 +102,12 @@ spec:
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicyBinding
metadata:
-name: dexorder-agent-image-policy-binding
+name: dexorder-sandbox-image-policy-binding
spec:
-policyName: dexorder-agent-image-policy
+policyName: dexorder-sandbox-image-policy
validationActions:
- Deny
matchResources:
namespaceSelector:
matchLabels:
-dexorder.io/type: agents
+dexorder.io/type: sandboxes

View File

@@ -1,6 +1,6 @@
-# RBAC for gateway to CREATE agent deployments only
+# RBAC for gateway to CREATE sandbox deployments only
# Principle of least privilege: gateway can ONLY create deployments/services/PVCs
-# in the dexorder-agents namespace. Deletion is handled by the lifecycle sidecar.
+# in the dexorder-sandboxes namespace. Deletion is handled by the lifecycle sidecar.
# No pods, secrets, exec, or cross-namespace access.
---
apiVersion: v1
@@ -8,12 +8,12 @@ kind: ServiceAccount
metadata:
name: gateway
---
-# Role scoped to dexorder-agents namespace only
+# Role scoped to dexorder-sandboxes namespace only
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
-name: agent-creator
-namespace: dexorder-agents
+name: sandbox-creator
+namespace: dexorder-sandboxes
rules:
# Deployments: create and read only (deletion handled by sidecar)
- apiGroups: ["apps"]
@@ -25,7 +25,7 @@ rules:
resources: ["persistentvolumeclaims"] resources: ["persistentvolumeclaims"]
verbs: ["create", "get", "list", "watch"] verbs: ["create", "get", "list", "watch"]
# Services: create and manage agent MCP endpoints # Services: create and manage sandbox MCP endpoints
- apiGroups: [""] - apiGroups: [""]
resources: ["services"] resources: ["services"]
verbs: ["create", "get", "list", "watch", "patch", "update"] verbs: ["create", "get", "list", "watch", "patch", "update"]
@@ -52,13 +52,13 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
-name: gateway-agent-creator
-namespace: dexorder-agents
+name: gateway-sandbox-creator
+namespace: dexorder-sandboxes
subjects:
- kind: ServiceAccount
name: gateway
namespace: default
roleRef:
kind: Role
-name: agent-creator
+name: sandbox-creator
apiGroup: rbac.authorization.k8s.io

View File

@@ -43,6 +43,9 @@ spec:
- name: wait-for-qdrant
image: busybox:1.36
command: ['sh', '-c', 'until nc -z qdrant 6333; do echo waiting for qdrant; sleep 2; done;']
+- name: wait-for-iceberg-catalog
+image: busybox:1.36
+command: ['sh', '-c', 'until nc -z iceberg-catalog 8181; do echo waiting for iceberg-catalog; sleep 2; done;']
volumes:
- name: config

View File

@@ -6,21 +6,21 @@ resources:
- init.yaml
# Namespace definitions with PodSecurity labels
- namespaces.yaml
-# RBAC for gateway to create agents (creation only)
+# RBAC for gateway to create sandboxes (creation only)
- gateway-rbac.yaml
# RBAC for lifecycle sidecar (self-deletion)
- lifecycle-sidecar-rbac.yaml
# Admission policies (image restriction, security requirements)
- admission-policy.yaml
-# Resource quotas and limits for agents namespace
-- agent-quotas.yaml
+# Resource quotas and limits for sandboxes namespace
+- sandbox-quotas.yaml
# Network isolation policies
- network-policies.yaml
# Gateway service
- gateway.yaml
- gateway-ingress.yaml
-# Example agent deployment (for reference, not applied by default)
-# - agent-deployment-example.yaml
+# Example sandbox deployment (for reference, not applied by default)
+# - sandbox-deployment-example.yaml
# Services
- web.yaml
- ingress.yaml

View File

@@ -1,30 +1,30 @@
# RBAC for lifecycle sidecar - allows self-deletion only
-# Each agent pod gets this ServiceAccount and can only delete its own deployment
+# Each sandbox pod gets this ServiceAccount and can only delete its own deployment
---
apiVersion: v1
kind: ServiceAccount
metadata:
-name: agent-lifecycle
-namespace: dexorder-agents
+name: sandbox-lifecycle
+namespace: dexorder-sandboxes
---
# Role allowing deletion of deployments and PVCs
-# This is scoped to the dexorder-agents namespace
+# This is scoped to the dexorder-sandboxes namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
-name: agent-self-delete
-namespace: dexorder-agents
+name: sandbox-self-delete
+namespace: dexorder-sandboxes
rules:
# Allow getting and deleting deployments
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "delete"]
# Allow getting and deleting PVCs (for anonymous users)
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "delete"]
# Read-only access to pods (for status checking)
- apiGroups: [""]
resources: ["pods"]
@@ -33,15 +33,15 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
-name: agent-self-delete
-namespace: dexorder-agents
+name: sandbox-self-delete
+namespace: dexorder-sandboxes
subjects:
- kind: ServiceAccount
-name: agent-lifecycle
-namespace: dexorder-agents
+name: sandbox-lifecycle
+namespace: dexorder-sandboxes
roleRef:
kind: Role
-name: agent-self-delete
+name: sandbox-self-delete
apiGroup: rbac.authorization.k8s.io
---
# Additional security: ValidatingWebhookConfiguration to restrict deletion
@@ -49,5 +49,5 @@ roleRef:
# Requires a validating webhook server (can be added later)
# For now, we rely on:
# 1. Sidecar only knowing its own deployment name (from env)
-# 2. RBAC limiting to dexorder-agents namespace
+# 2. RBAC limiting to dexorder-sandboxes namespace
# 3. Admission policy restricting deployment creation (already defined)

View File

@@ -1,14 +1,14 @@
# Namespace definitions for dexorder AI platform
# - default: gateway, web, and infrastructure services
-# - dexorder-agents: user agent containers (isolated, restricted)
+# - dexorder-sandboxes: per-user sandbox containers (isolated, restricted)
---
apiVersion: v1
kind: Namespace
metadata:
-name: dexorder-agents
+name: dexorder-sandboxes
labels:
app.kubernetes.io/part-of: dexorder
-dexorder.io/type: agents
+dexorder.io/type: sandboxes
# Enforce restricted pod security standards
pod-security.kubernetes.io/enforce: restricted
pod-security.kubernetes.io/enforce-version: latest

View File

@@ -1,29 +1,29 @@
-# Network policies for agent isolation
-# Agents can only communicate with specific services, not with each other
+# Network policies for sandbox isolation
+# Sandboxes can only communicate with specific services, not with each other
# or with the Kubernetes API
---
-# Default deny all ingress and egress in agents namespace
+# Default deny all ingress and egress in sandboxes namespace
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
-namespace: dexorder-agents
+namespace: dexorder-sandboxes
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
-# Allow agents to receive connections from gateway (MCP)
+# Allow sandboxes to receive connections from gateway (MCP)
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-gateway-ingress
-namespace: dexorder-agents
+namespace: dexorder-sandboxes
spec:
podSelector:
matchLabels:
-dexorder.io/component: agent
+dexorder.io/component: sandbox
policyTypes:
- Ingress
ingress:
@@ -37,16 +37,16 @@ spec:
- protocol: TCP
port: 5555 # ZeroMQ control channel
---
-# Allow agents to connect to required services
+# Allow sandboxes to connect to required services
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
-name: allow-agent-egress
-namespace: dexorder-agents
+name: allow-sandbox-egress
+namespace: dexorder-sandboxes
spec:
podSelector:
matchLabels:
-dexorder.io/component: agent
+dexorder.io/component: sandbox
policyTypes:
- Egress
egress:
@@ -93,11 +93,11 @@ spec:
- protocol: TCP
port: 443
---
-# Default namespace: allow ingress from agents to gateway
+# Default namespace: allow ingress from sandboxes to gateway
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
-name: allow-agent-callbacks
+name: allow-sandbox-callbacks
spec:
podSelector:
matchLabels:
@@ -108,7 +108,7 @@ spec:
- from:
- namespaceSelector:
matchLabels:
-dexorder.io/type: agents
+dexorder.io/type: sandboxes
ports:
- protocol: TCP
port: 3000

View File

@@ -1,17 +1,17 @@
-# Example agent deployment with lifecycle sidecar
+# Example sandbox deployment with lifecycle sidecar
# This would be created by the gateway for each user
---
apiVersion: apps/v1
kind: Deployment
metadata:
-name: agent-user-abc123
-namespace: dexorder-agents
+name: sandbox-user-abc123
+namespace: dexorder-sandboxes
labels:
-app.kubernetes.io/name: agent
-app.kubernetes.io/component: user-agent
-dexorder.io/component: agent
+app.kubernetes.io/name: sandbox
+app.kubernetes.io/component: user-sandbox
+dexorder.io/component: sandbox
dexorder.io/user-id: user-abc123
-dexorder.io/deployment: agent-user-abc123
+dexorder.io/deployment: sandbox-user-abc123
spec:
replicas: 1
selector:
@@ -20,15 +20,15 @@ spec:
template:
metadata:
labels:
-dexorder.io/component: agent
+dexorder.io/component: sandbox
dexorder.io/user-id: user-abc123
-dexorder.io/deployment: agent-user-abc123
+dexorder.io/deployment: sandbox-user-abc123
spec:
-serviceAccountName: agent-lifecycle
+serviceAccountName: sandbox-lifecycle
# Share PID namespace so sidecar can monitor main container
shareProcessNamespace: true
# Security context
securityContext:
runAsNonRoot: true
@@ -36,13 +36,13 @@ spec:
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
containers:
-# Main agent container
-- name: agent
-image: ghcr.io/dexorder/agent:latest
+# Main sandbox container
+- name: sandbox
+image: ghcr.io/dexorder/sandbox:latest
imagePullPolicy: Always
# Security context (required by admission policy)
securityContext:
allowPrivilegeEscalation: false
@@ -52,7 +52,7 @@ spec:
capabilities:
drop:
- ALL
# Resource limits (required by admission policy)
resources:
requests:
@@ -61,7 +61,7 @@ spec:
limits:
memory: "1Gi"
cpu: "1000m"
# Environment variables
env:
- name: USER_ID
@@ -76,7 +76,7 @@ spec:
value: "3000" value: "3000"
- name: ZMQ_CONTROL_PORT - name: ZMQ_CONTROL_PORT
value: "5555" value: "5555"
# Ports # Ports
ports: ports:
- name: mcp - name: mcp
@@ -85,17 +85,17 @@ spec:
- name: zmq-control
containerPort: 5555
protocol: TCP
# Volume mounts
volumeMounts:
-- name: agent-data
+- name: sandbox-data
mountPath: /app/data
- name: tmp
mountPath: /tmp
- name: shared-run
-mountPath: /var/run/agent
-# Liveness probe (agent's MCP server)
+mountPath: /var/run/sandbox
+# Liveness probe (sandbox's MCP server)
livenessProbe:
httpGet:
path: /health
@@ -103,7 +103,7 @@ spec:
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
# Readiness probe
readinessProbe:
httpGet:
@@ -111,12 +111,12 @@ spec:
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
# Lifecycle sidecar
- name: lifecycle-sidecar
image: ghcr.io/dexorder/lifecycle-sidecar:latest
imagePullPolicy: Always
# Security context
securityContext:
allowPrivilegeEscalation: false
@@ -126,7 +126,7 @@ spec:
capabilities:
drop:
- ALL
# Resource limits
resources:
requests:
@@ -135,7 +135,7 @@ spec:
limits:
memory: "64Mi"
cpu: "50m"
# Environment variables (injected via downward API)
env:
- name: NAMESPACE
@@ -150,44 +150,44 @@ spec:
value: "free" # Gateway sets this based on license value: "free" # Gateway sets this based on license
- name: MAIN_CONTAINER_PID - name: MAIN_CONTAINER_PID
value: "1" # In shared PID namespace, main container is typically PID 1 value: "1" # In shared PID namespace, main container is typically PID 1
# Volume mounts # Volume mounts
volumeMounts: volumeMounts:
- name: shared-run - name: shared-run
mountPath: /var/run/agent mountPath: /var/run/sandbox
readOnly: true readOnly: true
# Volumes # Volumes
volumes: volumes:
# Persistent data (user files, state) # Persistent data (user files, state)
- name: agent-data - name: sandbox-data
persistentVolumeClaim: persistentVolumeClaim:
claimName: agent-user-abc123-data claimName: sandbox-user-abc123-data
# Temporary writable filesystem (read-only rootfs) # Temporary writable filesystem (read-only rootfs)
- name: tmp - name: tmp
emptyDir: emptyDir:
medium: Memory medium: Memory
sizeLimit: 128Mi sizeLimit: 128Mi
# Shared between main container and sidecar # Shared between main container and sidecar
- name: shared-run - name: shared-run
emptyDir: emptyDir:
medium: Memory medium: Memory
sizeLimit: 1Mi sizeLimit: 1Mi
# Restart policy # Restart policy
restartPolicy: Always restartPolicy: Always
# Termination grace period # Termination grace period
terminationGracePeriodSeconds: 30 terminationGracePeriodSeconds: 30
--- ---
# PVC for agent persistent data # PVC for sandbox persistent data
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: agent-user-abc123-data name: sandbox-user-abc123-data
namespace: dexorder-agents namespace: dexorder-sandboxes
labels: labels:
dexorder.io/user-id: user-abc123 dexorder.io/user-id: user-abc123
spec: spec:
@@ -198,12 +198,12 @@ spec:
storage: 1Gi
storageClassName: standard # Or your preferred storage class
---
-# Service to expose agent MCP endpoint
+# Service to expose sandbox MCP endpoint
apiVersion: v1
kind: Service
metadata:
-name: agent-user-abc123
-namespace: dexorder-agents
+name: sandbox-user-abc123
+namespace: dexorder-sandboxes
labels:
dexorder.io/user-id: user-abc123
spec:

View File

@@ -1,12 +1,12 @@
-# Resource constraints for the dexorder-agents namespace
+# Resource constraints for the dexorder-sandboxes namespace
# These limits apply regardless of what the gateway requests
---
# LimitRange: per-container defaults and maximums
apiVersion: v1
kind: LimitRange
metadata:
-name: agent-limits
-namespace: dexorder-agents
+name: sandbox-limits
+namespace: dexorder-sandboxes
spec:
limits:
# Default limits applied if deployment doesn't specify
@@ -36,11 +36,11 @@ spec:
apiVersion: v1
kind: ResourceQuota
metadata:
-name: agent-quota
-namespace: dexorder-agents
+name: sandbox-quota
+namespace: dexorder-sandboxes
spec:
hard:
-# Total compute limits for all agents combined
+# Total compute limits for all sandboxes combined
requests.cpu: "20"
requests.memory: "40Gi"
limits.cpu: "40"

View File

@@ -4,13 +4,13 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
-name: dexorder-agent-image-policy
+name: dexorder-sandbox-image-policy
spec:
failurePolicy: Fail
matchConstraints:
namespaceSelector:
matchLabels:
-dexorder.io/type: agents
+dexorder.io/type: sandboxes
resourceRules:
- apiGroups: ["apps"]
apiVersions: ["v1"]
@@ -20,13 +20,13 @@ spec:
# Allow local dev images in addition to production registry
- expression: |
object.spec.template.spec.containers.all(c,
-c.image.startsWith('ghcr.io/dexorder/agent:') ||
-c.image.startsWith('ghcr.io/dexorder/agent-') ||
-c.image.startsWith('localhost:5000/dexorder/agent') ||
-c.image.startsWith('dexorder/agent') ||
-c.image.startsWith('dexorder/ai-client-py') ||
+c.image.startsWith('ghcr.io/dexorder/sandbox:') ||
+c.image.startsWith('ghcr.io/dexorder/sandbox-') ||
+c.image.startsWith('localhost:5000/dexorder/sandbox') ||
+c.image.startsWith('dexorder/sandbox') ||
+c.image.startsWith('dexorder/ai-sandbox') ||
c.image.startsWith('dexorder/ai-lifecycle-sidecar'))
-message: "Only approved dexorder agent images are allowed"
+message: "Only approved dexorder sandbox images are allowed"
reason: Forbidden
# No privileged containers

View File

@@ -53,13 +53,18 @@ data:
# Kubernetes configuration
kubernetes:
-namespace: dexorder-agents
+namespace: dexorder-sandboxes
in_cluster: true
-agent_image: dexorder/ai-client-py:latest
-sidecar_image: dexorder/ai-lifecycle-sidecar:latest
+sandbox_image: dexorder/ai-sandbox:SANDBOX_TAG_PLACEHOLDER
+sidecar_image: dexorder/ai-lifecycle-sidecar:SIDECAR_TAG_PLACEHOLDER
storage_class: standard
image_pull_policy: Never # For minikube dev - use local images
+# Agent configuration
+agent:
+# Number of prior conversation turns loaded as LLM context and flushed to Iceberg at session end
+conversation_history_limit: 20
# DragonflyDB (Redis-compatible, for hot storage and session management)
redis:
url: redis://dragonfly:6379
@@ -76,6 +81,7 @@ data:
ohlc_catalog_uri: http://iceberg-catalog:8181
ohlc_namespace: trading
s3_endpoint: http://minio:9000
+conversations_bucket: warehouse # S3 bucket for conversation Parquet cold storage
# Event router (ZeroMQ)
events:

View File

@@ -8,7 +8,7 @@ spec:
spec:
containers:
- name: gateway
-image: dexorder/ai-gateway:latest
+image: dexorder/ai-gateway
imagePullPolicy: Never
env:
- name: NODE_OPTIONS

View File

@@ -480,7 +480,7 @@ spec:
command: ['sh', '-c', 'until nc -z iceberg-catalog 8181; do echo waiting for iceberg-catalog; sleep 2; done;']
containers:
- name: flink-jobmanager
-image: dexorder/flink:latest
+image: dexorder/ai-flink
imagePullPolicy: Never
args: ["standalone-job", "--job-classname", "com.dexorder.flink.TradingFlinkApp"]
ports:
@@ -542,7 +542,7 @@ spec:
command: ['sh', '-c', 'until nc -z flink-jobmanager 6123; do echo waiting for jobmanager; sleep 2; done;']
containers:
- name: flink-taskmanager
-image: dexorder/flink:latest
+image: dexorder/ai-flink
imagePullPolicy: Never
args: ["taskmanager"]
env:
@@ -617,7 +617,7 @@ spec:
spec:
containers:
- name: relay
-image: dexorder/relay:latest
+image: dexorder/ai-relay
imagePullPolicy: Never
ports:
- containerPort: 5555
@@ -665,7 +665,7 @@ spec:
command: ['sh', '-c', 'until nc -z kafka 9092; do echo waiting for kafka; sleep 2; done;']
containers:
- name: ingestor
-image: dexorder/ingestor:latest
+image: dexorder/ai-ingestor
imagePullPolicy: Never
env:
- name: LOG_LEVEL

View File

@@ -8,12 +8,12 @@ resources:
- storage-class.yaml
- configs/gateway-config.yaml
- gateway-health-ingress.yaml
-- agent-config.yaml # ConfigMap for agent pods in dexorder-agents namespace
+- sandbox-config.yaml # ConfigMap for sandbox pods in dexorder-sandboxes namespace
# Dev-specific patches
patches:
# Reduced resource quotas for minikube
-- path: agent-quotas-patch.yaml
+- path: sandbox-quotas-patch.yaml
# Allow local registry images
- path: admission-policy-patch.yaml
# Web environment variables for dev
@@ -155,6 +155,63 @@ generatorOptions:

View File

@@ -1,18 +1,18 @@
-# Agent ConfigMap in dexorder-agents namespace
-# This is mounted into dynamically created agent pods
+# Sandbox ConfigMap in dexorder-sandboxes namespace
+# This is mounted into dynamically created sandbox pods
---
apiVersion: v1
kind: ConfigMap
metadata:
-name: agent-config
-namespace: dexorder-agents
+name: sandbox-config
+namespace: dexorder-sandboxes
labels:
-app.kubernetes.io/name: agent
+app.kubernetes.io/name: sandbox
app.kubernetes.io/component: config
data:
config.yaml: |
-# Default configuration for user agent containers
-# This is mounted at /app/config/config.yaml in agent pods
+# Default configuration for user sandbox containers
+# This is mounted at /app/config/config.yaml in sandbox pods
# Data directory for persistent storage (workspace, strategies, etc.)
# This is mounted as a PVC at /app/data
@@ -26,10 +26,14 @@ data:
data:
iceberg:
catalog_name: "dexorder"
-# Catalog properties loaded from secrets
+catalog_uri: "http://iceberg-catalog.default.svc.cluster.local:8181"
+namespace: "trading"
+# S3 endpoint for MinIO in default namespace
+s3_endpoint: "http://minio.default.svc.cluster.local:9000"
relay:
-endpoint: "tcp://relay.dexorder.svc.cluster.local:5560"
+endpoint: "tcp://relay.default.svc.cluster.local:5559"
+notification_endpoint: "tcp://relay.default.svc.cluster.local:5558"
timeout_ms: 5000
# Strategy settings

View File

@@ -4,8 +4,8 @@
apiVersion: v1
kind: ResourceQuota
metadata:
-name: agent-quota
-namespace: dexorder-agents
+name: sandbox-quota
+namespace: dexorder-sandboxes
spec:
hard:
# Reduced for minikube

View File

@@ -8,7 +8,7 @@ spec:
spec:
containers:
- name: ai-web
-image: dexorder/ai-web:latest
+image: dexorder/ai-web
imagePullPolicy: Never
env:
- name: VITE_GATEWAY_URL

View File

@@ -28,9 +28,9 @@ data:
# Kubernetes configuration
kubernetes:
-namespace: dexorder-agents
+namespace: dexorder-sandboxes
in_cluster: true
-agent_image: dexorder/ai-client-py:latest
+sandbox_image: dexorder/ai-sandbox:latest
sidecar_image: dexorder/ai-lifecycle-sidecar:latest
storage_class: standard
image_pull_policy: Always # For production - always pull from registry

View File

@@ -109,7 +109,7 @@ DexOrder is an AI-powered trading platform that combines real-time market data p
### 2. User Containers
-**Location:** `client-py/`
+**Location:** `sandbox/`
**Language:** Python
**Purpose:** Per-user isolated workspace and data storage
@@ -415,12 +415,12 @@ User authenticates → Gateway checks if deployment exists
### RBAC
**Gateway ServiceAccount:**
-- Create deployments/services/PVCs in `dexorder-agents` namespace
+- Create deployments/services/PVCs in `dexorder-sandboxes` namespace
- Read pod status and logs
- Cannot delete, exec, or access secrets
**Lifecycle Sidecar ServiceAccount:**
-- Delete deployments in `dexorder-agents` namespace
+- Delete deployments in `dexorder-sandboxes` namespace
- Delete PVCs (conditional on user type)
- Cannot access other resources
@@ -428,7 +428,7 @@ User authenticates → Gateway checks if deployment exists
### Admission Control
-All pods in `dexorder-agents` namespace must:
+All pods in `dexorder-sandboxes` namespace must:
- Use approved images only (allowlist)
- Run as non-root
- Drop all capabilities
@@ -544,13 +544,13 @@ kubectl apply -k deploy/k8s/prod
# Push images to registry
docker push ghcr.io/dexorder/gateway:latest
-docker push ghcr.io/dexorder/agent:latest
+docker push ghcr.io/dexorder/sandbox:latest
docker push ghcr.io/dexorder/lifecycle-sidecar:latest
```
**Namespaces:**
- `dexorder-system` - Platform services (gateway, infrastructure)
-- `dexorder-agents` - User containers (isolated)
+- `dexorder-sandboxes` - User containers (isolated)
---

View File

@@ -35,7 +35,7 @@ User agent containers self-manage their lifecycle to optimize resource usage. Co
### 1. Lifecycle Manager (Python)
-**Location**: `client-py/dexorder/lifecycle_manager.py`
+**Location**: `sandbox/dexorder/lifecycle_manager.py`
Runs inside the agent container and tracks:
- **Activity**: MCP tool/resource/prompt calls reset the idle timer
@@ -85,7 +85,7 @@ Runs alongside the agent container with shared PID namespace. Monitors the main
- `USER_TYPE`: License tier (`anonymous`, `free`, `paid`, `enterprise`)
- `MAIN_CONTAINER_PID`: PID of main container (default: 1)
-**RBAC**: Has permission to delete deployments and PVCs **only in dexorder-agents namespace**. Cannot delete other deployments due to:
+**RBAC**: Has permission to delete deployments and PVCs **only in dexorder-sandboxes namespace**. Cannot delete other deployments due to:
1. Only knows its own deployment name (from env)
2. RBAC scoped to namespace
3. No cross-pod communication
@@ -164,12 +164,12 @@ Configured via `USER_TYPE` env var in deployment.
**Lifecycle Sidecar**:
- Can delete its own deployment only
- Cannot delete other deployments
-- Scoped to dexorder-agents namespace
+- Scoped to dexorder-sandboxes namespace
- No exec, no secrets access
### Admission Control
-All deployments in `dexorder-agents` namespace are subject to:
+All deployments in `dexorder-sandboxes` namespace are subject to:
- Image allowlist (only approved images)
- Security context enforcement (non-root, drop caps, read-only rootfs)
- Resource limits required
@@ -198,7 +198,7 @@ kubectl apply -k deploy/k8s/dev # or prod
```
This creates:
-- Namespaces (`dexorder-system`, `dexorder-agents`)
+- Namespaces (`dexorder-system`, `dexorder-sandboxes`)
- RBAC (gateway, lifecycle sidecar)
- Admission policies
- Network policies
@@ -257,7 +257,7 @@ cd lifecycle-sidecar
go build -o lifecycle-sidecar main.go
# Run (requires k8s config)
-export NAMESPACE=dexorder-agents
+export NAMESPACE=dexorder-sandboxes
export DEPLOYMENT_NAME=agent-test
export USER_TYPE=free
./lifecycle-sidecar
@@ -277,7 +277,7 @@ export USER_TYPE=free
Check logs:
```bash
-kubectl logs -n dexorder-agents agent-user-abc123 -c agent
+kubectl logs -n dexorder-sandboxes sandbox-user-abc123 -c agent
```
Verify:
@@ -289,19 +289,19 @@ Verify:
Check sidecar logs:
```bash
-kubectl logs -n dexorder-agents agent-user-abc123 -c lifecycle-sidecar
+kubectl logs -n dexorder-sandboxes sandbox-user-abc123 -c lifecycle-sidecar
```
Verify:
- Exit code file exists: `/var/run/agent/exit_code` contains `42`
-- RBAC permissions: `kubectl auth can-i delete deployments --as=system:serviceaccount:dexorder-agents:agent-lifecycle -n dexorder-agents`
+- RBAC permissions: `kubectl auth can-i delete deployments --as=system:serviceaccount:dexorder-sandboxes:sandbox-lifecycle -n dexorder-sandboxes`
- Deployment name matches: Check `DEPLOYMENT_NAME` env var
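An illustrative aside, not code from this commit: the exit-code handoff being verified above can be pictured as below, assuming only the shared emptyDir mount and the exit-code file named in these docs. The real logic lives in `sandbox/dexorder/lifecycle_manager.py` and the Go sidecar, neither of which is shown in this diff.

```python
from pathlib import Path

# Path per the verification step above; note the example deployment in this commit
# renames the shared mount from /var/run/agent to /var/run/sandbox.
EXIT_CODE_FILE = Path("/var/run/agent/exit_code")

def request_shutdown(exit_code: int = 42) -> None:
    """Record the shutdown decision where the lifecycle sidecar can see it.

    The sidecar mounts the same emptyDir read-only and, on seeing this file, deletes
    the deployment (and the PVC for anonymous users) via its namespace-scoped RBAC.
    """
    EXIT_CODE_FILE.parent.mkdir(parents=True, exist_ok=True)
    EXIT_CODE_FILE.write_text(str(exit_code))
```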
### Gateway can't create deployments
Check gateway logs and verify:
- ServiceAccount exists: `kubectl get sa gateway -n dexorder-system`
-- RoleBinding exists: `kubectl get rolebinding gateway-agent-creator -n dexorder-agents`
+- RoleBinding exists: `kubectl get rolebinding gateway-sandbox-creator -n dexorder-sandboxes`
- Admission policy allows image: Check image name matches allowlist in `admission-policy.yaml`
## Future Enhancements
@@ -60,10 +60,10 @@ All resources follow a consistent naming pattern based on `userId`:
```typescript ```typescript
userId: "user-abc123" userId: "user-abc123"
deploymentName: "agent-user-abc123" deploymentName: "sandbox-user-abc123"
serviceName: "agent-user-abc123" serviceName: "sandbox-user-abc123"
pvcName: "agent-user-abc123-data" pvcName: "sandbox-user-abc123-data"
mcpEndpoint: "http://agent-user-abc123.dexorder-agents.svc.cluster.local:3000" mcpEndpoint: "http://sandbox-user-abc123.dexorder-sandboxes.svc.cluster.local:3000"
``` ```
User IDs are sanitized to be Kubernetes-compliant (lowercase alphanumeric + hyphens). User IDs are sanitized to be Kubernetes-compliant (lowercase alphanumeric + hyphens).
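The naming rules above fit in a couple of lines; the following is an illustrative sketch (the helper names `sanitizeUserId` and `resourceNames` are not from the repo):

```typescript
// Illustrative sketch of the naming convention above; helper names are not from the repo.
function sanitizeUserId(userId: string): string {
  // Kubernetes names: lowercase alphanumeric and hyphens only
  return userId.toLowerCase().replace(/[^a-z0-9-]/g, '-');
}

function resourceNames(userId: string) {
  const id = sanitizeUserId(userId);
  return {
    deploymentName: `sandbox-${id}`,
    serviceName: `sandbox-${id}`,
    pvcName: `sandbox-${id}-data`,
    mcpEndpoint: `http://sandbox-${id}.dexorder-sandboxes.svc.cluster.local:3000`,
  };
}
```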
@@ -82,7 +82,7 @@ Templates use simple string replacement:
- `{{deploymentName}}` - Computed deployment name - `{{deploymentName}}` - Computed deployment name
- `{{serviceName}}` - Computed service name - `{{serviceName}}` - Computed service name
- `{{pvcName}}` - Computed PVC name - `{{pvcName}}` - Computed PVC name
- `{{agentImage}}` - Agent container image (from env) - `{{sandboxImage}}` - Agent container image (from env)
- `{{sidecarImage}}` - Lifecycle sidecar image (from env) - `{{sidecarImage}}` - Lifecycle sidecar image (from env)
- `{{storageClass}}` - Kubernetes storage class (from env) - `{{storageClass}}` - Kubernetes storage class (from env)
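A minimal sketch of the string replacement applied to the placeholders above (the function name and sample template are illustrative, not the actual template code):

```typescript
// Minimal sketch of {{placeholder}} substitution; not the actual template renderer.
function renderTemplate(template: string, values: Record<string, string>): string {
  return template.replace(/\{\{(\w+)\}\}/g, (match, key) => values[key] ?? match);
}

const deploymentTemplate = 'metadata:\n  name: {{deploymentName}}\n  labels:\n    app: {{deploymentName}}';
const manifest = renderTemplate(deploymentTemplate, {
  deploymentName: 'sandbox-user-abc123',
  sandboxImage: process.env.SANDBOX_IMAGE ?? '',
  storageClass: process.env.SANDBOX_STORAGE_CLASS ?? 'standard',
});
```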
@@ -145,16 +145,16 @@ Environment variables:
```bash ```bash
# Kubernetes # Kubernetes
KUBERNETES_NAMESPACE=dexorder-agents KUBERNETES_NAMESPACE=dexorder-sandboxes
KUBERNETES_IN_CLUSTER=true # false for local dev KUBERNETES_IN_CLUSTER=true # false for local dev
KUBERNETES_CONTEXT=minikube # for local dev only KUBERNETES_CONTEXT=minikube # for local dev only
# Container images # Container images
AGENT_IMAGE=ghcr.io/dexorder/agent:latest SANDBOX_IMAGE=ghcr.io/dexorder/sandbox:latest
SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest
# Storage # Storage
AGENT_STORAGE_CLASS=standard SANDBOX_STORAGE_CLASS=standard
``` ```
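An illustrative loader for the variables above (the interface and function names are not from the gateway source):

```typescript
// Illustrative loader for the environment variables listed above; not the gateway's config module.
interface KubernetesConfig {
  namespace: string;
  inCluster: boolean;
  context?: string;
  sandboxImage: string;
  sidecarImage: string;
  storageClass: string;
}

function loadKubernetesConfig(env = process.env): KubernetesConfig {
  return {
    namespace: env.KUBERNETES_NAMESPACE ?? 'dexorder-sandboxes',
    inCluster: env.KUBERNETES_IN_CLUSTER === 'true',
    context: env.KUBERNETES_CONTEXT,          // only used for local dev
    sandboxImage: env.SANDBOX_IMAGE ?? '',
    sidecarImage: env.SIDECAR_IMAGE ?? '',
    storageClass: env.SANDBOX_STORAGE_CLASS ?? 'standard',
  };
}
```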
## Security ## Security
@@ -162,9 +162,9 @@ AGENT_STORAGE_CLASS=standard
The gateway uses a restricted ServiceAccount with RBAC: The gateway uses a restricted ServiceAccount with RBAC:
**Can do:** **Can do:**
- ✅ Create deployments in `dexorder-agents` namespace - ✅ Create deployments in `dexorder-sandboxes` namespace
- ✅ Create services in `dexorder-agents` namespace - ✅ Create services in `dexorder-sandboxes` namespace
- ✅ Create PVCs in `dexorder-agents` namespace - ✅ Create PVCs in `dexorder-sandboxes` namespace
- ✅ Read pod status and logs (debugging) - ✅ Read pod status and logs (debugging)
- ✅ Update deployments (future: resource scaling) - ✅ Update deployments (future: resource scaling)
@@ -226,7 +226,7 @@ kubectl apply -k deploy/k8s/dev
# .env # .env
KUBERNETES_IN_CLUSTER=false KUBERNETES_IN_CLUSTER=false
KUBERNETES_CONTEXT=minikube KUBERNETES_CONTEXT=minikube
KUBERNETES_NAMESPACE=dexorder-agents KUBERNETES_NAMESPACE=dexorder-sandboxes
``` ```
4. Run gateway: 4. Run gateway:
@@ -242,9 +242,9 @@ wscat -c "ws://localhost:3000/ws/chat" -H "Authorization: Bearer your-jwt"
The gateway will create deployments in minikube. View with: The gateway will create deployments in minikube. View with:
```bash ```bash
kubectl get deployments -n dexorder-agents kubectl get deployments -n dexorder-sandboxes
kubectl get pods -n dexorder-agents kubectl get pods -n dexorder-sandboxes
kubectl logs -n dexorder-agents agent-user-abc123 -c agent kubectl logs -n dexorder-sandboxes sandbox-user-abc123 -c agent
``` ```
## Production Deployment ## Production Deployment
@@ -262,7 +262,7 @@ kubectl apply -k deploy/k8s/prod
``` ```
3. Gateway runs in `dexorder-system` namespace 3. Gateway runs in `dexorder-system` namespace
4. Creates agent containers in `dexorder-agents` namespace 4. Creates agent containers in `dexorder-sandboxes` namespace
5. Admission policies enforce image allowlist and security constraints 5. Admission policies enforce image allowlist and security constraints
## Monitoring ## Monitoring
@@ -55,7 +55,7 @@ Two ZMQ patterns handle different delivery requirements:
### File Structure ### File Structure
``` ```
client-py/dexorder/ sandbox/dexorder/
├── events/ ├── events/
│ ├── __init__.py │ ├── __init__.py
│ ├── publisher.py # EventPublisher class │ ├── publisher.py # EventPublisher class
@@ -66,7 +66,7 @@ client-py/dexorder/
### Event Publisher Class ### Event Publisher Class
```python ```python
# client-py/dexorder/events/publisher.py # sandbox/dexorder/events/publisher.py
import asyncio import asyncio
import time import time
@@ -295,7 +295,7 @@ class EventPublisher:
### Event Types ### Event Types
```python ```python
# client-py/dexorder/events/types.py # sandbox/dexorder/events/types.py
from dataclasses import dataclass, field from dataclasses import dataclass, field
from enum import IntEnum from enum import IntEnum
@@ -465,7 +465,7 @@ class EventAck:
### Pending Event Persistence ### Pending Event Persistence
```python ```python
# client-py/dexorder/events/pending_store.py # sandbox/dexorder/events/pending_store.py
import json import json
import aiofiles import aiofiles
@@ -1169,7 +1169,7 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy kind: NetworkPolicy
metadata: metadata:
name: agent-to-gateway-events name: agent-to-gateway-events
namespace: dexorder-agents namespace: dexorder-sandboxes
spec: spec:
podSelector: podSelector:
matchLabels: matchLabels:
@@ -28,12 +28,12 @@ DEFAULT_MODEL=claude-sonnet-4-6
TELEGRAM_BOT_TOKEN= TELEGRAM_BOT_TOKEN=
# Kubernetes configuration # Kubernetes configuration
KUBERNETES_NAMESPACE=dexorder-agents KUBERNETES_NAMESPACE=dexorder-sandboxes
KUBERNETES_IN_CLUSTER=false KUBERNETES_IN_CLUSTER=false
KUBERNETES_CONTEXT=minikube KUBERNETES_CONTEXT=minikube
AGENT_IMAGE=ghcr.io/dexorder/agent:latest SANDBOX_IMAGE=ghcr.io/dexorder/sandbox:latest
SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest
AGENT_STORAGE_CLASS=standard SANDBOX_STORAGE_CLASS=standard
# Redis (for hot storage and session management) # Redis (for hot storage and session management)
REDIS_URL=redis://localhost:6379 REDIS_URL=redis://localhost:6379
2
gateway/.gitignore vendored
@@ -4,3 +4,5 @@ dist
.env.local .env.local
*.log *.log
.DS_Store .DS_Store
# Auto-generated Python API files (copied at build time)
src/harness/subagents/research/api-source/
@@ -18,6 +18,9 @@ COPY src ./src
# Build (includes protobuf generation) # Build (includes protobuf generation)
RUN npm run build RUN npm run build
# Note: Python API files for research subagent are copied by bin/build script
# to src/harness/subagents/research/api-source/ before docker build
# Production image # Production image
FROM node:22-slim FROM node:22-slim
@@ -62,6 +65,17 @@ COPY protobuf ./protobuf
# Copy k8s templates (not included in TypeScript build) # Copy k8s templates (not included in TypeScript build)
COPY src/k8s/templates ./dist/k8s/templates COPY src/k8s/templates ./dist/k8s/templates
# Copy harness prompts (not included in TypeScript build)
COPY src/harness/prompts ./dist/harness/prompts
# Copy all subagent directories (config.yaml, system-prompt.md, memory/, etc.)
# TypeScript build already compiled .ts files to .js in dist, so we copy the entire
# source directory to get all non-TypeScript assets, then remove .ts duplicates
COPY src/harness/subagents ./dist/harness/subagents
# Remove source .ts files (we only need the compiled .js from builder stage)
# Keep .yaml, .md files and memory/ directories
RUN find ./dist/harness/subagents -name "*.ts" -type f -delete
# Copy entrypoint script # Copy entrypoint script
COPY entrypoint.sh ./ COPY entrypoint.sh ./
RUN chmod +x entrypoint.sh RUN chmod +x entrypoint.sh
@@ -47,10 +47,10 @@ license_models:
# Kubernetes configuration # Kubernetes configuration
kubernetes: kubernetes:
namespace: dexorder-agents namespace: dexorder-sandboxes
in_cluster: false in_cluster: false
context: minikube context: minikube
agent_image: ghcr.io/dexorder/agent:latest sandbox_image: ghcr.io/dexorder/sandbox:latest
sidecar_image: ghcr.io/dexorder/lifecycle-sidecar:latest sidecar_image: ghcr.io/dexorder/lifecycle-sidecar:latest
storage_class: standard storage_class: standard
@@ -23,9 +23,11 @@
"@qdrant/js-client-rest": "^1.17.0", "@qdrant/js-client-rest": "^1.17.0",
"argon2": "^0.41.1", "argon2": "^0.41.1",
"better-auth": "^1.5.3", "better-auth": "^1.5.3",
"chrono-node": "^2.7.10",
"duckdb": "^1.1.3", "duckdb": "^1.1.3",
"fast-json-patch": "^3.1.1", "fast-json-patch": "^3.1.1",
"fastify": "^5.2.0", "fastify": "^5.2.0",
"gray-matter": "^4.0.3",
"ioredis": "^5.4.2", "ioredis": "^5.4.2",
"js-yaml": "^4.1.0", "js-yaml": "^4.1.0",
"kysely": "^0.27.3", "kysely": "^0.27.3",
@@ -62,34 +62,18 @@ CREATE TABLE IF NOT EXISTS verification (
CREATE INDEX idx_verification_identifier ON verification(identifier); CREATE INDEX idx_verification_identifier ON verification(identifier);
-- User license and authorization schema (custom tables) -- User license and authorization schema (custom tables)
-- Per-user rows are copies of the tier template that can be customised independently.
-- See LICENSE_TIER_TEMPLATES in gateway/src/types/user.ts for tier defaults.
CREATE TABLE IF NOT EXISTS user_licenses ( CREATE TABLE IF NOT EXISTS user_licenses (
user_id TEXT PRIMARY KEY REFERENCES "user"(id) ON DELETE CASCADE, user_id TEXT PRIMARY KEY REFERENCES "user"(id) ON DELETE CASCADE,
email TEXT, email TEXT,
license_type TEXT NOT NULL CHECK (license_type IN ('free', 'pro', 'enterprise')), license JSONB NOT NULL,
features JSONB NOT NULL DEFAULT '{
"maxIndicators": 5,
"maxStrategies": 3,
"maxBacktestDays": 30,
"realtimeData": false,
"customExecutors": false,
"apiAccess": false
}',
resource_limits JSONB NOT NULL DEFAULT '{
"maxConcurrentSessions": 1,
"maxMessagesPerDay": 100,
"maxTokensPerMessage": 4096,
"rateLimitPerMinute": 10
}',
mcp_server_url TEXT NOT NULL, mcp_server_url TEXT NOT NULL,
preferred_model JSONB DEFAULT NULL,
expires_at TIMESTAMP WITH TIME ZONE, expires_at TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
); );
COMMENT ON COLUMN user_licenses.preferred_model IS 'Optional model preference: {"provider": "anthropic", "model": "claude-sonnet-4-6", "temperature": 0.7}';
CREATE INDEX idx_user_licenses_expires_at ON user_licenses(expires_at) CREATE INDEX idx_user_licenses_expires_at ON user_licenses(expires_at)
WHERE expires_at IS NOT NULL; WHERE expires_at IS NOT NULL;
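The per-column defaults removed above are now carried inside the `license` JSONB document. A sketch of what the free-tier template plausibly contains, reconstructed from those old defaults (the authoritative shape is `LICENSE_TIER_TEMPLATES` in `gateway/src/types/user.ts`):

```typescript
// Reconstructed from the old column defaults above; see LICENSE_TIER_TEMPLATES for the real values.
const freeTierLicense = {
  licenseType: 'free',
  features: {
    maxIndicators: 5,
    maxStrategies: 3,
    maxBacktestDays: 30,
    realtimeData: false,
    customExecutors: false,
    apiAccess: false,
  },
  resourceLimits: {
    maxConcurrentSessions: 1,
    maxMessagesPerDay: 100,
    maxTokensPerMessage: 4096,
    rateLimitPerMinute: 10,
  },
};
```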
@@ -1,6 +1,7 @@
import type { BetterAuthInstance } from './better-auth-config.js'; import type { BetterAuthInstance } from './better-auth-config.js';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import type { Pool } from 'pg'; import type { Pool } from 'pg';
import { LICENSE_TIER_TEMPLATES } from '../types/user.js';
export interface AuthServiceConfig { export interface AuthServiceConfig {
auth: BetterAuthInstance; auth: BetterAuthInstance;
@@ -202,11 +203,11 @@ export class AuthService {
); );
if (licenseCheck.rows.length === 0) { if (licenseCheck.rows.length === 0) {
// Create default free license // Create default free license — copy the full tier template so every field is present
await client.query( await client.query(
`INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url) `INSERT INTO user_licenses (user_id, email, license, mcp_server_url)
VALUES ($1, $2, 'free', 'pending')`, VALUES ($1, $2, $3::jsonb, 'pending')`,
[userId, email] [userId, email, JSON.stringify(LICENSE_TIER_TEMPLATES.free)]
); );
this.config.logger.info({ userId }, 'Created default free license for new user'); this.config.logger.info({ userId }, 'Created default free license for new user');
@@ -56,7 +56,7 @@ export class Authenticator {
this.config.logger.info({ userId }, 'Ensuring user container is running'); this.config.logger.info({ userId }, 'Ensuring user container is running');
const { mcpEndpoint, wasCreated, isSpinningUp } = await this.config.containerManager.ensureContainerRunning( const { mcpEndpoint, wasCreated, isSpinningUp } = await this.config.containerManager.ensureContainerRunning(
userId, userId,
license, license.license,
false // Don't wait for ready false // Don't wait for ready
); );
@@ -72,9 +72,6 @@ export class Authenticator {
); );
} }
// Update license with actual MCP endpoint
license.mcpServerUrl = mcpEndpoint;
const sessionId = `ws_${userId}_${Date.now()}`; const sessionId = `ws_${userId}_${Date.now()}`;
return { return {
@@ -83,7 +80,8 @@ export class Authenticator {
channelType: ChannelType.WEBSOCKET, channelType: ChannelType.WEBSOCKET,
channelUserId: userId, // For WebSocket, same as userId channelUserId: userId, // For WebSocket, same as userId
sessionId, sessionId,
license, license: license.license,
mcpServerUrl: mcpEndpoint,
authenticatedAt: new Date(), authenticatedAt: new Date(),
}, },
isSpinningUp, isSpinningUp,
@@ -123,7 +121,7 @@ export class Authenticator {
this.config.logger.info({ userId }, 'Ensuring user container is running'); this.config.logger.info({ userId }, 'Ensuring user container is running');
const { mcpEndpoint, wasCreated } = await this.config.containerManager.ensureContainerRunning( const { mcpEndpoint, wasCreated } = await this.config.containerManager.ensureContainerRunning(
userId, userId,
license license.license
); );
this.config.logger.info( this.config.logger.info(
@@ -131,9 +129,6 @@ export class Authenticator {
'Container is ready' 'Container is ready'
); );
// Update license with actual MCP endpoint
license.mcpServerUrl = mcpEndpoint;
const sessionId = `tg_${telegramUserId}_${Date.now()}`; const sessionId = `tg_${telegramUserId}_${Date.now()}`;
return { return {
@@ -141,7 +136,8 @@ export class Authenticator {
channelType: ChannelType.TELEGRAM, channelType: ChannelType.TELEGRAM,
channelUserId: telegramUserId, channelUserId: telegramUserId,
sessionId, sessionId,
license, license: license.license,
mcpServerUrl: mcpEndpoint,
authenticatedAt: new Date(), authenticatedAt: new Date(),
}; };
} catch (error) { } catch (error) {
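Taken together, the assignments above give the authenticated session context roughly this shape (the interface name is illustrative; `mcpServerUrl` now sits beside the license document instead of being written back onto it):

```typescript
import { ChannelType, type License } from '../types/user.js'; // types referenced above

// Rough sketch of the context returned by the authenticator; the interface name is illustrative.
interface AuthContextSketch {
  userId: string;
  channelType: ChannelType;   // WEBSOCKET or TELEGRAM
  channelUserId: string;      // same as userId for WebSocket, Telegram user id otherwise
  sessionId: string;          // e.g. `ws_${userId}_${Date.now()}`
  license: License;           // the license JSONB document (license.license)
  mcpServerUrl: string;       // resolved container endpoint
  authenticatedAt: Date;
}
```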
@@ -1,15 +1,15 @@
import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify'; import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify';
import type { Authenticator } from '../auth/authenticator.js'; import type { Authenticator } from '../auth/authenticator.js';
import { AgentHarness } from '../harness/agent-harness.js'; import type { AgentHarness, HarnessFactory } from '../harness/agent-harness.js';
import type { InboundMessage } from '../types/messages.js'; import type { InboundMessage } from '../types/messages.js';
import { randomUUID } from 'crypto'; import { randomUUID } from 'crypto';
import type { ProviderConfig } from '../llm/provider.js'; import type { ChannelAdapter, ChannelCapabilities } from '../workspace/index.js';
export interface TelegramHandlerConfig { export interface TelegramHandlerConfig {
authenticator: Authenticator; authenticator: Authenticator;
providerConfig: ProviderConfig;
telegramBotToken: string; telegramBotToken: string;
createHarness: HarnessFactory;
} }
interface TelegramUpdate { interface TelegramUpdate {
@@ -33,12 +33,18 @@ interface TelegramUpdate {
}; };
} }
interface TelegramSession {
harness: AgentHarness;
lastActivity: number;
}
/** /**
* Telegram webhook handler * Telegram webhook handler
*/ */
export class TelegramHandler { export class TelegramHandler {
private config: TelegramHandlerConfig; private config: TelegramHandlerConfig;
private sessions = new Map<string, AgentHarness>(); private sessions = new Map<string, TelegramSession>();
private chatIds = new Map<string, number>(); // sessionId -> chatId
constructor(config: TelegramHandlerConfig) { constructor(config: TelegramHandlerConfig) {
this.config = config; this.config = config;
@@ -90,18 +96,59 @@ export class TelegramHandler {
return; return;
} }
// Store chatId for this session
this.chatIds.set(authContext.sessionId, chatId);
// Create Telegram channel adapter
const telegramAdapter: ChannelAdapter = {
sendSnapshot: () => {
// Telegram doesn't support sync protocol
},
sendPatch: () => {
// Telegram doesn't support sync protocol
},
sendText: (msg) => {
this.sendTelegramMessage(chatId, msg.text).catch((err) => {
logger.error({ error: err }, 'Failed to send Telegram text');
});
},
sendChunk: () => {
// Telegram doesn't support streaming; full response sent after handleMessage resolves
},
sendImage: (msg) => {
this.sendTelegramPhoto(chatId, msg.data, msg.mimeType, msg.caption).catch((err) => {
logger.error({ error: err }, 'Failed to send Telegram image');
});
},
getCapabilities: (): ChannelCapabilities => ({
supportsSync: false,
supportsImages: true,
supportsMarkdown: true,
supportsStreaming: false,
supportsTradingViewEmbed: false,
}),
};
// Get or create harness // Get or create harness
let harness = this.sessions.get(authContext.sessionId); let session = this.sessions.get(authContext.sessionId);
if (!harness) { if (!session) {
harness = new AgentHarness({ const harness = this.config.createHarness({
userId: authContext.userId, userId: authContext.userId,
sessionId: authContext.sessionId, sessionId: authContext.sessionId,
license: authContext.license, license: authContext.license,
providerConfig: this.config.providerConfig, mcpServerUrl: authContext.mcpServerUrl,
logger, logger,
channelAdapter: telegramAdapter,
channelType: authContext.channelType,
channelUserId: authContext.channelUserId,
}); });
await harness.initialize(); await harness.initialize();
this.sessions.set(authContext.sessionId, harness); session = { harness, lastActivity: Date.now() };
this.sessions.set(authContext.sessionId, session);
} else {
// Update channel adapter and activity timestamp for existing session
session.harness.setChannelAdapter(telegramAdapter);
session.lastActivity = Date.now();
} }
// Process message // Process message
@@ -114,7 +161,7 @@ export class TelegramHandler {
timestamp: new Date(), timestamp: new Date(),
}; };
const response = await harness.handleMessage(inboundMessage); const response = await session.harness.handleMessage(inboundMessage);
// Send response back to Telegram // Send response back to Telegram
await this.sendTelegramMessage(chatId, response.content); await this.sendTelegramMessage(chatId, response.content);
@@ -127,7 +174,7 @@ export class TelegramHandler {
} }
/** /**
* Send message to Telegram chat * Send text message to Telegram chat
*/ */
private async sendTelegramMessage(chatId: number, text: string): Promise<void> { private async sendTelegramMessage(chatId: number, text: string): Promise<void> {
const url = `https://api.telegram.org/bot${this.config.telegramBotToken}/sendMessage`; const url = `https://api.telegram.org/bot${this.config.telegramBotToken}/sendMessage`;
@@ -155,10 +202,80 @@ export class TelegramHandler {
} }
/** /**
* Cleanup old sessions (call periodically) * Send photo to Telegram chat
* Converts base64 image data to a buffer and sends via sendPhoto API
*/ */
async cleanupSessions(_maxAgeMs = 30 * 60 * 1000): Promise<void> { private async sendTelegramPhoto(
// TODO: Track session last activity and cleanup chatId: number,
// For now, sessions persist until server restart base64Data: string,
mimeType: string,
caption?: string
): Promise<void> {
const url = `https://api.telegram.org/bot${this.config.telegramBotToken}/sendPhoto`;
try {
// Convert base64 to buffer
const imageBuffer = Buffer.from(base64Data, 'base64');
// Determine filename from mimeType
const extension = mimeType.split('/')[1] || 'png';
const filename = `image.${extension}`;
// Create FormData for multipart upload
const formData = new FormData();
formData.append('chat_id', chatId.toString());
formData.append('photo', new Blob([imageBuffer], { type: mimeType }), filename);
if (caption) {
formData.append('caption', caption);
}
const response = await fetch(url, {
method: 'POST',
body: formData,
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Telegram API error: ${response.statusText} - ${errorText}`);
}
} catch (error) {
console.error('Failed to send Telegram photo:', error);
throw error;
}
}
/**
* Clean up sessions that have been idle longer than maxAgeMs.
* Triggers Iceberg flush for each expired session via harness.cleanup().
*/
async cleanupSessions(maxAgeMs = 30 * 60 * 1000): Promise<void> {
const now = Date.now();
const expired: string[] = [];
for (const [sessionId, session] of this.sessions) {
if (now - session.lastActivity > maxAgeMs) {
expired.push(sessionId);
}
}
for (const sessionId of expired) {
const session = this.sessions.get(sessionId);
if (session) {
await session.harness.cleanup().catch(() => {});
this.sessions.delete(sessionId);
this.chatIds.delete(sessionId);
}
}
}
/**
* Flush and clean up all active sessions.
* Called during graceful shutdown.
*/
async endAllSessions(): Promise<void> {
const cleanups = Array.from(this.sessions.values()).map(s => s.harness.cleanup());
await Promise.allSettled(cleanups);
this.sessions.clear();
this.chatIds.clear();
} }
} }
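A sketch of how the new cleanup hooks might be wired up at startup; the interval, the signal handling, and the surrounding variables (`app`, `authenticator`, `telegramBotToken`, `createHarness`) are assumptions, not taken from main.ts:

```typescript
// Assumed wiring for the cleanup hooks added above; not taken from main.ts.
const telegramHandler = new TelegramHandler({ authenticator, telegramBotToken, createHarness });

const cleanupTimer = setInterval(() => {
  void telegramHandler.cleanupSessions().catch((err) => app.log.error({ err }, 'Telegram session cleanup failed'));
}, 5 * 60 * 1000);

process.on('SIGTERM', async () => {
  clearInterval(cleanupTimer);
  await telegramHandler.endAllSessions(); // flush conversations before shutdown
});
```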
@@ -1,11 +1,9 @@
import type { FastifyInstance, FastifyRequest } from 'fastify'; import type { FastifyInstance, FastifyRequest } from 'fastify';
import type { WebSocket } from '@fastify/websocket'; import type { WebSocket } from '@fastify/websocket';
import type { Authenticator } from '../auth/authenticator.js'; import type { Authenticator } from '../auth/authenticator.js';
import { AgentHarness } from '../harness/agent-harness.js'; import type { AgentHarness, HarnessFactory } from '../harness/agent-harness.js';
import type { InboundMessage } from '../types/messages.js'; import type { InboundMessage } from '../types/messages.js';
import { randomUUID } from 'crypto'; import { randomUUID } from 'crypto';
import type { ProviderConfig } from '../llm/provider.js';
import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js'; import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js';
import type { OHLCService } from '../services/ohlc-service.js'; import type { OHLCService } from '../services/ohlc-service.js';
import type { SymbolIndexService } from '../services/symbol-index-service.js'; import type { SymbolIndexService } from '../services/symbol-index-service.js';
@@ -29,12 +27,18 @@ function jsonStringifySafe(obj: any): string {
); );
} }
export type SessionStatus = 'authenticating' | 'spinning_up' | 'initializing' | 'ready' | 'error'
function sendStatus(socket: WebSocket, status: SessionStatus, message: string): void {
socket.send(JSON.stringify({ type: 'status', status, message }))
}
export interface WebSocketHandlerConfig { export interface WebSocketHandlerConfig {
authenticator: Authenticator; authenticator: Authenticator;
containerManager: ContainerManager; containerManager: ContainerManager;
providerConfig: ProviderConfig;
sessionRegistry: SessionRegistry; sessionRegistry: SessionRegistry;
eventSubscriber: EventSubscriber; eventSubscriber: EventSubscriber;
createHarness: HarnessFactory;
ohlcService?: OHLCService; // Optional for historical data support ohlcService?: OHLCService; // Optional for historical data support
symbolIndexService?: SymbolIndexService; // Optional for symbol search symbolIndexService?: SymbolIndexService; // Optional for symbol search
} }
@@ -78,13 +82,7 @@ export class WebSocketHandler {
const logger = app.log; const logger = app.log;
// Send initial connecting message // Send initial connecting message
socket.send( sendStatus(socket, 'authenticating', 'Authenticating...');
JSON.stringify({
type: 'status',
status: 'authenticating',
message: 'Authenticating...',
})
);
// Authenticate (returns immediately if container is spinning up) // Authenticate (returns immediately if container is spinning up)
const { authContext, isSpinningUp } = await this.config.authenticator.authenticateWebSocket(request); const { authContext, isSpinningUp } = await this.config.authenticator.authenticateWebSocket(request);
@@ -105,33 +103,23 @@ export class WebSocketHandler {
'WebSocket connection authenticated' 'WebSocket connection authenticated'
); );
// If container is spinning up, send status and start background polling // If container is spinning up, wait for it to be ready before continuing
if (isSpinningUp) { if (isSpinningUp) {
socket.send( sendStatus(socket, 'spinning_up', 'Your workspace is starting up, please wait...');
JSON.stringify({
type: 'status',
status: 'spinning_up',
message: 'Your workspace is starting up, please wait...',
})
);
// Start background polling for container readiness const ready = await this.config.containerManager.waitForContainerReady(authContext.userId, 120000);
this.pollContainerReadiness(socket, authContext, app).catch((error) => { if (!ready) {
logger.error({ error, userId: authContext.userId }, 'Error polling container readiness'); logger.warn({ userId: authContext.userId }, 'Container failed to become ready within timeout');
}); socket.send(JSON.stringify({ type: 'error', message: 'Workspace failed to start. Please try again later.' }));
socket.close(1011, 'Container startup timeout');
return;
}
// Don't return - continue with session setup so we can receive messages once ready logger.info({ userId: authContext.userId }, 'Container is ready, proceeding with session setup');
} else {
// Send workspace starting message
socket.send(
JSON.stringify({
type: 'status',
status: 'initializing',
message: 'Starting your workspace...',
})
);
} }
sendStatus(socket, 'initializing', 'Starting your workspace...');
// Create workspace manager for this session // Create workspace manager for this session
const workspace = new WorkspaceManager({ const workspace = new WorkspaceManager({
userId: authContext.userId, userId: authContext.userId,
@@ -149,6 +137,34 @@ export class WebSocketHandler {
sendPatch: (msg: PatchMessage) => { sendPatch: (msg: PatchMessage) => {
socket.send(JSON.stringify(msg)); socket.send(JSON.stringify(msg));
}, },
sendText: (msg) => {
socket.send(JSON.stringify({
type: 'text',
text: msg.text,
}));
},
sendChunk: (content) => {
socket.send(JSON.stringify({
type: 'agent_chunk',
content,
done: false,
}));
},
sendImage: (msg) => {
socket.send(JSON.stringify({
type: 'image',
data: msg.data,
mimeType: msg.mimeType,
caption: msg.caption,
}));
},
sendToolCall: (toolName, label) => {
socket.send(JSON.stringify({
type: 'agent_tool_call',
toolName,
label: label ?? toolName,
}));
},
getCapabilities: (): ChannelCapabilities => ({ getCapabilities: (): ChannelCapabilities => ({
supportsSync: true, supportsSync: true,
supportsImages: true, supportsImages: true,
@@ -167,14 +183,17 @@ export class WebSocketHandler {
workspace.setAdapter(wsAdapter); workspace.setAdapter(wsAdapter);
this.workspaces.set(authContext.sessionId, workspace); this.workspaces.set(authContext.sessionId, workspace);
// Create agent harness with workspace manager // Create agent harness via factory (storage deps injected by factory)
harness = new AgentHarness({ harness = this.config.createHarness({
userId: authContext.userId, userId: authContext.userId,
sessionId: authContext.sessionId, sessionId: authContext.sessionId,
license: authContext.license, license: authContext.license,
providerConfig: this.config.providerConfig, mcpServerUrl: authContext.mcpServerUrl,
logger, logger,
workspaceManager: workspace, workspaceManager: workspace,
channelAdapter: wsAdapter,
channelType: authContext.channelType,
channelUserId: authContext.channelUserId,
}); });
await harness.initialize(); await harness.initialize();
@@ -182,7 +201,7 @@ export class WebSocketHandler {
// Register session for event system // Register session for event system
// Container endpoint is derived from the MCP server URL (same container, different port) // Container endpoint is derived from the MCP server URL (same container, different port)
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.license.mcpServerUrl); const containerEventEndpoint = this.getContainerEventEndpoint(authContext.mcpServerUrl);
const session: Session = { const session: Session = {
userId: authContext.userId, userId: authContext.userId,
@@ -203,18 +222,16 @@ export class WebSocketHandler {
'Session registered for events' 'Session registered for events'
); );
// Send connected message (only if not spinning up - otherwise sent by pollContainerReadiness) sendStatus(socket, 'ready', 'Your workspace is ready!');
if (!isSpinningUp) { socket.send(
socket.send( JSON.stringify({
JSON.stringify({ type: 'connected',
type: 'connected', sessionId: authContext.sessionId,
sessionId: authContext.sessionId, userId: authContext.userId,
userId: authContext.userId, licenseType: authContext.license.licenseType,
licenseType: authContext.license.licenseType, message: 'Connected to Dexorder AI',
message: 'Connected to Dexorder AI', })
}) );
);
}
// Handle messages // Handle messages
socket.on('message', async (data: Buffer) => { socket.on('message', async (data: Buffer) => {
@@ -241,19 +258,16 @@ export class WebSocketHandler {
return; return;
} }
// Stream response chunks to client // Chunks are streamed via channelAdapter.sendChunk() during handleMessage
try { try {
for await (const chunk of harness.streamMessage(inboundMessage)) { // Acknowledge receipt immediately so the client can show the seen indicator
socket.send( socket.send(JSON.stringify({ type: 'agent_chunk', content: '', done: false }));
JSON.stringify({
type: 'agent_chunk',
content: chunk,
done: false,
})
);
}
// Send final chunk with done flag logger.info('Calling harness.handleMessage');
await harness.handleMessage(inboundMessage);
// Send done marker after all chunks have been streamed
logger.debug('Sending done marker to client');
socket.send( socket.send(
JSON.stringify({ JSON.stringify({
type: 'agent_chunk', type: 'agent_chunk',
@@ -331,73 +345,11 @@ export class WebSocketHandler {
} }
} }
/**
* Poll for container readiness in the background
* Sends notification to client when container is ready
*/
private async pollContainerReadiness(
socket: WebSocket,
authContext: any,
app: FastifyInstance
): Promise<void> {
const logger = app.log;
const userId = authContext.userId;
logger.info({ userId }, 'Starting background poll for container readiness');
try {
// Wait for container to become ready (2 minute timeout)
const ready = await this.config.containerManager.waitForContainerReady(userId, 120000);
if (ready) {
logger.info({ userId }, 'Container is now ready, notifying client');
// Send ready notification
socket.send(
JSON.stringify({
type: 'status',
status: 'ready',
message: 'Your workspace is ready!',
})
);
// Also send the 'connected' message
socket.send(
JSON.stringify({
type: 'connected',
sessionId: authContext.sessionId,
userId: authContext.userId,
licenseType: authContext.license.licenseType,
message: 'Connected to Dexorder AI',
})
);
} else {
logger.warn({ userId }, 'Container failed to become ready within timeout');
socket.send(
JSON.stringify({
type: 'error',
message: 'Workspace failed to start. Please try again later.',
})
);
}
} catch (error) {
logger.error({ error, userId }, 'Error waiting for container readiness');
socket.send(
JSON.stringify({
type: 'error',
message: 'Error starting workspace. Please try again later.',
})
);
}
}
/** /**
* Derive the container's XPUB event endpoint from the MCP server URL. * Derive the container's XPUB event endpoint from the MCP server URL.
* *
* MCP URL format: http://agent-user-abc123.dexorder-agents.svc.cluster.local:3000 * MCP URL format: http://sandbox-user-abc123.dexorder-sandboxes.svc.cluster.local:3000
* Event endpoint: tcp://agent-user-abc123.dexorder-agents.svc.cluster.local:5570 * Event endpoint: tcp://sandbox-user-abc123.dexorder-sandboxes.svc.cluster.local:5570
*/ */
private getContainerEventEndpoint(mcpServerUrl: string): string { private getContainerEventEndpoint(mcpServerUrl: string): string {
try { try {
@@ -578,4 +530,14 @@ export class WebSocketHandler {
); );
} }
} }
/**
* Flush and clean up all active sessions.
* Called during graceful shutdown to ensure conversations are persisted.
*/
async endAllSessions(): Promise<void> {
const cleanups = Array.from(this.harnesses.values()).map(h => h.cleanup());
await Promise.allSettled(cleanups);
this.harnesses.clear();
}
} }
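For reference, the message types emitted above (`status`, `connected`, `text`, `agent_chunk`, `agent_tool_call`, `image`, `error`) imply a client loop along these lines; this is an illustrative browser-side sketch, and the UI helpers (`showStatus`, `appendToResponse`, etc.) are placeholders:

```typescript
// Illustrative browser-side consumer of the message types sent by the handler above; not repo code.
// showStatus/appendToResponse/finalizeResponse/appendMessage/renderImage/showError are placeholder UI hooks.
const ws = new WebSocket('ws://localhost:3000/ws/chat');

ws.onmessage = (event: MessageEvent) => {
  const msg = JSON.parse(event.data as string);
  switch (msg.type) {
    case 'status':       // authenticating | spinning_up | initializing | ready
      showStatus(msg.message);
      break;
    case 'connected':
      console.log(`Session ${msg.sessionId} ready (license: ${msg.licenseType})`);
      break;
    case 'agent_chunk':  // streamed tokens; an empty chunk with done:false is the receipt ack
      msg.done ? finalizeResponse() : appendToResponse(msg.content);
      break;
    case 'agent_tool_call':
      showStatus(`Running ${msg.label}...`);
      break;
    case 'text':
      appendMessage(msg.text);
      break;
    case 'image':
      renderImage(`data:${msg.mimeType};base64,${msg.data}`, msg.caption);
      break;
    case 'error':
      showError(msg.message);
      break;
  }
};
```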
@@ -21,6 +21,7 @@ export interface DuckDBConfig {
s3Endpoint?: string; s3Endpoint?: string;
s3AccessKey?: string; s3AccessKey?: string;
s3SecretKey?: string; s3SecretKey?: string;
conversationsBucket?: string; // S3 bucket for conversation cold storage
} }
/** /**
@@ -40,6 +41,7 @@ export class DuckDBClient {
accessKey?: string; accessKey?: string;
secretKey?: string; secretKey?: string;
}; };
private conversationsBucket?: string;
private logger: FastifyBaseLogger; private logger: FastifyBaseLogger;
private initialized = false; private initialized = false;
@@ -49,6 +51,7 @@ export class DuckDBClient {
this.catalogUri = config.catalogUri; this.catalogUri = config.catalogUri;
this.ohlcCatalogUri = config.ohlcCatalogUri || config.catalogUri; this.ohlcCatalogUri = config.ohlcCatalogUri || config.catalogUri;
this.ohlcNamespace = config.ohlcNamespace || 'trading'; this.ohlcNamespace = config.ohlcNamespace || 'trading';
this.conversationsBucket = config.conversationsBucket;
this.s3Config = { this.s3Config = {
endpoint: config.s3Endpoint, endpoint: config.s3Endpoint,
accessKey: config.s3AccessKey, accessKey: config.s3AccessKey,
@@ -190,7 +193,23 @@ export class DuckDBClient {
); );
if (!tablePath) { if (!tablePath) {
this.logger.warn('Conversations table not found'); // Fallback: scan Parquet files written directly to conversations bucket
if (this.conversationsBucket) {
this.logger.debug({ userId, sessionId }, 'REST catalog miss, scanning Parquet cold storage');
const parquetPath = `s3://${this.conversationsBucket}/gateway/conversations/**/user_id=${userId}/${sessionId}.parquet`;
const fallbackSql = `
SELECT id, user_id, session_id, role, content, metadata, timestamp
FROM read_parquet('${parquetPath}')
ORDER BY timestamp ASC
${options?.limit ? `LIMIT ${options.limit}` : ''}
`;
try {
return await this.query(fallbackSql);
} catch {
// File may not exist yet
}
}
this.logger.warn('Conversations table not found and no cold storage configured');
return []; return [];
} }
@@ -526,6 +545,65 @@ export class DuckDBClient {
} }
} }
/**
* Append a batch of conversation messages as a Parquet file in S3.
* Called once per session at session end to avoid small-file fragmentation.
*/
async appendMessages(
userId: string,
sessionId: string,
messages: Array<{
id: string;
user_id: string;
session_id: string;
role: string;
content: string;
metadata: string;
timestamp: number;
}>
): Promise<void> {
await this.initialize();
if (!this.conversationsBucket || messages.length === 0) {
return;
}
const now = new Date();
const year = now.getUTCFullYear();
const month = String(now.getUTCMonth() + 1).padStart(2, '0');
const s3Path = `s3://${this.conversationsBucket}/gateway/conversations/year=${year}/month=${month}/user_id=${userId}/${sessionId}.parquet`;
// Use a timestamp-based name to avoid cross-session collisions
const tempTable = `msg_flush_${Date.now()}`;
try {
await this.query(`
CREATE TEMP TABLE ${tempTable} (
id VARCHAR,
user_id VARCHAR,
session_id VARCHAR,
role VARCHAR,
content VARCHAR,
metadata VARCHAR,
timestamp BIGINT
)
`);
for (const msg of messages) {
await this.query(
`INSERT INTO ${tempTable} VALUES (?, ?, ?, ?, ?, ?, ?)`,
[msg.id, msg.user_id, msg.session_id, msg.role, msg.content, msg.metadata, msg.timestamp]
);
}
await this.query(`COPY ${tempTable} TO '${s3Path}' (FORMAT PARQUET)`);
this.logger.info({ userId, sessionId, count: messages.length, s3Path }, 'Conversation flushed to Parquet');
} finally {
await this.query(`DROP TABLE IF EXISTS ${tempTable}`).catch(() => {});
}
}
/** /**
* Close the DuckDB connection * Close the DuckDB connection
*/ */
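A sketch of the session-end flush path using the `appendMessages()` added above; the `duckdb` instance, bucket name, and message values are assumptions:

```typescript
import { randomUUID } from 'crypto';

// Assumed usage of the new appendMessages(); `duckdb` is an already-initialized DuckDBClient.
const sessionId = 'ws_user-abc123_1730000000000';
await duckdb.appendMessages('user-abc123', sessionId, [
  {
    id: randomUUID(),
    user_id: 'user-abc123',
    session_id: sessionId,
    role: 'user',
    content: 'Show me BTC/USDT on the 4h chart',
    metadata: '{}',
    timestamp: Date.now(),
  },
]);
// Lands at s3://<conversationsBucket>/gateway/conversations/year=YYYY/month=MM/user_id=user-abc123/<sessionId>.parquet,
// the same layout the read_parquet() fallback above scans when the Iceberg conversations table is absent.
```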
@@ -27,6 +27,9 @@ export interface IcebergConfig {
// OHLC/Trading data catalog (can be same or different from conversation catalog) // OHLC/Trading data catalog (can be same or different from conversation catalog)
ohlcCatalogUri?: string; ohlcCatalogUri?: string;
ohlcNamespace?: string; ohlcNamespace?: string;
// S3 bucket for conversation cold storage (Parquet flush at session end)
conversationsBucket?: string;
} }
/** /**
@@ -99,6 +102,7 @@ export class IcebergClient {
s3Endpoint: config.s3Endpoint, s3Endpoint: config.s3Endpoint,
s3AccessKey: config.s3AccessKey, s3AccessKey: config.s3AccessKey,
s3SecretKey: config.s3SecretKey, s3SecretKey: config.s3SecretKey,
conversationsBucket: config.conversationsBucket,
}, },
logger logger
); );
@@ -137,6 +141,18 @@ export class IcebergClient {
return this.duckdb.queryCheckpoint(userId, sessionId, checkpointId); return this.duckdb.queryCheckpoint(userId, sessionId, checkpointId);
} }
/**
* Append a batch of conversation messages as a Parquet file in S3.
* Called once per session at session end.
*/
async appendMessages(
userId: string,
sessionId: string,
messages: IcebergMessage[]
): Promise<void> {
return this.duckdb.appendMessages(userId, sessionId, messages);
}
/** /**
* Get table metadata * Get table metadata
*/ */
@@ -41,11 +41,8 @@ export class UserService {
`SELECT `SELECT
user_id as "userId", user_id as "userId",
email, email,
license_type as "licenseType", license,
features,
resource_limits as "resourceLimits",
mcp_server_url as "mcpServerUrl", mcp_server_url as "mcpServerUrl",
preferred_model as "preferredModel",
expires_at as "expiresAt", expires_at as "expiresAt",
created_at as "createdAt", created_at as "createdAt",
updated_at as "updatedAt" updated_at as "updatedAt"
@@ -65,11 +62,8 @@ export class UserService {
return UserLicenseSchema.parse({ return UserLicenseSchema.parse({
userId: row.userId, userId: row.userId,
email: row.email, email: row.email,
licenseType: row.licenseType, license: row.license,
features: row.features,
resourceLimits: row.resourceLimits,
mcpServerUrl: row.mcpServerUrl, mcpServerUrl: row.mcpServerUrl,
preferredModel: row.preferredModel,
expiresAt: row.expiresAt, expiresAt: row.expiresAt,
createdAt: row.createdAt, createdAt: row.createdAt,
updatedAt: row.updatedAt, updatedAt: row.updatedAt,
@@ -1,25 +1,29 @@
# Agent Harness # Agent Harness
Comprehensive agent orchestration system for Dexorder AI platform, built on LangChain.js and LangGraph.js. Comprehensive agent orchestration system for Dexorder AI platform, built on LangChain.js deep agents architecture.
## Architecture Overview ## Architecture Overview
``` ```
gateway/src/harness/ gateway/src/
├── memory/ # Storage layer (Redis + Iceberg + Qdrant) ├── harness/
├── skills/ # Individual capabilities (markdown + TypeScript) │ ├── memory/ # Storage layer (Redis + Iceberg + Qdrant)
├── subagents/ # Specialized agents with multi-file memory ├── subagents/ # Specialized agents with multi-file memory
├── workflows/ # LangGraph state machines ├── workflows/ # LangGraph state machines
├── tools/ # Platform tools (non-MCP) │ ├── prompts/ # System prompts
├── config/ # Configuration files │ ├── agent-harness.ts # Main orchestrator
└── index.ts # Main exports └── index.ts # Exports
└── tools/ # LangChain tools (platform + MCP)
├── platform/ # Local platform tools
├── mcp/ # Remote MCP tool wrappers
└── tool-registry.ts # Tool-to-agent routing
``` ```
## Core Components ## Core Components
### 1. Memory Layer (`memory/`) ### 1. Memory Layer (`memory/`)
Tiered storage architecture as per [architecture discussion](/chat/harness-rag.txt): Tiered storage architecture:
- **Redis**: Hot state (active sessions, checkpoints) - **Redis**: Hot state (active sessions, checkpoints)
- **Iceberg**: Cold storage (durable conversations, analytics) - **Iceberg**: Cold storage (durable conversations, analytics)
@@ -32,27 +36,32 @@ Tiered storage architecture as per [architecture discussion](/chat/harness-rag.t
- `embedding-service.ts`: Text→vector conversion - `embedding-service.ts`: Text→vector conversion
- `session-context.ts`: User context with channel metadata - `session-context.ts`: User context with channel metadata
### 2. Skills (`skills/`) ### 2. Tools (`../tools/`)
Self-contained capabilities with markdown definitions: Standard LangChain tools following deep agents best practices:
- `*.skill.md`: Human-readable documentation **Platform Tools** (local services):
- `*.ts`: Implementation extending `BaseSkill` - `symbol_lookup`: Symbol search and metadata resolution
- Input validation and error handling - `get_chart_data`: OHLCV data with workspace defaults
- Can use LLM, MCP tools, or platform tools
**MCP Tools** (remote, per-user):
- Dynamically discovered from user's MCP server
- Wrapped as standard LangChain `DynamicStructuredTool`
- Filtered per-agent via `ToolRegistry`
**Example:** **Example:**
```typescript ```typescript
import { MarketAnalysisSkill } from './skills'; import { getToolRegistry } from '../tools';
const skill = new MarketAnalysisSkill(logger, model); const toolRegistry = getToolRegistry();
const result = await skill.execute({ const tools = await toolRegistry.getToolsForAgent(
context: userContext, 'main',
parameters: { ticker: 'BTC/USDT', period: '4h' } mcpClient,
}); availableMCPTools
);
``` ```
See [skills/README.md](skills/README.md) for authoring guide. See `../tools/tool-registry.ts` for tool configuration.
### 3. Subagents (`subagents/`) ### 3. Subagents (`subagents/`)
@@ -75,11 +84,20 @@ subagents/
- Split memory into logical files (better organization) - Split memory into logical files (better organization)
- Model overrides - Model overrides
- Capability tagging - Capability tagging
- Configurable tool access via ToolRegistry
**Tool Configuration** (in `config.yaml`):
```yaml
tools:
platform: ['symbol_lookup'] # Platform tools
mcp: ['category_*'] # MCP tool patterns
```
**Example:** **Example:**
```typescript ```typescript
const codeReviewer = await createCodeReviewerSubagent(model, logger, basePath); const tools = await toolRegistry.getToolsForAgent('research', mcpClient, availableMCPTools);
const review = await codeReviewer.execute({ userContext }, strategyCode); const subagent = await createResearchSubagent(model, logger, basePath, mcpClient, tools);
const result = await subagent.execute({ userContext }, instruction);
``` ```
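The `mcp: ['category_*']` entries in the config above read like glob patterns; a minimal sketch of how such per-agent filtering could work (this is an assumption about ToolRegistry internals, not the actual implementation in `../tools/tool-registry.ts`):

```typescript
// Assumption about how ToolRegistry might match config patterns against discovered MCP tool names.
function escapeRegExp(s: string): string {
  return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

function matchesPattern(toolName: string, pattern: string): boolean {
  // Convert a simple glob ('category_*') into an anchored regex
  const regex = new RegExp('^' + pattern.split('*').map(escapeRegExp).join('.*') + '$');
  return regex.test(toolName);
}

function filterMCPTools(available: string[], patterns: string[]): string[] {
  return available.filter((name) => patterns.some((p) => matchesPattern(name, p)));
}

// filterMCPTools(['category_ohlc', 'orders_submit'], ['category_*']) -> ['category_ohlc']
```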
### 4. Workflows (`workflows/`) ### 4. Workflows (`workflows/`)
@@ -1,22 +1,56 @@
import type { BaseMessage } from '@langchain/core/messages'; import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages'; import { HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import type { UserLicense } from '../types/user.js'; import type { License } from '../types/user.js';
import { ChannelType } from '../types/user.js';
import type { ConversationStore } from './memory/conversation-store.js';
import type { InboundMessage, OutboundMessage } from '../types/messages.js'; import type { InboundMessage, OutboundMessage } from '../types/messages.js';
import { MCPClientConnector } from './mcp-client.js'; import { MCPClientConnector } from './mcp-client.js';
import { CONTEXT_URIS, type ResourceContent } from '../types/resources.js';
import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js'; import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js';
import { ModelRouter, RoutingStrategy } from '../llm/router.js'; import { ModelRouter, RoutingStrategy } from '../llm/router.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js'; import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import type { ChannelAdapter } from '../workspace/index.js';
import type { ResearchSubagent } from './subagents/research/index.js';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { getToolRegistry } from '../tools/tool-registry.js';
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
import { createUserContext } from './memory/session-context.js';
import { readFile } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
export interface AgentHarnessConfig { const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
* Session-specific config provided by channel handlers.
* Contains only per-connection details — no infrastructure dependencies.
*/
export interface HarnessSessionConfig {
userId: string; userId: string;
sessionId: string; sessionId: string;
license: UserLicense; license: License;
providerConfig: ProviderConfig; mcpServerUrl: string;
logger: FastifyBaseLogger; logger: FastifyBaseLogger;
workspaceManager?: WorkspaceManager; workspaceManager?: WorkspaceManager;
channelAdapter?: ChannelAdapter;
channelType?: ChannelType;
channelUserId?: string;
}
/**
* Factory function type for creating AgentHarness instances.
* Created in main.ts with infrastructure (storage, providerConfig) captured in closure.
* Channel handlers call this factory without knowing about Redis or Iceberg.
*/
export type HarnessFactory = (sessionConfig: HarnessSessionConfig) => AgentHarness;
export interface AgentHarnessConfig extends HarnessSessionConfig {
providerConfig: ProviderConfig;
conversationStore?: ConversationStore;
historyLimit: number;
researchSubagent?: ResearchSubagent;
} }
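A sketch of the factory closure the comment above describes; the captured dependency names and the history limit are assumptions, only the closure pattern is taken from the comment:

```typescript
// Illustrative HarnessFactory from main.ts; providerConfig and conversationStore are assumed
// to be constructed during startup and captured here so channel handlers never see them.
const createHarness: HarnessFactory = (session: HarnessSessionConfig) =>
  new AgentHarness({
    ...session,            // userId, sessionId, license, mcpServerUrl, logger, channel adapter
    providerConfig,        // LLM provider settings (infrastructure)
    conversationStore,     // Redis/Iceberg-backed store (infrastructure)
    historyLimit: 50,      // assumed default for the recent-history window
  });
```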
/** /**
@@ -27,32 +61,59 @@ export interface AgentHarnessConfig {
* 1. Fetches context from user's MCP resources * 1. Fetches context from user's MCP resources
* 2. Routes to appropriate LLM model * 2. Routes to appropriate LLM model
* 3. Calls LLM with embedded context * 3. Calls LLM with embedded context
* 4. Routes tool calls to user's MCP or platform tools * 4. Routes tool calls to platform tools or user's MCP tools
* 5. Saves messages back to user's MCP * 5. Saves messages back to user's MCP
*/ */
export class AgentHarness { export class AgentHarness {
private static systemPromptTemplate: string | null = null;
private config: AgentHarnessConfig; private config: AgentHarnessConfig;
private modelFactory: LLMProviderFactory; private modelFactory: LLMProviderFactory;
private modelRouter: ModelRouter; private modelRouter: ModelRouter;
private mcpClient: MCPClientConnector; private mcpClient: MCPClientConnector;
private workspaceManager?: WorkspaceManager; private workspaceManager?: WorkspaceManager;
private lastWorkspaceSeq: number = 0; private channelAdapter?: ChannelAdapter;
private isFirstMessage: boolean = true; private isFirstMessage: boolean = true;
private researchSubagent?: ResearchSubagent;
private availableMCPTools: MCPToolInfo[] = [];
private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
private conversationStore?: ConversationStore;
constructor(config: AgentHarnessConfig) { constructor(config: AgentHarnessConfig) {
this.config = config; this.config = config;
this.workspaceManager = config.workspaceManager; this.workspaceManager = config.workspaceManager;
this.channelAdapter = config.channelAdapter;
this.researchSubagent = config.researchSubagent;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger); this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger); this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
this.conversationStore = config.conversationStore;
this.mcpClient = new MCPClientConnector({ this.mcpClient = new MCPClientConnector({
userId: config.userId, userId: config.userId,
mcpServerUrl: config.license.mcpServerUrl, mcpServerUrl: config.mcpServerUrl,
logger: config.logger, logger: config.logger,
}); });
} }
/**
* Load system prompt template from file (cached)
*/
private static async loadSystemPromptTemplate(): Promise<string> {
if (!AgentHarness.systemPromptTemplate) {
const templatePath = join(__dirname, 'prompts', 'system-prompt.md');
AgentHarness.systemPromptTemplate = await readFile(templatePath, 'utf-8');
}
return AgentHarness.systemPromptTemplate;
}
/**
* Set the channel adapter (can be called after construction)
*/
setChannelAdapter(adapter: ChannelAdapter): void {
this.channelAdapter = adapter;
}
/** /**
* Initialize harness and connect to user's MCP server * Initialize harness and connect to user's MCP server
*/ */
@@ -64,6 +125,13 @@ export class AgentHarness {
try { try {
await this.mcpClient.connect(); await this.mcpClient.connect();
// Discover available MCP tools from user's server
await this.discoverMCPTools();
// Initialize research subagent if not provided
await this.initializeResearchSubagent();
this.config.logger.info('Agent harness initialized'); this.config.logger.info('Agent harness initialized');
} catch (error) { } catch (error) {
this.config.logger.error({ error }, 'Failed to initialize agent harness'); this.config.logger.error({ error }, 'Failed to initialize agent harness');
@@ -71,46 +139,384 @@ export class AgentHarness {
} }
} }
/**
* Discover available MCP tools from user's server
*/
private async discoverMCPTools(): Promise<void> {
try {
this.config.logger.debug('Discovering MCP tools from user server');
// Call MCP client to list tools
const tools = await this.mcpClient.listTools();
// Convert to MCPToolInfo format
this.availableMCPTools = tools.map(tool => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema as any,
}));
this.config.logger.info(
{
toolCount: this.availableMCPTools.length,
toolNames: this.availableMCPTools.map(t => t.name),
},
'MCP tools discovered'
);
} catch (error) {
this.config.logger.warn(
{
error,
errorMessage: (error as Error)?.message,
errorName: (error as Error)?.name,
errorCode: (error as any)?.code,
},
'Failed to discover MCP tools - continuing without remote tools'
);
// Don't throw - MCP tools are optional, agent can still work with platform tools
this.availableMCPTools = [];
}
}
/**
* Initialize research subagent
*/
private async initializeResearchSubagent(): Promise<void> {
if (this.researchSubagent) {
this.config.logger.debug('Research subagent already provided');
return;
}
this.config.logger.debug('Creating research subagent for session');
try {
const { createResearchSubagent } = await import('./subagents/research/index.js');
// Create a model for the research subagent
const model = await this.modelRouter.route(
'research analysis', // dummy query
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId
);
// Get tools for research subagent from registry
// Images from MCP responses are captured via onImage and routed to the subagent
const toolRegistry = getToolRegistry();
const researchTools = await toolRegistry.getToolsForAgent(
'research',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager,
(img) => this.researchImageCapture.push(img)
);
// Path resolution: use the compiled output path
const researchSubagentPath = join(__dirname, 'subagents', 'research');
this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
this.researchSubagent = await createResearchSubagent(
model,
this.config.logger,
researchSubagentPath,
this.mcpClient,
researchTools,
this.researchImageCapture
);
this.config.logger.info(
{
toolCount: researchTools.length,
toolNames: researchTools.map(t => t.name),
},
'Research subagent created successfully'
);
} catch (error) {
this.config.logger.error(
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
'Failed to create research subagent'
);
// Don't throw - research subagent is optional
}
}
/**
* Execute model with tool calling loop
* Handles multi-turn tool calls until the model produces a final text response
*/
private async executeWithToolCalling(
model: any,
messages: BaseMessage[],
tools: DynamicStructuredTool[],
maxIterations: number = 2
): Promise<string> {
this.config.logger.info(
{ toolCount: tools.length, maxIterations },
'Starting tool calling loop'
);
const messagesCopy = [...messages];
let iterations = 0;
while (iterations < maxIterations) {
iterations++;
this.config.logger.info(
{
iteration: iterations,
messageCount: messagesCopy.length,
lastMessageType: messagesCopy[messagesCopy.length - 1]?.constructor.name,
},
'Tool calling loop iteration'
);
this.config.logger.debug('Streaming model response...');
let response: any = null;
try {
const stream = await model.stream(messagesCopy);
for await (const chunk of stream) {
if (typeof chunk.content === 'string' && chunk.content.length > 0) {
this.channelAdapter?.sendChunk(chunk.content);
} else if (Array.isArray(chunk.content)) {
for (const block of chunk.content) {
if (block.type === 'text' && block.text) {
this.channelAdapter?.sendChunk(block.text);
}
}
}
response = response ? response.concat(chunk) : chunk;
}
} catch (invokeError: any) {
this.config.logger.error(
{
error: invokeError,
errorMessage: invokeError?.message,
errorStack: invokeError?.stack,
iteration: iterations,
messageCount: messagesCopy.length,
},
'Model streaming failed in tool calling loop'
);
throw invokeError;
}
this.config.logger.info(
{
hasContent: !!response.content,
contentLength: typeof response.content === 'string' ? response.content.length : 0,
hasToolCalls: !!response.tool_calls,
toolCallCount: response.tool_calls?.length || 0,
},
'Model response received'
);
// Check if model wants to call tools
if (!response.tool_calls || response.tool_calls.length === 0) {
// No tool calls - return final response
let finalContent: string;
if (typeof response.content === 'string') {
finalContent = response.content;
} else if (Array.isArray(response.content)) {
finalContent = response.content
.filter((block: any) => block.type === 'text')
.map((block: any) => block.text || '')
.join('');
} else {
finalContent = JSON.stringify(response.content);
}
this.config.logger.info(
{ finalContentLength: finalContent.length, iterations },
'Tool calling loop complete - no more tool calls'
);
return finalContent;
}
this.config.logger.info(
{ toolCalls: response.tool_calls.map((tc: any) => tc.name) },
'Processing tool calls'
);
// Add assistant message with tool calls to history
messagesCopy.push(response);
// Execute each tool call
for (const toolCall of response.tool_calls) {
this.config.logger.info(
{ tool: toolCall.name, args: toolCall.args },
'Executing tool call'
);
const tool = tools.find(t => t.name === toolCall.name);
if (!tool) {
this.config.logger.warn({ tool: toolCall.name }, 'Tool not found');
messagesCopy.push(
new ToolMessage({
content: `Error: Tool '${toolCall.name}' not found`,
tool_call_id: toolCall.id,
})
);
continue;
}
try {
this.channelAdapter?.sendToolCall?.(toolCall.name, this.getToolLabel(toolCall.name));
const result = await tool.func(toolCall.args);
// Process result to extract images and send them via channel adapter
const processedResult = this.processToolResult(result, toolCall.name);
this.config.logger.debug(
{
tool: toolCall.name,
originalResultLength: result.length,
processedResultLength: processedResult.length,
},
'Tool result processed'
);
messagesCopy.push(
new ToolMessage({
content: processedResult,
tool_call_id: toolCall.id,
})
);
this.config.logger.info(
{ tool: toolCall.name, resultLength: processedResult.length },
'Tool execution completed'
);
} catch (error) {
this.config.logger.error(
{
error,
errorMessage: (error as Error)?.message,
errorStack: (error as Error)?.stack,
tool: toolCall.name,
args: toolCall.args,
},
'Tool execution failed'
);
messagesCopy.push(
new ToolMessage({
content: `Error: ${error}`,
tool_call_id: toolCall.id,
})
);
}
}
}
// Max iterations reached - return what we have
this.config.logger.warn('Max tool calling iterations reached');
return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.';
}
/** /**
* Handle incoming message from user * Handle incoming message from user
*/ */
async handleMessage(message: InboundMessage): Promise<OutboundMessage> { async handleMessage(message: InboundMessage): Promise<OutboundMessage> {
this.config.logger.info( this.config.logger.info(
{ messageId: message.messageId, userId: message.userId }, { messageId: message.messageId, userId: message.userId, content: message.content.substring(0, 100) },
'Processing user message' 'Processing user message'
); );
try { try {
// 1. Fetch context resources from user's MCP server // 1. Build system prompt from template
this.config.logger.debug('Fetching context resources from MCP'); this.config.logger.debug('Building system prompt');
const contextResources = await this.fetchContextResources(); const systemPrompt = await this.buildSystemPrompt();
this.config.logger.debug({ systemPromptLength: systemPrompt.length }, 'System prompt built');
// 2. Build system prompt from resources // 2. Load recent conversation history
const systemPrompt = this.buildSystemPrompt(contextResources); const channelKey = this.config.channelType ?? ChannelType.WEBSOCKET;
const storedMessages = this.conversationStore
? await this.conversationStore.getRecentMessages(
this.config.userId, this.config.sessionId, this.config.historyLimit, channelKey
)
: [];
const history = this.conversationStore
? this.conversationStore.toLangChainMessages(storedMessages)
: [];
this.config.logger.debug({ historyLength: history.length }, 'Conversation history loaded');
// 3. Build messages with conversation context from MCP // 4. Get the configured model
const messages = this.buildMessages(message, contextResources); this.config.logger.debug('Routing to model');
// 4. Route to appropriate model
const model = await this.modelRouter.route( const model = await this.modelRouter.route(
message.content, message.content,
this.config.license, this.config.license,
RoutingStrategy.COMPLEXITY RoutingStrategy.COMPLEXITY,
this.config.userId
); );
this.config.logger.info({ modelName: model.constructor.name }, 'Model selected');
// 5. Build LangChain messages // 5. Build LangChain messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages); const langchainMessages = this.buildLangChainMessages(systemPrompt, history, message.content);
this.config.logger.debug({ messageCount: langchainMessages.length }, 'LangChain messages built');
// 6. Call LLM with streaming // 6. Get tools for main agent from registry
this.config.logger.debug('Invoking LLM'); const toolRegistry = getToolRegistry();
const response = await model.invoke(langchainMessages); const tools = await toolRegistry.getToolsForAgent(
'main',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager // Pass session workspace manager
);
// 7. Extract text response (tool handling TODO) // Add research subagent as a tool if available
const assistantMessage = response.content as string; if (this.researchSubagent) {
const subagentContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
// TODO: Save messages to Iceberg conversation table instead of MCP tools.push(createResearchAgentTool({
// Should batch-insert periodically or on session end to avoid many small Parquet files researchSubagent: this.researchSubagent,
// await icebergConversationStore.appendMessages([...]); context: subagentContext,
logger: this.config.logger,
}));
}
this.config.logger.info(
{
toolCount: tools.length,
toolNames: tools.map(t => t.name),
},
'Tools loaded for main agent'
);
// 7. Bind tools to model
const modelWithTools = tools.length > 0 && model.bindTools ? model.bindTools(tools) : model;
if (tools.length > 0) {
this.config.logger.info(
{ modelType: modelWithTools.constructor.name, toolsBound: tools.length > 0 && !!model.bindTools },
'Model bound with tools'
);
}
// 8. Call LLM with tool calling loop
this.config.logger.info('Invoking LLM with tool support');
const assistantMessage = await this.executeWithToolCalling(modelWithTools, langchainMessages, tools);
this.config.logger.info(
{ responseLength: assistantMessage.length },
'LLM response received'
);
// Save user message and assistant response to conversation store
if (this.conversationStore) {
await this.conversationStore.saveMessage(
this.config.userId, this.config.sessionId, 'user', message.content, undefined, channelKey
);
await this.conversationStore.saveMessage(
this.config.userId, this.config.sessionId, 'assistant', assistantMessage, undefined, channelKey
);
}
// Mark first message as processed // Mark first message as processed
if (this.isFirstMessage) { if (this.isFirstMessage) {
@@ -129,214 +535,174 @@ export class AgentHarness {
} }
} }
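A condensed sketch of the new message-handling turn, using the helper names and signatures shown above; the `'COMPLEXITY'` routing value, the untyped `harness`/`tools` parameters, and the fallback channel key are illustrative stand-ins, and logging plus error handling are omitted:

```typescript
import type { BaseMessage } from '@langchain/core/messages';

// Sketch only: the shape of one handleMessage turn (prompt -> history -> model -> tools -> save).
async function runTurn(harness: any, tools: any[], userText: string): Promise<string> {
  const channelKey = harness.config.channelType ?? 'websocket';           // illustrative fallback
  const systemPrompt: string = await harness.buildSystemPrompt();          // 1. template + license + workspace state
  const stored = await harness.conversationStore.getRecentMessages(        // 2. hot history from Redis
    harness.config.userId, harness.config.sessionId, harness.config.historyLimit, channelKey);
  const history: BaseMessage[] = harness.conversationStore.toLangChainMessages(stored);
  const model = await harness.modelRouter.route(                           // 3. route to a model
    userText, harness.config.license, 'COMPLEXITY', harness.config.userId);
  const messages = harness.buildLangChainMessages(systemPrompt, history, userText);
  const boundModel = tools.length > 0 && model.bindTools ? model.bindTools(tools) : model;
  const reply: string = await harness.executeWithToolCalling(boundModel, messages, tools);
  await harness.conversationStore.saveMessage(                             // 4. persist both sides of the turn
    harness.config.userId, harness.config.sessionId, 'user', userText, undefined, channelKey);
  await harness.conversationStore.saveMessage(
    harness.config.userId, harness.config.sessionId, 'assistant', reply, undefined, channelKey);
  return reply;
}
```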
/**
* Stream response from LLM
*/
async *streamMessage(message: InboundMessage): AsyncGenerator<string> {
try {
// Fetch context
const contextResources = await this.fetchContextResources();
const systemPrompt = this.buildSystemPrompt(contextResources);
const messages = this.buildMessages(message, contextResources);
// Route to model
const model = await this.modelRouter.route(
message.content,
this.config.license,
RoutingStrategy.COMPLEXITY
);
// Build messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
// Stream response
const stream = await model.stream(langchainMessages);
let fullResponse = '';
for await (const chunk of stream) {
const content = chunk.content as string;
fullResponse += content;
yield content;
}
// TODO: Save messages to Iceberg conversation table instead of MCP
// Should batch-insert periodically or on session end to avoid many small Parquet files
// await icebergConversationStore.appendMessages([
// { role: 'user', content: message.content, timestamp: message.timestamp },
// { role: 'assistant', content: fullResponse, timestamp: new Date() }
// ]);
// Mark first message as processed
if (this.isFirstMessage) {
this.isFirstMessage = false;
}
} catch (error) {
this.config.logger.error({ error }, 'Error streaming message');
throw error;
}
}
/**
* Fetch context resources from user's MCP server
*/
private async fetchContextResources(): Promise<ResourceContent[]> {
const contextUris = [
CONTEXT_URIS.USER_PROFILE,
CONTEXT_URIS.CONVERSATION_SUMMARY,
CONTEXT_URIS.WORKSPACE_STATE,
CONTEXT_URIS.SYSTEM_PROMPT,
];
const resources = await Promise.all(
contextUris.map(async (uri) => {
try {
return await this.mcpClient.readResource(uri);
} catch (error) {
this.config.logger.warn({ error, uri }, 'Failed to fetch resource, using empty');
return { uri, text: '' };
}
})
);
return resources;
}
/**
* Build messages array with context from resources
*/
private buildMessages(
currentMessage: InboundMessage,
contextResources: ResourceContent[]
): Array<{ role: string; content: string }> {
const conversationSummary = contextResources.find(
(r) => r.uri === CONTEXT_URIS.CONVERSATION_SUMMARY
);
const messages: Array<{ role: string; content: string }> = [];
// Add conversation context as a system-like user message
if (conversationSummary?.text) {
messages.push({
role: 'user',
content: `[Previous Conversation Context]\n${conversationSummary.text}`,
});
messages.push({
role: 'assistant',
content: 'I understand the context from our previous conversations.',
});
}
// Add workspace delta (for subsequent turns)
const workspaceDelta = this.buildWorkspaceDelta();
if (workspaceDelta) {
messages.push({
role: 'user',
content: workspaceDelta,
});
}
// Add current user message
messages.push({
role: 'user',
content: currentMessage.content,
});
return messages;
}
/** /**
* Convert to LangChain message format * Convert to LangChain message format
*/ */
private buildLangChainMessages( private buildLangChainMessages(
systemPrompt: string, systemPrompt: string,
messages: Array<{ role: string; content: string }> history: BaseMessage[],
currentUserMessage: string
): BaseMessage[] { ): BaseMessage[] {
const langchainMessages: BaseMessage[] = [new SystemMessage(systemPrompt)]; return [
new SystemMessage(systemPrompt),
for (const msg of messages) { ...history,
if (msg.role === 'user') { new HumanMessage(currentUserMessage),
langchainMessages.push(new HumanMessage(msg.content)); ];
} else if (msg.role === 'assistant') {
langchainMessages.push(new AIMessage(msg.content));
}
}
return langchainMessages;
} }
/** /**
* Build system prompt from platform base + user resources * Build system prompt from template
*/ */
private buildSystemPrompt(contextResources: ResourceContent[]): string { private async buildSystemPrompt(): Promise<string> {
const userProfile = contextResources.find((r) => r.uri === CONTEXT_URIS.USER_PROFILE); // Load template and populate with license info
const customPrompt = contextResources.find((r) => r.uri === CONTEXT_URIS.SYSTEM_PROMPT); const template = await AgentHarness.loadSystemPromptTemplate();
const workspaceState = contextResources.find((r) => r.uri === CONTEXT_URIS.WORKSPACE_STATE); let prompt = template
.replace('{{licenseType}}', this.config.license.licenseType)
// Base platform prompt .replace('{{features}}', JSON.stringify(this.config.license.features, null, 2));
let prompt = `You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data.
User license: ${this.config.license.licenseType}
Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
// Add user profile context
if (userProfile?.text) {
prompt += `\n\n# User Profile\n${userProfile.text}`;
}
// Add workspace context from MCP resource (if available)
if (workspaceState?.text) {
prompt += `\n\n# Current Workspace (from MCP)\n${workspaceState.text}`;
}
// Add full workspace state from WorkspaceManager (first message only) // Add full workspace state from WorkspaceManager (first message only)
if (this.isFirstMessage && this.workspaceManager) { if (this.isFirstMessage && this.workspaceManager) {
const workspaceJSON = this.workspaceManager.serializeState(); const workspaceJSON = this.workspaceManager.serializeState();
prompt += `\n\n# Workspace State (JSON)\n\`\`\`json\n${workspaceJSON}\n\`\`\``; prompt += `\n\n# Current Workspace State\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
// Record current workspace sequence for delta tracking
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
}
// Add user's custom instructions (highest priority)
if (customPrompt?.text) {
prompt += `\n\n# User Instructions\n${customPrompt.text}`;
} }
return prompt; return prompt;
} }
/** /**
* Build workspace delta message for subsequent turns. * Map tool names to user-friendly status labels.
* Returns null if no changes since last message.
*/ */
private buildWorkspaceDelta(): string | null { private getToolLabel(toolName: string): string {
if (!this.workspaceManager || this.isFirstMessage) { const labels: Record<string, string> = {
return null; research_agent: 'Researching...',
} get_chart_data: 'Fetching chart data...',
symbol_lookup: 'Looking up symbol...',
const changes = this.workspaceManager.getChangesSince(this.lastWorkspaceSeq); };
return labels[toolName] ?? `Running ${toolName}...`;
if (Object.keys(changes).length === 0) {
return null;
}
// Format changes as JSON
const deltaJSON = JSON.stringify(changes, null, 2);
// Update sequence marker
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
return `[Workspace Changes Since Last Turn]\n\`\`\`json\n${deltaJSON}\n\`\`\``;
} }
/**
* Process tool result to extract images and send via channel adapter.
* Returns text-only version for LLM context (no base64 image data).
*/
private processToolResult(result: string, toolName: string): string {
// Most tools return plain strings - only process JSON results
if (!result || typeof result !== 'string') {
return String(result || '');
}
// Try to parse as JSON
let parsedResult: any;
try {
parsedResult = JSON.parse(result);
} catch {
// Not JSON, return as-is
return result;
}
// Check if result has images array (from ResearchSubagent)
if (parsedResult && Array.isArray(parsedResult.images) && parsedResult.images.length > 0) {
this.config.logger.info(
{ tool: toolName, imageCount: parsedResult.images.length },
'Extracting images from tool result'
);
// Send each image via channel adapter
for (const image of parsedResult.images) {
if (image.data && image.mimeType) {
if (this.channelAdapter) {
this.config.logger.debug({ mimeType: image.mimeType }, 'Sending image to channel');
this.channelAdapter.sendImage({
data: image.data,
mimeType: image.mimeType,
caption: undefined,
});
} else {
this.config.logger.warn('No channel adapter set, cannot send image');
}
}
}
// Create text-only version for LLM
const textOnlyResult = {
...parsedResult,
images: undefined,
imageCount: parsedResult.images.length,
};
// Clean up undefined values
Object.keys(textOnlyResult).forEach(key => {
if (textOnlyResult[key] === undefined) {
delete textOnlyResult[key];
}
});
return JSON.stringify(textOnlyResult);
}
// Check for nested chart_images object
if (parsedResult && parsedResult.chart_images && typeof parsedResult.chart_images === 'object') {
this.config.logger.info(
{ tool: toolName, chartCount: Object.keys(parsedResult.chart_images).length },
'Extracting chart images from tool result'
);
// Send each chart image via channel adapter
for (const [chartId, chartData] of Object.entries(parsedResult.chart_images)) {
const chart = chartData as any;
if (chart.type === 'image' && chart.data) {
if (this.channelAdapter) {
this.config.logger.debug({ chartId }, 'Sending chart image to channel');
this.channelAdapter.sendImage({
data: chart.data,
mimeType: 'image/png',
caption: undefined,
});
} else {
this.config.logger.warn('No channel adapter set, cannot send chart image');
}
}
}
// Create text-only version for LLM
const textOnlyResult = {
...parsedResult,
chart_images: undefined,
chartCount: Object.keys(parsedResult.chart_images).length,
};
// Clean up undefined values
Object.keys(textOnlyResult).forEach(key => {
if (textOnlyResult[key] === undefined) {
delete textOnlyResult[key];
}
});
return JSON.stringify(textOnlyResult);
}
// No images found, return stringified result
return result;
}
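Across the harness, the channel adapter is only ever asked to do three things: stream text chunks, announce a running tool, and deliver extracted images. A minimal interface consistent with that usage (the name `ChannelAdapterSketch` is an assumption, not the gateway's actual type) looks like:

```typescript
// Minimal sketch of the channel adapter surface used by AgentHarness; not the real gateway type.
interface ChannelAdapterSketch {
  // Stream a piece of assistant text to the user as it is generated.
  sendChunk(text: string): void;
  // Optional status update while a tool runs, e.g. "Researching...".
  sendToolCall?(toolName: string, label: string): void;
  // Deliver an image extracted from a tool result; data is base64-encoded.
  sendImage(image: { data: string; mimeType: string; caption?: string }): void;
}
```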
/** /**
* Cleanup resources * End the session: flush conversation to cold storage, then release resources.
* Called by channel handlers on disconnect, session expiry, or graceful shutdown.
*/ */
async cleanup(): Promise<void> { async cleanup(): Promise<void> {
this.config.logger.info('Cleaning up agent harness'); this.config.logger.info('Cleaning up agent harness');
if (this.conversationStore) {
const channelKey = this.config.channelType ?? ChannelType.WEBSOCKET;
try {
await this.conversationStore.flushToIceberg(
this.config.userId, this.config.sessionId, this.config.historyLimit, channelKey
);
} catch (error) {
this.config.logger.error({ error }, 'Failed to flush conversation to Iceberg during cleanup');
}
}
await this.mcpClient.disconnect(); await this.mcpClient.disconnect();
} }
} }

View File

@@ -3,9 +3,6 @@
// Memory // Memory
export * from './memory/index.js'; export * from './memory/index.js';
// Skills
export * from './skills/index.js';
// Subagents // Subagents
export * from './subagents/index.js'; export * from './subagents/index.js';

View File

@@ -88,7 +88,7 @@ export class MCPClientConnector {
/** /**
* List available tools from user's MCP server * List available tools from user's MCP server
* Filters to only return tools marked as agent_accessible * Returns all available tools from the MCP server
*/ */
async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> { async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> {
if (!this.client || !this.connected) { if (!this.client || !this.connected) {
@@ -96,36 +96,54 @@ export class MCPClientConnector {
} }
try { try {
this.config.logger.debug('Requesting tool list from MCP server');
const response = await this.client.listTools(); const response = await this.client.listTools();
// Filter tools to only include agent-accessible ones this.config.logger.debug(
const tools = response.tools {
.filter((tool: any) => { hasTools: !!response.tools,
// Check if tool has agent_accessible annotation toolCount: response.tools?.length || 0,
const annotations = tool.annotations || {}; },
return annotations.agent_accessible === true; 'Received tool list response'
}) );
.map((tool: any) => ({
name: tool.name, // Handle case where response.tools might be undefined
description: tool.description, if (!response.tools || !Array.isArray(response.tools)) {
inputSchema: tool.inputSchema, this.config.logger.warn('MCP server returned no tools array');
})); return [];
}
// Return all tools - agent-to-tool binding is handled by the tool registry
const tools = response.tools.map((tool: any) => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema,
}));
this.config.logger.debug( this.config.logger.debug(
{ totalTools: response.tools.length, agentAccessibleTools: tools.length }, { toolCount: tools.length },
'Listed MCP tools with filtering' 'Listed MCP tools'
); );
return tools; return tools;
} catch (error) { } catch (error) {
this.config.logger.error({ error }, 'Failed to list MCP tools'); this.config.logger.error(
{
error,
errorMessage: (error as Error)?.message,
errorName: (error as Error)?.name,
errorCode: (error as any)?.code,
errorStack: (error as Error)?.stack,
},
'Failed to list MCP tools'
);
throw error; throw error;
} }
} }
/** /**
* List available resources from user's MCP server * List available resources from user's MCP server
* Filters to only return resources marked as agent_accessible * Returns all available resources from the MCP server
*/ */
async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> { async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> {
if (!this.client || !this.connected) { if (!this.client || !this.connected) {
@@ -135,23 +153,17 @@ export class MCPClientConnector {
try { try {
const response = await this.client.listResources(); const response = await this.client.listResources();
// Filter resources to only include agent-accessible ones // Return all resources - agent-to-resource binding is handled by the tool registry
const resources = response.resources const resources = response.resources.map((resource: any) => ({
.filter((resource: any) => { uri: resource.uri,
// Check if resource has agent_accessible annotation name: resource.name,
const annotations = resource.annotations || {}; description: resource.description,
return annotations.agent_accessible === true; mimeType: resource.mimeType,
}) }));
.map((resource: any) => ({
uri: resource.uri,
name: resource.name,
description: resource.description,
mimeType: resource.mimeType,
}));
this.config.logger.debug( this.config.logger.debug(
{ totalResources: response.resources.length, agentAccessibleResources: resources.length }, { resourceCount: resources.length },
'Listed MCP resources with filtering' 'Listed MCP resources'
); );
return resources; return resources;

View File

@@ -2,6 +2,7 @@ import type Redis from 'ioredis';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import type { BaseMessage } from '@langchain/core/messages'; import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages'; import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import type { IcebergClient } from '../../clients/iceberg-client.js';
/** /**
* Message record for storage * Message record for storage
@@ -17,36 +18,36 @@ export interface StoredMessage {
} }
/** /**
* Conversation store: Redis (hot) + Iceberg (cold) * Conversation store: Redis (hot) + Iceberg/Parquet (cold)
* *
* Hot path: Recent messages in Redis for fast access * Hot path: Recent messages in Redis for fast context loading
* Cold path: Full history in Iceberg for durability and analytics * Cold path: Full session flushed as a single Parquet file at session end
* *
* Architecture: * Architecture:
* - Redis stores last N messages per session with TTL * - Redis stores last N messages per session with TTL
* - Iceberg stores all messages partitioned by user_id, session_id * - Parquet file written to S3 at session close (one file per session)
* - Supports time-travel queries for debugging and analysis * - Cold read falls back to Parquet scan when Redis TTL has expired
*/ */
export class ConversationStore { export class ConversationStore {
private readonly HOT_MESSAGE_LIMIT = 50; // Keep last 50 messages in Redis private readonly HOT_MESSAGE_LIMIT = 50; // Redis buffer ceiling
private readonly HOT_TTL_SECONDS = 3600; // 1 hour private readonly HOT_TTL_SECONDS = 3600; // 1 hour
constructor( constructor(
private redis: Redis, private redis: Redis,
private logger: FastifyBaseLogger private logger: FastifyBaseLogger,
// TODO: Add Iceberg catalog private icebergClient?: IcebergClient
// private iceberg: IcebergCatalog
) {} ) {}
/** /**
* Save a message to both Redis and Iceberg * Save a message to Redis hot path
*/ */
async saveMessage( async saveMessage(
userId: string, userId: string,
sessionId: string, sessionId: string,
role: 'user' | 'assistant' | 'system', role: 'user' | 'assistant' | 'system',
content: string, content: string,
metadata?: Record<string, unknown> metadata?: Record<string, unknown>,
channelType?: string
): Promise<void> { ): Promise<void> {
const message: StoredMessage = { const message: StoredMessage = {
id: `${userId}:${sessionId}:${Date.now()}`, id: `${userId}:${sessionId}:${Date.now()}`,
@@ -60,20 +61,10 @@ export class ConversationStore {
this.logger.debug({ userId, sessionId, role }, 'Saving message'); this.logger.debug({ userId, sessionId, role }, 'Saving message');
// Hot: Add to Redis list (LPUSH for newest first) const key = this.getRedisKey(userId, sessionId, channelType);
const key = this.getRedisKey(userId, sessionId);
await this.redis.lpush(key, JSON.stringify(message)); await this.redis.lpush(key, JSON.stringify(message));
// Trim to keep only recent messages
await this.redis.ltrim(key, 0, this.HOT_MESSAGE_LIMIT - 1); await this.redis.ltrim(key, 0, this.HOT_MESSAGE_LIMIT - 1);
// Set TTL
await this.redis.expire(key, this.HOT_TTL_SECONDS); await this.redis.expire(key, this.HOT_TTL_SECONDS);
// Cold: Async append to Iceberg
this.appendToIceberg(message).catch((error) => {
this.logger.error({ error, userId, sessionId }, 'Failed to append message to Iceberg');
});
} }
/** /**
@@ -82,9 +73,10 @@ export class ConversationStore {
async getRecentMessages( async getRecentMessages(
userId: string, userId: string,
sessionId: string, sessionId: string,
limit: number = 20 limit: number,
channelType?: string
): Promise<StoredMessage[]> { ): Promise<StoredMessage[]> {
const key = this.getRedisKey(userId, sessionId); const key = this.getRedisKey(userId, sessionId, channelType);
const messages = await this.redis.lrange(key, 0, limit - 1); const messages = await this.redis.lrange(key, 0, limit - 1);
return messages return messages
@@ -101,37 +93,70 @@ export class ConversationStore {
} }
/** /**
* Get full conversation history from Iceberg (cold path) * Get full conversation history — Redis first, falls back to Iceberg cold path
*/ */
async getFullHistory( async getFullHistory(
userId: string, userId: string,
sessionId: string, sessionId: string,
limit: number,
channelType?: string,
timeRange?: { start: number; end: number } timeRange?: { start: number; end: number }
): Promise<StoredMessage[]> { ): Promise<StoredMessage[]> {
this.logger.debug({ userId, sessionId, timeRange }, 'Loading full history from Iceberg'); this.logger.debug({ userId, sessionId }, 'Loading full history');
// TODO: Implement Iceberg query // Try Redis hot path first
// const table = this.iceberg.loadTable('gateway.conversations'); const hot = await this.getRecentMessages(userId, sessionId, limit, channelType);
// const filters = [ if (hot.length > 0) {
// EqualTo('user_id', userId), return hot;
// EqualTo('session_id', sessionId), }
// ];
//
// if (timeRange) {
// filters.push(GreaterThanOrEqual('timestamp', timeRange.start));
// filters.push(LessThanOrEqual('timestamp', timeRange.end));
// }
//
// const df = await table.scan({
// row_filter: And(...filters)
// }).to_pandas();
//
// if (!df.empty) {
// return df.sort_values('timestamp').to_dict('records');
// }
// Fallback to Redis if Iceberg not available // Fall back to Iceberg cold path (post-TTL recovery)
return await this.getRecentMessages(userId, sessionId, 1000); if (this.icebergClient) {
this.logger.debug({ userId, sessionId }, 'Redis miss, querying Iceberg cold path');
const coldMessages = await this.icebergClient.queryMessages(userId, sessionId, {
startTime: timeRange?.start,
endTime: timeRange?.end,
limit,
});
return coldMessages.map((m) => ({
id: m.id,
userId: m.user_id,
sessionId: m.session_id,
role: m.role as StoredMessage['role'],
content: m.content,
timestamp: m.timestamp,
}));
}
return [];
}
/**
* Flush the full session from Redis to Iceberg as a single Parquet file.
* Called once at session end — prevents small-file fragmentation.
*/
async flushToIceberg(userId: string, sessionId: string, limit: number, channelType?: string): Promise<void> {
if (!this.icebergClient) {
return;
}
const messages = await this.getRecentMessages(userId, sessionId, limit, channelType);
if (messages.length === 0) {
return;
}
const icebergMessages = messages.map((m) => ({
id: m.id,
user_id: m.userId,
session_id: m.sessionId,
role: m.role,
content: m.content,
metadata: JSON.stringify(m.metadata || {}),
timestamp: m.timestamp,
}));
await this.icebergClient.appendMessages(userId, sessionId, icebergMessages);
this.logger.info({ userId, sessionId, count: icebergMessages.length }, 'Conversation flushed to Iceberg');
} }
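Taken together, the hot/cold split implies a simple per-session lifecycle; a sketch using the methods defined in this class (the IDs, limits, and message text are illustrative) is:

```typescript
// Sketch of the intended lifecycle: hot writes per turn, one cold flush at session end.
async function sessionLifecycleSketch(store: ConversationStore) {
  const userId = 'user-1', sessionId = 'sess-1', channel = 'websocket';  // illustrative IDs

  // During the session: every turn lands in the Redis hot buffer (trimmed + TTL'd).
  await store.saveMessage(userId, sessionId, 'user', 'Plot BTC with volume overlay', undefined, channel);
  await store.saveMessage(userId, sessionId, 'assistant', 'Here is the chart...', undefined, channel);

  // Building context for the next turn reads only the hot buffer.
  const recent = await store.getRecentMessages(userId, sessionId, 20, channel);

  // At session end (cleanup): the whole buffer is written once as a single Parquet append.
  await store.flushToIceberg(userId, sessionId, 50, channel);

  // After the Redis TTL expires, getFullHistory falls back to the Iceberg cold path.
  const history = await store.getFullHistory(userId, sessionId, 50, channel);
  return { recent, history };
}
```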
/** /**
@@ -155,9 +180,9 @@ export class ConversationStore {
/** /**
* Delete all messages for a session (Redis only, Iceberg handled separately) * Delete all messages for a session (Redis only, Iceberg handled separately)
*/ */
async deleteSession(userId: string, sessionId: string): Promise<void> { async deleteSession(userId: string, sessionId: string, channelType?: string): Promise<void> {
this.logger.info({ userId, sessionId }, 'Deleting session from Redis'); this.logger.info({ userId, sessionId }, 'Deleting session from Redis');
const key = this.getRedisKey(userId, sessionId); const key = this.getRedisKey(userId, sessionId, channelType);
await this.redis.del(key); await this.redis.del(key);
} }
@@ -167,62 +192,22 @@ export class ConversationStore {
async deleteUserData(userId: string): Promise<void> { async deleteUserData(userId: string): Promise<void> {
this.logger.info({ userId }, 'Deleting all user messages for GDPR compliance'); this.logger.info({ userId }, 'Deleting all user messages for GDPR compliance');
// Delete from Redis
const pattern = `conv:${userId}:*`; const pattern = `conv:${userId}:*`;
const keys = await this.redis.keys(pattern); const keys = await this.redis.keys(pattern);
if (keys.length > 0) { if (keys.length > 0) {
await this.redis.del(...keys); await this.redis.del(...keys);
} }
// Delete from Iceberg
// Note: For GDPR compliance, need to:
// 1. Send delete command via Kafka OR
// 2. Use Iceberg REST API to delete rows (if supported) OR
// 3. Coordinate with Flink job to handle deletes
//
// Iceberg delete flow:
// - Mark rows for deletion (equality delete files)
// - Run compaction to physically remove
// - Expire old snapshots
this.logger.info({ userId }, 'User messages deleted from Redis - Iceberg GDPR delete not yet implemented'); this.logger.info({ userId }, 'User messages deleted from Redis - Iceberg GDPR delete not yet implemented');
} }
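Note that once keys are channel-namespaced (`conv:<channel>:<userId>:<sessionId>`), the `conv:<userId>:*` wildcard no longer matches them, and `KEYS` blocks Redis on large keyspaces. A sketch of a SCAN-based delete covering both key layouts, assuming ioredis' `scanStream`, is:

```typescript
import type Redis from 'ioredis';

// Sketch: delete both un-namespaced and channel-namespaced conversation keys for a user.
async function deleteUserConversations(redis: Redis, userId: string): Promise<void> {
  const patterns = [`conv:${userId}:*`, `conv:*:${userId}:*`];
  for (const match of patterns) {
    const stream = redis.scanStream({ match, count: 100 });   // incremental SCAN instead of blocking KEYS
    for await (const keys of stream) {
      if ((keys as string[]).length > 0) {
        await redis.del(...(keys as string[]));
      }
    }
  }
}
```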
/** /**
* Get Redis key for conversation * Get Redis key for conversation, namespaced by channel type
*/ */
private getRedisKey(userId: string, sessionId: string): string { private getRedisKey(userId: string, sessionId: string, channelType?: string): string {
return `conv:${userId}:${sessionId}`; return channelType
} ? `conv:${channelType}:${userId}:${sessionId}`
: `conv:${userId}:${sessionId}`;
/**
* Append message to Iceberg for durable storage
*
* Note: For production, send to Kafka topic that Flink consumes:
* - Topic: gateway_conversations
* - Flink job writes to gateway.conversations Iceberg table
* - Ensures consistent write pattern with rest of system
*/
private async appendToIceberg(message: StoredMessage): Promise<void> {
// TODO: Send to Kafka topic for Flink processing
// const kafkaMessage = {
// id: message.id,
// user_id: message.userId,
// session_id: message.sessionId,
// role: message.role,
// content: message.content,
// metadata: JSON.stringify(message.metadata || {}),
// timestamp: message.timestamp,
// };
// await this.kafkaProducer.send({
// topic: 'gateway_conversations',
// messages: [{ value: JSON.stringify(kafkaMessage) }]
// });
this.logger.debug(
{ messageId: message.id, userId: message.userId, sessionId: message.sessionId },
'Message append to Iceberg (via Kafka) not yet implemented'
);
} }
/** /**
@@ -241,7 +226,7 @@ export class ConversationStore {
} }
const messages = await this.getRecentMessages(userId, sessionId, count); const messages = await this.getRecentMessages(userId, sessionId, count);
const timestamps = messages.map((m) => m.timestamp / 1000); // Convert to milliseconds const timestamps = messages.map((m) => m.timestamp / 1000);
return { return {
messageCount: count, messageCount: count,

View File

@@ -1,4 +1,4 @@
import type { UserLicense, ChannelType } from '../../types/user.js'; import type { License, ChannelType } from '../../types/user.js';
import type { BaseMessage } from '@langchain/core/messages'; import type { BaseMessage } from '@langchain/core/messages';
/** /**
@@ -62,7 +62,7 @@ export interface UserContext {
// Identity // Identity
userId: string; userId: string;
sessionId: string; sessionId: string;
license: UserLicense; license: License;
// Channel context (for multi-channel routing) // Channel context (for multi-channel routing)
activeChannel: ActiveChannel; activeChannel: ActiveChannel;
@@ -146,7 +146,7 @@ export function getDefaultCapabilities(channelType: ChannelType): ChannelCapabil
export function createUserContext(params: { export function createUserContext(params: {
userId: string; userId: string;
sessionId: string; sessionId: string;
license: UserLicense; license: License;
channelType: ChannelType; channelType: ChannelType;
channelUserId: string; channelUserId: string;
channelCapabilities?: Partial<ChannelCapabilities>; channelCapabilities?: Partial<ChannelCapabilities>;

View File

@@ -0,0 +1,99 @@
# Dexorder AI Assistant System Prompt
You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data.
**User License:** {{licenseType}}
**Available Features:**
{{features}}
---
# Important Instructions
## Task Delegation
- For ANY research questions, deep analysis, statistical analysis, charting requests, plotting, ML tasks, or market data queries that require computation, you MUST use the 'research' tool
- The research tool creates and runs Python scripts that generate charts and perform analysis
- Use 'research' for anything involving: plotting, statistics, calculations, correlations, patterns, volume analysis, technical indicators, or any non-trivial data processing
- NEVER write Python code directly in your responses to the user
- NEVER show code to the user - delegate to the research tool instead
- NEVER attempt to do analysis yourself - let the research subagent handle it
## Available Tools
You have access to the following tools:
### research
**This is your PRIMARY tool for any analysis, computation, charting, or plotting tasks.**
Creates and runs Python research scripts via a specialized research subagent.
The subagent autonomously writes code, executes it, handles errors, and generates charts.
**ALWAYS use research for:**
- Any plotting, charting, or visualization requests
- Price action analysis and correlations
- Technical indicators and overlays
- Statistical analysis of market data
- Volume analysis and patterns
- Machine learning or predictive modeling
- Any data-intensive computations
- Multi-symbol comparisons
- Custom calculations or transformations
- Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.)
**NEVER attempt to do analysis yourself in the chat.**
Let the research subagent write and execute the Python code.
**Examples of when to use research:**
- "Plot BTC with volume overlay" → use research
- "Calculate correlation between ETH and BTC" → use research
- "Show me RSI divergences" → use research
- "Analyze Monday price patterns" → use research
- "Does volume predict price movement?" → use research
Parameters:
- instruction: Natural language description of the analysis to perform (be specific!)
- name: A unique name for the research script (e.g., "BTC Weekly Analysis")
Example usage:
- User: "Does Friday price action correlate with Monday?"
- You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30 AM-4:00 PM ET)", name="Friday-Monday Correlation"
### symbol-lookup
Look up trading symbols and get metadata.
Use this when users mention tickers or need symbol information.
### get-chart-data
**IMPORTANT: This is for QUICK, CASUAL information ONLY. This tool just returns raw data - it does NOT create charts or plots.**
Use ONLY when the user wants to:
- Quickly glance at recent price data
- Get a rough sense of current market conditions
- Check basic OHLC values
- Retrieve raw data without any processing
**DO NOT use get-chart-data for:**
- Plotting, charting, or any visualization
- Statistical analysis or correlations
- Calculations or data transformations
- Multi-symbol comparisons
- Volume analysis or patterns
- Any non-trivial computation
- Technical indicators or overlays
**For anything beyond casual data retrieval, use the 'research' tool instead.**
The research tool can create proper analysis with charts, statistics, and computations.
**Time Parameters:** Both from_time and to_time accept:
- Unix timestamps as numbers (e.g., 1774126800)
- Unix timestamps as strings (e.g., "1774126800")
- Date strings (e.g., "2 days ago", "2024-01-01", "yesterday")
## Workspace Tools (MCP)
You also have access to workspace persistence tools via MCP:
- **workspace_read(store_name)**: Read a workspace store (returns JSON object)
- **workspace_write(store_name, data)**: Write/overwrite a workspace store
- **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store
These are useful for persisting user preferences, analysis results, and custom data across sessions.
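Example (illustrative store name and payload): `workspace_write("user_preferences", {"favorite_symbols": ["BINANCE:BTC/USDT"], "default_period": "4h"})` to persist preferences, then `workspace_read("user_preferences")` in a later session to restore them.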

View File

@@ -1,146 +0,0 @@
# Skills
Skills are individual capabilities that the agent can use to accomplish tasks. Each skill is a self-contained unit with:
- A markdown definition file (`*.skill.md`)
- A TypeScript implementation extending `BaseSkill`
- Clear input/output contracts
- Parameter validation
- Error handling
## Skill Structure
```
skills/
├── base-skill.ts # Base class
├── {skill-name}.skill.md # Definition
├── {skill-name}.ts # Implementation
└── README.md # This file
```
## Creating a New Skill
### 1. Create the Definition File
Create `{skill-name}.skill.md`:
```markdown
# My Skill
**Version:** 1.0.0
**Author:** Your Name
**Tags:** category1, category2
## Description
What does this skill do?
## Inputs
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| param1 | string | Yes | What it does |
## Outputs
What does it return?
## Example Usage
Show code example
```
### 2. Create the Implementation
Create `{skill-name}.ts`:
```typescript
import { BaseSkill, SkillInput, SkillResult, SkillMetadata } from './base-skill.js';
export class MySkill extends BaseSkill {
getMetadata(): SkillMetadata {
return {
name: 'my-skill',
description: 'What it does',
version: '1.0.0',
};
}
getParametersSchema(): Record<string, unknown> {
return {
type: 'object',
required: ['param1'],
properties: {
param1: { type: 'string' },
},
};
}
validateInput(parameters: Record<string, unknown>): boolean {
return typeof parameters.param1 === 'string';
}
async execute(input: SkillInput): Promise<SkillResult> {
this.logStart(input);
try {
// Your implementation here
const result = this.success({ data: 'result' });
this.logEnd(result);
return result;
} catch (error) {
return this.error(error as Error);
}
}
}
```
### 3. Register the Skill
Add to `index.ts`:
```typescript
export { MySkill } from './my-skill.js';
```
## Using Skills in Workflows
Skills can be used in LangGraph workflows:
```typescript
import { MarketAnalysisSkill } from '../skills/market-analysis.js';
const analyzeNode = async (state) => {
const skill = new MarketAnalysisSkill(logger, model);
const result = await skill.execute({
context: state.userContext,
parameters: {
ticker: state.ticker,
period: '4h',
},
});
return {
analysis: result.data,
};
};
```
## Best Practices
1. **Single Responsibility**: Each skill should do one thing well
2. **Validation**: Always validate inputs thoroughly
3. **Error Handling**: Use try/catch and return meaningful errors
4. **Logging**: Use `logStart()` and `logEnd()` helpers
5. **Documentation**: Keep the `.skill.md` file up to date
6. **Testing**: Write unit tests for skill logic
7. **Idempotency**: Skills should be safe to retry
## Available Skills
- **market-analysis**: Analyze market conditions and trends
- *(Add more as you build them)*
## Skill Categories
- **Market Data**: Query and analyze market information
- **Trading**: Execute trades, manage positions
- **Analysis**: Technical and fundamental analysis
- **Risk**: Risk assessment and management
- **Utilities**: Helper functions and utilities

View File

@@ -1,128 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js';
/**
* Skill metadata
*/
export interface SkillMetadata {
name: string;
description: string;
version: string;
author?: string;
tags?: string[];
}
/**
* Skill input parameters
*/
export interface SkillInput {
context: UserContext;
parameters: Record<string, unknown>;
}
/**
* Skill execution result
*/
export interface SkillResult {
success: boolean;
data?: unknown;
error?: string;
metadata?: Record<string, unknown>;
}
/**
* Base skill interface
*
* Skills are individual capabilities that the agent can use.
* Each skill is defined by:
* - A markdown file (*.skill.md) describing purpose, inputs, outputs
* - A TypeScript implementation extending BaseSkill
*
* Skills can use:
* - LLM calls for reasoning
* - User's MCP server tools
* - Platform tools (market data, charts, etc.)
*/
export abstract class BaseSkill {
protected logger: FastifyBaseLogger;
protected model?: BaseChatModel;
constructor(logger: FastifyBaseLogger, model?: BaseChatModel) {
this.logger = logger;
this.model = model;
}
/**
* Get skill metadata
*/
abstract getMetadata(): SkillMetadata;
/**
* Validate input parameters
*/
abstract validateInput(parameters: Record<string, unknown>): boolean;
/**
* Execute the skill
*/
abstract execute(input: SkillInput): Promise<SkillResult>;
/**
* Get required parameters schema (JSON Schema format)
*/
abstract getParametersSchema(): Record<string, unknown>;
/**
* Helper: Log skill execution start
*/
protected logStart(input: SkillInput): void {
const metadata = this.getMetadata();
this.logger.info(
{
skill: metadata.name,
userId: input.context.userId,
sessionId: input.context.sessionId,
parameters: input.parameters,
},
'Starting skill execution'
);
}
/**
* Helper: Log skill execution end
*/
protected logEnd(result: SkillResult): void {
const metadata = this.getMetadata();
this.logger.info(
{
skill: metadata.name,
success: result.success,
error: result.error,
},
'Skill execution completed'
);
}
/**
* Helper: Create success result
*/
protected success(data: unknown, metadata?: Record<string, unknown>): SkillResult {
return {
success: true,
data,
metadata,
};
}
/**
* Helper: Create error result
*/
protected error(error: string | Error, metadata?: Record<string, unknown>): SkillResult {
return {
success: false,
error: error instanceof Error ? error.message : error,
metadata,
};
}
}

View File

@@ -1,10 +0,0 @@
// Skills exports
export {
BaseSkill,
type SkillMetadata,
type SkillInput,
type SkillResult,
} from './base-skill.js';
export { MarketAnalysisSkill } from './market-analysis.js';

View File

@@ -1,78 +0,0 @@
# Market Analysis Skill
**Version:** 1.0.0
**Author:** Dexorder AI Platform
**Tags:** market-data, analysis, trading
## Description
Analyzes market conditions for a given ticker and timeframe. Provides insights on:
- Price trends and patterns
- Volume analysis
- Support and resistance levels
- Market sentiment indicators
## Inputs
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `ticker` | string | Yes | Market identifier (e.g., "BINANCE:BTC/USDT") |
| `period` | string | Yes | Analysis period ("1h", "4h", "1d", "1w") |
| `startTime` | number | No | Start timestamp (microseconds), defaults to 7 days ago |
| `endTime` | number | No | End timestamp (microseconds), defaults to now |
| `indicators` | string[] | No | Additional indicators to include (e.g., ["RSI", "MACD"]) |
## Outputs
```typescript
{
success: true,
data: {
ticker: string,
period: string,
timeRange: { start: number, end: number },
trend: "bullish" | "bearish" | "neutral",
priceChange: number,
volumeProfile: {
average: number,
recent: number,
trend: "increasing" | "decreasing" | "stable"
},
supportLevels: number[],
resistanceLevels: number[],
indicators: Record<string, unknown>,
analysis: string // LLM-generated natural language analysis
}
}
```
## Example Usage
```typescript
const skill = new MarketAnalysisSkill(logger, model);
const result = await skill.execute({
context: userContext,
parameters: {
ticker: "BINANCE:BTC/USDT",
period: "4h",
indicators: ["RSI", "MACD"]
}
});
console.log(result.data.analysis);
// "Bitcoin is showing bullish momentum with RSI at 65 and MACD crossing above signal line..."
```
## Implementation Notes
- Queries OHLC data from Iceberg warehouse
- Uses LLM for natural language analysis
- Caches results for 5 minutes to reduce computation
- Falls back to reduced analysis if Iceberg unavailable
## Dependencies
- Iceberg client (market data)
- LLM model (analysis generation)
- User's MCP server (optional custom indicators)

View File

@@ -1,198 +0,0 @@
import { BaseSkill, type SkillInput, type SkillResult, type SkillMetadata } from './base-skill.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
/**
* Market analysis skill implementation
*
* See market-analysis.skill.md for full documentation
*/
export class MarketAnalysisSkill extends BaseSkill {
constructor(logger: FastifyBaseLogger, model?: BaseChatModel) {
super(logger, model);
}
getMetadata(): SkillMetadata {
return {
name: 'market-analysis',
description: 'Analyze market conditions for a given ticker and timeframe',
version: '1.0.0',
author: 'Dexorder AI Platform',
tags: ['market-data', 'analysis', 'trading'],
};
}
getParametersSchema(): Record<string, unknown> {
return {
type: 'object',
required: ['ticker', 'period'],
properties: {
ticker: {
type: 'string',
description: 'Market identifier (e.g., "BINANCE:BTC/USDT")',
},
period: {
type: 'string',
enum: ['1h', '4h', '1d', '1w'],
description: 'Analysis period',
},
startTime: {
type: 'number',
description: 'Start timestamp in microseconds',
},
endTime: {
type: 'number',
description: 'End timestamp in microseconds',
},
indicators: {
type: 'array',
items: { type: 'string' },
description: 'Additional indicators to include',
},
},
};
}
validateInput(parameters: Record<string, unknown>): boolean {
if (!parameters.ticker || typeof parameters.ticker !== 'string') {
return false;
}
if (!parameters.period || typeof parameters.period !== 'string') {
return false;
}
return true;
}
async execute(input: SkillInput): Promise<SkillResult> {
this.logStart(input);
if (!this.validateInput(input.parameters)) {
return this.error('Invalid parameters: ticker and period are required');
}
try {
const ticker = input.parameters.ticker as string;
const period = input.parameters.period as string;
const indicators = (input.parameters.indicators as string[]) || [];
// 1. Fetch OHLC data from Iceberg
// TODO: Implement Iceberg query
// const ohlcData = await this.fetchOHLCData(ticker, period, startTime, endTime);
const ohlcData = this.getMockOHLCData(); // Placeholder
// 2. Calculate technical indicators
const analysis = this.calculateAnalysis(ohlcData, indicators);
// 3. Generate natural language analysis using LLM
let narrativeAnalysis = '';
if (this.model) {
narrativeAnalysis = await this.generateNarrativeAnalysis(
ticker,
period,
analysis
);
}
const result = this.success({
ticker,
period,
timeRange: {
start: ohlcData.startTime,
end: ohlcData.endTime,
},
trend: analysis.trend,
priceChange: analysis.priceChange,
volumeProfile: analysis.volumeProfile,
supportLevels: analysis.supportLevels,
resistanceLevels: analysis.resistanceLevels,
indicators: analysis.indicators,
analysis: narrativeAnalysis,
});
this.logEnd(result);
return result;
} catch (error) {
const result = this.error(error as Error);
this.logEnd(result);
return result;
}
}
/**
* Calculate technical analysis from OHLC data
*/
private calculateAnalysis(
ohlcData: any,
_requestedIndicators: string[]
): any {
// TODO: Implement proper technical analysis
// This is a simplified placeholder
const priceChange = ((ohlcData.close - ohlcData.open) / ohlcData.open) * 100;
const trend = priceChange > 1 ? 'bullish' : priceChange < -1 ? 'bearish' : 'neutral';
return {
trend,
priceChange,
volumeProfile: {
average: ohlcData.avgVolume,
recent: ohlcData.currentVolume,
trend: ohlcData.currentVolume > ohlcData.avgVolume ? 'increasing' : 'decreasing',
},
supportLevels: [ohlcData.low * 0.98, ohlcData.low * 0.95],
resistanceLevels: [ohlcData.high * 1.02, ohlcData.high * 1.05],
indicators: {},
};
}
/**
* Generate natural language analysis using LLM
*/
private async generateNarrativeAnalysis(
ticker: string,
period: string,
analysis: any
): Promise<string> {
if (!this.model) {
return 'LLM not available for narrative analysis';
}
const systemPrompt = `You are a professional market analyst.
Provide concise, actionable market analysis based on technical data.
Focus on key insights and avoid jargon.`;
const userPrompt = `Analyze the following market data for ${ticker} (${period}):
Trend: ${analysis.trend}
Price Change: ${analysis.priceChange.toFixed(2)}%
Volume: ${analysis.volumeProfile.trend}
Support Levels: ${analysis.supportLevels.join(', ')}
Resistance Levels: ${analysis.resistanceLevels.join(', ')}
Provide a 2-3 sentence analysis suitable for a trading decision.`;
const response = await this.model.invoke([
new SystemMessage(systemPrompt),
new HumanMessage(userPrompt),
]);
return response.content as string;
}
/**
* Mock OHLC data (placeholder until Iceberg integration)
*/
private getMockOHLCData(): any {
return {
startTime: Date.now() - 7 * 24 * 60 * 60 * 1000,
endTime: Date.now(),
open: 50000,
high: 52000,
low: 49000,
close: 51500,
avgVolume: 1000000,
currentVolume: 1200000,
};
}
}

View File

@@ -3,6 +3,8 @@ import type { BaseMessage } from '@langchain/core/messages';
import { SystemMessage, HumanMessage } from '@langchain/core/messages'; import { SystemMessage, HumanMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js'; import type { UserContext } from '../memory/session-context.js';
import type { MCPClientConnector } from '../mcp-client.js';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { readFile } from 'fs/promises'; import { readFile } from 'fs/promises';
import { join } from 'path'; import { join } from 'path';
@@ -17,6 +19,10 @@ export interface SubagentConfig {
memoryFiles: string[]; // Memory files to load from memory/ directory memoryFiles: string[]; // Memory files to load from memory/ directory
capabilities: string[]; capabilities: string[];
systemPromptFile?: string; // Path to system-prompt.md systemPromptFile?: string; // Path to system-prompt.md
tools?: {
platform?: string[]; // Platform tool names
mcp?: string[]; // MCP tool patterns/names
};
} }
/** /**
@@ -52,15 +58,21 @@ export abstract class BaseSubagent {
protected config: SubagentConfig; protected config: SubagentConfig;
protected systemPrompt?: string; protected systemPrompt?: string;
protected memoryContext: string[] = []; protected memoryContext: string[] = [];
protected mcpClient?: MCPClientConnector;
protected tools: DynamicStructuredTool[] = [];
constructor( constructor(
config: SubagentConfig, config: SubagentConfig,
model: BaseChatModel, model: BaseChatModel,
logger: FastifyBaseLogger logger: FastifyBaseLogger,
mcpClient?: MCPClientConnector,
tools?: DynamicStructuredTool[]
) { ) {
this.config = config; this.config = config;
this.model = model; this.model = model;
this.logger = logger; this.logger = logger;
this.mcpClient = mcpClient;
this.tools = tools || [];
} }
/** /**
@@ -176,4 +188,56 @@ export abstract class BaseSubagent {
hasCapability(capability: string): boolean { hasCapability(capability: string): boolean {
return this.config.capabilities.includes(capability); return this.config.capabilities.includes(capability);
} }
/**
* Call a tool on the user's MCP server
*
* @param name Tool name
* @param args Tool arguments
* @returns Tool result
* @throws Error if MCP client not available or tool call fails
*/
protected async callMCPTool(name: string, args: Record<string, unknown>): Promise<unknown> {
if (!this.mcpClient) {
throw new Error('MCP client not available for this subagent');
}
try {
this.logger.debug({ tool: name, args }, 'Calling MCP tool from subagent');
const result = await this.mcpClient.callTool(name, args);
return result;
} catch (error) {
this.logger.error({ error, tool: name }, 'MCP tool call failed');
throw error;
}
}
/**
* Check if MCP client is available
*/
protected hasMCPClient(): boolean {
return this.mcpClient !== undefined;
}
/**
* Get tools available to this subagent
*/
getTools(): DynamicStructuredTool[] {
return this.tools;
}
/**
* Set tools for this subagent (used during initialization)
*/
setTools(tools: DynamicStructuredTool[]): void {
this.tools = tools;
this.logger.debug(
{
subagent: this.config.name,
toolCount: tools.length,
toolNames: tools.map(t => t.name),
},
'Tools set for subagent'
);
}
} }
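A minimal subclass showing how the new MCP plumbing is meant to be used from a subagent; the subclass itself, and the assumption that `execute` is the only member a concrete subagent must implement, are illustrative, while `execute_research` is the tool name used by the research config below:

```typescript
// Sketch only: a toy subagent that forwards its instruction to an MCP tool.
class EchoResearchSubagent extends BaseSubagent {
  async execute(context: SubagentContext, instruction: string): Promise<string> {
    if (!this.hasMCPClient()) {
      throw new Error('MCP client not available');
    }
    // callMCPTool() is the new helper on BaseSubagent; it logs and forwards to the user's MCP server.
    const result = await this.callMCPTool('execute_research', { instruction });
    return typeof result === 'string' ? result : JSON.stringify(result);
  }
}
```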

View File

@@ -19,8 +19,8 @@ import type { FastifyBaseLogger } from 'fastify';
* - best-practices.md: Industry standards * - best-practices.md: Industry standards
*/ */
export class CodeReviewerSubagent extends BaseSubagent { export class CodeReviewerSubagent extends BaseSubagent {
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger) { constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger, mcpClient?: any, tools?: any[]) {
super(config, model, logger); super(config, model, logger, mcpClient, tools);
} }
/** /**
@@ -72,7 +72,9 @@ export class CodeReviewerSubagent extends BaseSubagent {
export async function createCodeReviewerSubagent( export async function createCodeReviewerSubagent(
model: BaseChatModel, model: BaseChatModel,
logger: FastifyBaseLogger, logger: FastifyBaseLogger,
basePath: string basePath: string,
mcpClient?: any,
tools?: any[]
): Promise<CodeReviewerSubagent> { ): Promise<CodeReviewerSubagent> {
const { readFile } = await import('fs/promises'); const { readFile } = await import('fs/promises');
const { join } = await import('path'); const { join } = await import('path');
@@ -84,7 +86,7 @@ export async function createCodeReviewerSubagent(
const config = yaml.load(configContent) as SubagentConfig; const config = yaml.load(configContent) as SubagentConfig;
// Create and initialize subagent // Create and initialize subagent
const subagent = new CodeReviewerSubagent(config, model, logger); const subagent = new CodeReviewerSubagent(config, model, logger, mcpClient, tools);
await subagent.initialize(basePath); await subagent.initialize(basePath);
return subagent; return subagent;

View File

@@ -10,3 +10,9 @@ export {
CodeReviewerSubagent, CodeReviewerSubagent,
createCodeReviewerSubagent, createCodeReviewerSubagent,
} from './code-reviewer/index.js'; } from './code-reviewer/index.js';
export {
ResearchSubagent,
createResearchSubagent,
type ResearchResult,
} from './research/index.js';

View File

@@ -0,0 +1,2 @@
# Auto-generated at build time by bin/build
api-source/

View File

@@ -0,0 +1,31 @@
# Research Subagent Configuration
name: research
description: Creates and runs Python research scripts for market analysis, charting, and statistical analysis
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# Memory files to load from memory/ directory
memoryFiles:
- api-reference.md
- usage-examples.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- research_scripting
- data_analysis
- charting
- statistical_analysis
# Tools available to this subagent
tools:
platform: [] # No platform tools needed (works at script level)
mcp:
- category_* # All category_ tools (write, edit, read, list)
- execute_research # Script execution tool
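The `mcp` entries are name patterns rather than exact tool names. A sketch of how a registry might match MCP tool names against them (the helper name and the exact glob semantics are assumptions about the tool registry, which is not shown here):

```typescript
// Sketch: filter MCP tool names against the config's glob-style patterns (e.g. "category_*").
function matchToolPatterns(toolNames: string[], patterns: string[]): string[] {
  const regexes = patterns.map((p) =>
    new RegExp('^' + p.split('*').map((s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('.*') + '$')
  );
  return toolNames.filter((name) => regexes.some((re) => re.test(name)));
}

// matchToolPatterns(['category_write', 'category_read', 'execute_research', 'get_chart_data'],
//                   ['category_*', 'execute_research'])
//   -> ['category_write', 'category_read', 'execute_research']
```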

View File

@@ -0,0 +1,209 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage } from '@langchain/core/messages';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../mcp-client.js';
/**
* Result from research subagent execution
*/
export interface ResearchResult {
text: string;
images: Array<{
data: string;
mimeType: string;
}>;
}
/**
* Research Subagent
*
* Specialized agent for creating and running Python research scripts.
* Uses category_* MCP tools to:
* - Create/edit research scripts with DataAPI and ChartingAPI
* - Execute scripts and capture matplotlib charts
* - Iterate on errors with autonomous coding loop
*
* The subagent has direct access to MCP tools and handles the full
* coding loop without requiring skill-level orchestration.
*
* Images from script execution are extracted and returned separately
* but are NOT loaded into the LLM context (pass-through only).
*/
export class ResearchSubagent extends BaseSubagent {
private lastImages: Array<{data: string; mimeType: string}> = [];
// Shared with the MCP tool wrappers — populated as tools run, cleared per execution
private imageCapture: Array<{data: string; mimeType: string}> = [];
constructor(
config: SubagentConfig,
model: BaseChatModel,
logger: FastifyBaseLogger,
mcpClient?: MCPClientConnector,
tools?: any[]
) {
super(config, model, logger, mcpClient, tools);
}
setImageCapture(capture: Array<{data: string; mimeType: string}>): void {
this.imageCapture = capture;
}
/**
* Execute research request using LangGraph's createReactAgent.
* This is the standard LangChain pattern for agents with tool access —
* createReactAgent handles the tool calling loop automatically.
*/
async execute(context: SubagentContext, instruction: string): Promise<string> {
this.logger.info(
{
subagent: this.getName(),
userId: context.userContext.userId,
instruction: instruction.substring(0, 200),
toolCount: this.tools.length,
toolNames: this.tools.map(t => t.name),
},
'Research subagent starting'
);
if (!this.hasMCPClient()) {
throw new Error('MCP client not available for research subagent');
}
if (this.tools.length === 0) {
this.logger.warn('Research subagent has no tools — cannot write or execute scripts');
}
// Clear previous images (in-place so tool wrappers keep the same array reference)
this.imageCapture.length = 0;
this.lastImages = [];
// Build system prompt (with memory context appended)
const initialMessages = this.buildMessages(context, instruction);
// buildMessages returns [SystemMessage, ...history, HumanMessage]
// Extract system content for createReactAgent's prompt parameter
const systemMessage = initialMessages[0];
const humanMessage = initialMessages[initialMessages.length - 1];
// createReactAgent is the standard LangChain/LangGraph pattern for tool-using agents.
// It manages the tool calling loop, message accumulation, and termination automatically.
const agent = createReactAgent({
llm: this.model,
tools: this.tools,
prompt: systemMessage as SystemMessage,
});
const result = await agent.invoke(
{ messages: [humanMessage] },
{ recursionLimit: 20 }
);
// The final message in the graph output is the agent's last AIMessage
const allMessages: any[] = result.messages ?? [];
this.logger.info(
{ messageCount: allMessages.length },
'Research subagent graph completed'
);
// Images were captured in real-time by the MCP tool wrappers into this.imageCapture
this.lastImages = [...this.imageCapture];
// Return the final AI response
const lastAI = [...allMessages].reverse().find(
(m: any) => m.constructor?.name === 'AIMessage' || m._getType?.() === 'ai'
);
const finalText = lastAI
? (typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content))
: 'Research completed.';
this.logger.info(
{ textLength: finalText.length, imageCount: this.lastImages.length },
'Research subagent finished'
);
return finalText;
}
/**
* Execute with full result including images
* This is the method that ResearchSkill should use
*/
async executeWithImages(context: SubagentContext, instruction: string): Promise<ResearchResult> {
const text = await this.execute(context, instruction);
return {
text,
images: this.lastImages,
};
}
/**
* Get images from last execution
*/
getLastImages(): Array<{data: string; mimeType: string}> {
return this.lastImages;
}
/**
* Stream research execution
*/
async *stream(context: SubagentContext, instruction: string): AsyncGenerator<string> {
this.logger.info(
{
subagent: this.getName(),
userId: context.userContext.userId,
},
'Streaming research request'
);
if (!this.hasMCPClient()) {
throw new Error('MCP client not available for research subagent');
}
// Clear previous images
this.lastImages = [];
const messages = this.buildMessages(context, instruction);
const stream = await this.model.stream(messages);
for await (const chunk of stream) {
if (typeof chunk.content === 'string') {
yield chunk.content;
}
}
}
}
/**
* Factory function to create and initialize ResearchSubagent
*/
export async function createResearchSubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string,
mcpClient?: MCPClientConnector,
tools?: any[],
imageCapture?: Array<{data: string; mimeType: string}>
): Promise<ResearchSubagent> {
const { readFile } = await import('fs/promises');
const { join } = await import('path');
const yaml = await import('js-yaml');
// Load config
const configPath = join(basePath, 'config.yaml');
const configContent = await readFile(configPath, 'utf-8');
const config = yaml.load(configContent) as SubagentConfig;
// Create and initialize subagent
const subagent = new ResearchSubagent(config, model, logger, mcpClient, tools);
if (imageCapture !== undefined) {
subagent.setImageCapture(imageCapture);
}
await subagent.initialize(basePath);
return subagent;
}

View File

@@ -0,0 +1,480 @@
# Dexorder Research API Reference
This file contains the complete Python API source code with full docstrings.
These files are copied verbatim from `sandbox/dexorder/api/`.
The API provides access to market data and charting capabilities for research scripts.
---
## Overview
Research scripts access the API via:
```python
from dexorder.api import get_api
api = get_api()
```
The API instance provides:
- `api.data` - DataAPI for fetching OHLC market data
- `api.charting` - ChartingAPI for creating financial charts
---
## Complete API Source Code
The following sections contain the verbatim Python source files with complete
type hints, docstrings, and examples.
### api.py
```python
"""
Main DexOrder API - provides access to market data and charting.
"""
import logging
from .charting_api import ChartingAPI
from .data_api import DataAPI
log = logging.getLogger(__name__)
class API:
"""
Main API for accessing market data and creating charts.
This is the primary interface for research scripts and trading strategies.
Access this via get_api() in research scripts.
Attributes:
data: DataAPI for fetching historical and current market data
charting: ChartingAPI for creating candlestick charts and visualizations
Example:
from dexorder.api import get_api
import asyncio
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT 1H")
"""
def __init__(self, charting: ChartingAPI, data: DataAPI):
self.charting: ChartingAPI = charting
self.data: DataAPI = data
```
### data_api.py
```python
from abc import ABC, abstractmethod
from typing import Optional, List
import pandas as pd
from dexorder.utils import TimestampInput
class DataAPI(ABC):
"""
API for accessing market data.
Provides methods to query OHLC (Open, High, Low, Close) candlestick data
for cryptocurrency markets.
"""
@abstractmethod
async def historical_ohlc(
self,
ticker: str,
period_seconds: int,
start_time: TimestampInput,
end_time: TimestampInput,
extra_columns: Optional[List[str]] = None,
) -> pd.DataFrame:
"""
Fetch historical OHLC candlestick data for a market.
Args:
ticker: Market identifier in format "EXCHANGE:SYMBOL"
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
period_seconds: Candle period in seconds
Common values:
- 60 (1 minute)
- 300 (5 minutes)
- 900 (15 minutes)
- 3600 (1 hour)
- 86400 (1 day)
- 604800 (1 week)
start_time: Start of time range. Accepts:
- Unix timestamp in seconds (int/float): 1640000000
- Date string: "2021-12-20" or "2021-12-20 12:00:00"
- datetime object: datetime(2021, 12, 20)
- pandas Timestamp: pd.Timestamp("2021-12-20")
end_time: End of time range. Same formats as start_time.
extra_columns: Optional additional columns to include beyond the standard
OHLC columns. Available options:
- "volume" - Total volume (decimal float)
- "buy_vol" - Buy-side volume (decimal float)
- "sell_vol" - Sell-side volume (decimal float)
- "open_time", "high_time", "low_time", "close_time" (timestamps)
- "open_interest" (for futures markets)
- "ticker", "period_seconds"
Returns:
DataFrame with candlestick data sorted by timestamp (ascending).
Standard columns (always included):
- timestamp: Period start time in microseconds
- open: Opening price (decimal float)
- high: Highest price (decimal float)
- low: Lowest price (decimal float)
- close: Closing price (decimal float)
Plus any columns specified in extra_columns.
All prices and volumes are automatically converted to decimal floats
using market metadata. No manual conversion is needed.
Returns empty DataFrame if no data is available.
Examples:
# Basic OHLC with Unix timestamp
df = await api.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time=1640000000,
end_time=1640086400
)
# Using date strings with volume
df = await api.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
)
# Using datetime objects
from datetime import datetime
df = await api.historical_ohlc(
ticker="COINBASE:ETH/USD",
period_seconds=300,
start_time=datetime(2021, 12, 20, 9, 30),
end_time=datetime(2021, 12, 20, 16, 30),
extra_columns=["volume", "buy_vol", "sell_vol"]
)
"""
pass
@abstractmethod
async def latest_ohlc(
self,
ticker: str,
period_seconds: int,
length: int = 1,
extra_columns: Optional[List[str]] = None,
) -> pd.DataFrame:
"""
Query the most recent OHLC candles for a ticker.
This method fetches the latest N completed candles without needing to
specify exact timestamps. Useful for real-time analysis and indicators.
Args:
ticker: Market identifier in format "EXCHANGE:SYMBOL"
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
period_seconds: OHLC candle period in seconds
Common values: 60 (1m), 300 (5m), 900 (15m), 3600 (1h),
86400 (1d), 604800 (1w)
length: Number of most recent candles to return (default: 1)
extra_columns: Optional list of additional column names to include.
Same column options as historical_ohlc:
- "volume", "buy_vol", "sell_vol"
- "open_time", "high_time", "low_time", "close_time"
- "open_interest", "ticker", "period_seconds"
Returns:
Pandas DataFrame with the same column structure as historical_ohlc,
containing the N most recent completed candles sorted by timestamp.
Returns empty DataFrame if no data is available.
Examples:
# Get the last candle
df = await api.latest_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600
)
# Returns: timestamp, open, high, low, close
# Get the last 50 5-minute candles with volume
df = await api.latest_ohlc(
ticker="COINBASE:ETH/USD",
period_seconds=300,
length=50,
extra_columns=["volume", "buy_vol", "sell_vol"]
)
# Get recent candles with all timing data
df = await api.latest_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=60,
length=100,
extra_columns=["open_time", "high_time", "low_time", "close_time"]
)
Note:
This method returns only completed candles. The current (incomplete)
candle is not included.
"""
pass
```
### charting_api.py
```python
import logging
from abc import abstractmethod, ABC
from typing import Optional, Tuple, List
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
class ChartingAPI(ABC):
"""
API for creating financial charts and visualizations.
Provides methods to create candlestick charts, add technical indicator panels,
and build custom visualizations. All figures are automatically captured and
returned to the client as images.
Basic workflow:
1. Create a chart with plot_ohlc() → returns Figure and Axes
2. Optionally overlay indicators on the main axes (e.g., moving averages)
3. Optionally add indicator panels below with add_indicator_panel()
4. Figures are automatically captured (no need to save manually)
"""
@abstractmethod
def plot_ohlc(
self,
df: pd.DataFrame,
title: Optional[str] = None,
volume: bool = False,
style: str = "charles",
figsize: Tuple[int, int] = (12, 8),
**kwargs
) -> Tuple[Figure, plt.Axes]:
"""
Create a candlestick chart from OHLC data.
Args:
df: DataFrame with OHLC data. Required columns: open, high, low, close.
Column names are case-insensitive.
title: Chart title (optional)
volume: If True, shows volume bars below the candlesticks (requires 'volume' column)
style: Visual style for the chart. Available styles:
"charles" (default), "binance", "blueskies", "brasil", "checkers",
"classic", "mike", "nightclouds", "sas", "starsandstripes", "yahoo"
figsize: Figure size as (width, height) in inches. Default: (12, 8)
**kwargs: Additional styling arguments
Returns:
Tuple of (Figure, Axes):
- Figure: matplotlib Figure object
- Axes: Main candlestick axes (use for overlaying indicators)
Examples:
# Basic chart
fig, ax = api.plot_ohlc(df)
# With volume and title
fig, ax = api.plot_ohlc(
df,
title="BTC/USDT 1H",
volume=True,
style="binance"
)
# Overlay moving average
fig, ax = api.plot_ohlc(df)
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue")
ax.legend()
"""
pass
@abstractmethod
def add_indicator_panel(
self,
fig: Figure,
df: pd.DataFrame,
columns: Optional[List[str]] = None,
ylabel: Optional[str] = None,
height_ratio: float = 0.3,
ylim: Optional[Tuple[float, float]] = None,
**kwargs
) -> plt.Axes:
"""
Add an indicator panel below the chart with time-aligned x-axis.
Use this to display indicators that should be shown separately from the
price chart (e.g., RSI, MACD, volume).
Args:
fig: Figure object from plot_ohlc()
df: DataFrame with indicator data (must have same index as OHLC data)
columns: Column names to plot. If None, plots all numeric columns.
ylabel: Y-axis label (e.g., "RSI", "MACD")
height_ratio: Panel height relative to main chart (default: 0.3 = 30%)
ylim: Y-axis limits as (min, max). If None, auto-scales.
**kwargs: Line styling options (color, linewidth, linestyle, alpha)
Returns:
Axes object for the new panel (use for further customization)
Examples:
# Add RSI panel with reference lines
fig, ax = api.plot_ohlc(df)
rsi_ax = api.add_indicator_panel(
fig, df,
columns=["rsi"],
ylabel="RSI",
ylim=(0, 100)
)
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
# Add MACD panel
fig, ax = api.plot_ohlc(df)
api.add_indicator_panel(
fig, df,
columns=["macd", "macd_signal"],
ylabel="MACD"
)
"""
pass
@abstractmethod
def create_figure(
self,
figsize: Tuple[int, int] = (12, 8),
style: str = "charles"
) -> Tuple[Figure, plt.Axes]:
"""
Create a styled figure for custom visualizations.
Use this when you want to create charts other than candlesticks
(e.g., histograms, scatter plots, heatmaps).
Args:
figsize: Figure size as (width, height) in inches. Default: (12, 8)
style: Style name for consistent theming. Default: "charles"
Returns:
Tuple of (Figure, Axes) ready for plotting
Examples:
# Histogram
fig, ax = api.create_figure()
ax.hist(returns, bins=50)
ax.set_title("Return Distribution")
# Heatmap
fig, ax = api.create_figure(figsize=(10, 10))
import seaborn as sns
sns.heatmap(correlation_matrix, ax=ax)
ax.set_title("Correlation Matrix")
"""
pass
```
### __init__.py
```python
"""
DexOrder API - market data and charting for research and trading.
For research scripts, import and use get_api() to access the API:
from dexorder.api import get_api
import asyncio
api = get_api()
df = asyncio.run(api.data.historical_ohlc(...))
fig, ax = api.charting.plot_ohlc(df)
"""
import logging
from typing import Optional
from dexorder.api.api import API
from dexorder.api.charting_api import ChartingAPI
from dexorder.api.data_api import DataAPI
log = logging.getLogger(__name__)
# Global API instance - managed by main.py
_global_api: Optional[API] = None
def get_api() -> API:
"""
Get the global API instance for accessing market data and charts.
Use this in research scripts to access the data and charting APIs.
Returns:
API instance with data and charting capabilities
Raises:
RuntimeError: If called before API initialization (should not happen in research scripts)
Example:
from dexorder.api import get_api
import asyncio
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
"""
if _global_api is None:
raise RuntimeError("API not initialized")
return _global_api
def set_api(api: API) -> None:
"""Set the global API instance. Internal use only."""
global _global_api
_global_api = api
__all__ = ['API', 'ChartingAPI', 'DataAPI', 'get_api', 'set_api']
```
---
For practical usage patterns and complete working examples, see `usage-examples.md`.

View File

@@ -0,0 +1,221 @@
# Research Script API Usage
Research scripts executed via the `execute_research` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
## Accessing the API
```python
from dexorder.api import get_api
import asyncio
# Get the global API instance
api = get_api()
```
## Using the Data API
The data API provides access to historical OHLC (Open, High, Low, Close) market data with smart caching via Iceberg.
### Fetching Historical Data
The API accepts flexible timestamp formats for convenience:
```python
from dexorder.api import get_api
import asyncio
from datetime import datetime
api = get_api()
# Method 1: Using Unix timestamps (seconds)
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600, # 1 hour candles
start_time=1640000000, # Unix timestamp in seconds
end_time=1640086400,
extra_columns=["volume"]
))
# Method 2: Using date strings
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20", # Simple date string
end_time="2021-12-21",
extra_columns=["volume"]
))
# Method 3: Using date strings with time
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20 00:00:00",
end_time="2021-12-20 23:59:59",
extra_columns=["volume"]
))
# Method 4: Using datetime objects
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time=datetime(2021, 12, 20),
end_time=datetime(2021, 12, 21),
extra_columns=["volume"]
))
print(f"Loaded {len(df)} candles")
print(df.head())
```
### Available Extra Columns
- `"volume"` - Total volume
- `"buy_vol"` - Buy-side volume
- `"sell_vol"` - Sell-side volume
- `"open_time"`, `"high_time"`, `"low_time"`, `"close_time"` - Timestamps for each price point
- `"open_interest"` - Open interest (for futures)
- `"ticker"` - Market identifier
- `"period_seconds"` - Period in seconds
## Using the Charting API
The charting API provides styled financial charts with OHLC candlesticks and technical indicators.
### Creating a Basic Candlestick Chart
```python
from dexorder.api import get_api
import asyncio
from datetime import datetime
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
))
# Create candlestick chart (synchronous)
fig, ax = api.charting.plot_ohlc(
df,
title="BTC/USDT 1H",
volume=True, # Show volume bars
style="charles" # Chart style
)
# The figure is automatically captured and returned to the MCP client
```
### Adding Indicator Panels
```python
from dexorder.api import get_api
import asyncio
import pandas as pd
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Calculate a simple moving average
df['sma_20'] = df['close'].rolling(window=20).mean()
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT with SMA")
# Overlay the SMA on the price chart
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=2)
ax.legend()
# Add RSI indicator panel below
df['rsi'] = calculate_rsi(df['close'], 14)  # Your RSI calculation (a sample helper is sketched below)
rsi_ax = api.charting.add_indicator_panel(
fig, df,
columns=["rsi"],
ylabel="RSI",
ylim=(0, 100)
)
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
```
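The `calculate_rsi` helper above is not part of the Dexorder API; a minimal pandas-based sketch (an assumption, using Wilder-style exponential smoothing) could look like this:
```python
import pandas as pd

def calculate_rsi(close: pd.Series, period: int = 14) -> pd.Series:
    """Relative Strength Index from a close-price series."""
    delta = close.diff()
    gain = delta.clip(lower=0)
    loss = -delta.clip(upper=0)
    # Wilder-style smoothing via exponential moving averages
    avg_gain = gain.ewm(alpha=1 / period, min_periods=period).mean()
    avg_loss = loss.ewm(alpha=1 / period, min_periods=period).mean()
    rs = avg_gain / avg_loss
    return 100 - (100 / (1 + rs))
```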
## Complete Example
```python
from dexorder.api import get_api
import asyncio
import pandas as pd
# Get API instance
api = get_api()
# Fetch historical data using date strings (easiest for research)
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600, # 1 hour
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
))
# Add some analysis
df['sma_20'] = df['close'].rolling(window=20).mean()
df['sma_50'] = df['close'].rolling(window=50).mean()
# Create chart with volume
fig, ax = api.charting.plot_ohlc(
df,
title="BTC/USDT Analysis",
volume=True,
style="charles"
)
# Overlay moving averages
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=1.5)
ax.plot(df.index, df['sma_50'], label="SMA 50", color="red", linewidth=1.5)
ax.legend()
# Print summary statistics
print(f"Period: {len(df)} candles")
print(f"High: {df['high'].max()}")
print(f"Low: {df['low'].min()}")
print(f"Mean Volume: {df['volume'].mean():.2f}")
```
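## Custom Visualizations
Beyond candlestick charts, `api.charting.create_figure()` returns a styled figure and axes for arbitrary matplotlib plotting (see `charting_api.py`). A minimal sketch plotting the distribution of hourly returns; the ticker and date range are illustrative:
```python
from dexorder.api import get_api
import asyncio

api = get_api()

df = asyncio.run(api.data.historical_ohlc(
    ticker="BINANCE:BTC/USDT",
    period_seconds=3600,
    start_time="2021-12-01",
    end_time="2021-12-21",
))

# Hourly close-to-close returns
returns = df['close'].pct_change().dropna()

# Custom (non-candlestick) chart; the figure is still captured automatically
fig, ax = api.charting.create_figure(figsize=(10, 6))
ax.hist(returns, bins=50)
ax.set_title("BTC/USDT Hourly Return Distribution")
ax.set_xlabel("Return")
ax.set_ylabel("Frequency")
```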
## Notes
- **Async vs Sync**: Data API methods are async and require `asyncio.run()`. Charting API methods are synchronous.
- **Figure Capture**: All matplotlib figures created during script execution are automatically captured and returned as PNG images.
- **Print Statements**: All `print()` output is captured and returned as text content.
- **Errors**: Exceptions are caught and reported in the execution results.
- **Timestamps**: The API accepts flexible timestamp formats:
- Unix timestamps in **seconds** (int or float) - e.g., `1640000000`
- Date strings - e.g., `"2021-12-20"` or `"2021-12-20 12:00:00"`
- datetime objects - e.g., `datetime(2021, 12, 20)`
- pandas Timestamp objects
- Internally, the system uses microseconds since epoch, but you don't need to worry about this conversion.
- **Price/Volume Values**: All prices and volumes are returned as decimal floats, automatically converted from internal storage format using market metadata. No manual conversion is needed.
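As noted above under **Timestamps**, the `timestamp` column is microseconds since epoch. If you want human-readable times for printing or custom plots, a one-line pandas conversion is enough (a sketch, not required by the API):
```python
import pandas as pd

# Convert the microsecond timestamps to pandas datetimes for display
df['time'] = pd.to_datetime(df['timestamp'], unit='us')
print(df[['time', 'close']].tail())
```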
## Available Chart Styles
- `"charles"` (default)
- `"binance"`
- `"blueskies"`
- `"brasil"`
- `"checkers"`
- `"classic"`
- `"mike"`
- `"nightclouds"`
- `"sas"`
- `"starsandstripes"`
- `"yahoo"`

View File

@@ -0,0 +1,138 @@
# Research Script Assistant
You are a specialized assistant that creates Python research scripts for market data analysis and visualization.
## Your Purpose
Create Python scripts that:
- Fetch historical market data using the Dexorder DataAPI
- Perform statistical analysis and calculations
- Generate professional charts using matplotlib via the ChartingAPI
- Produce matplotlib figures, which are automatically captured and sent to the user as images
## Available Tools
You have direct access to these MCP tools:
- **category_write**: Create a new research script (an illustrative argument sketch follows this list)
- Required: category="research", name, description, code
- Optional: metadata (with conda_packages list if needed)
- Automatically executes the script after writing
- Returns validation results and execution output (text + images)
- **category_edit**: Update an existing research script
- Required: category="research", name
- Optional: code, description, metadata
- Automatically re-executes if code is updated
- Returns validation results and execution output
- **category_read**: Read an existing research script
- Returns: code, metadata
- **category_list**: List all research scripts
- Returns: array of {name, description, metadata}
- **execute_research**: Manually run a research script
- Note: Usually not needed since write/edit auto-execute
- Returns: text output and images
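As an illustration only, a `category_write` call might carry arguments shaped like the following; the field names come from the descriptions above, and the exact wire format is defined by the MCP tool schema:
```python
# Hypothetical argument shape for category_write (illustrative, not the MCP schema)
category_write_args = {
    "category": "research",
    "name": "BTC 7-Day Price Action",
    "description": "BTC/USDT price and volume analysis for the last 7 days",
    "code": "# full Python research script goes here",
    "metadata": {"conda_packages": []},  # optional; list extra packages if needed
}
```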
## Research Script API
All research scripts have access to the Dexorder API via:
```python
from dexorder.api import get_api
import asyncio
api = get_api()
```
The API provides two main components:
- `api.data` - DataAPI for fetching OHLC market data
- `api.charting` - ChartingAPI for creating financial charts
See your knowledge base for complete API documentation and examples.
## Coding Loop Pattern
When a user requests analysis:
1. **Understand the request**: What data is needed? What analysis? What visualization?
2. **Check for existing scripts**: Use `category_list` to see if a similar script exists
- If exists and suitable: use `category_read` to review it
- Consider editing existing script vs creating new one
3. **Write the script**: Use `category_write` (or `category_edit`)
- Write clean, well-commented Python code
- Include proper error handling
- Use appropriate ticker symbols, time ranges, and periods
- The script will auto-execute after writing
4. **Check execution results**: The tool returns:
- `validation.success`: Whether script ran without errors
- `validation.output`: Any stdout/stderr text output
- `execution.content`: Array of text and image results
- Note: Images are NOT included in your context - only text output is visible to you
5. **Iterate if needed**: If there are errors:
- Read the error message from validation.output or execution text
- Use `category_edit` to fix the script
- The script will auto-execute again
6. **Return results**: Once successful, summarize what was done
- The user will receive both your text response AND the chart images
- Don't try to describe the images in detail - the user can see them
## Important Guidelines
- **Images are pass-through only**: Chart images go directly to the user. You only see text output (print statements, errors). Don't try to analyze or describe images you can't see.
- **Async data fetching**: All `api.data` methods are async. Always use `asyncio.run()`:
```python
df = asyncio.run(api.data.historical_ohlc(...))
```
- **Charting is sync**: All `api.charting` methods are synchronous:
```python
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
```
- **Automatic figure capture**: All matplotlib figures are automatically captured. Don't save manually.
- **Print for debugging**: Use `print()` statements for debugging - you'll see this output.
- **Package management**: If a script needs packages beyond the base environment (pandas, numpy, matplotlib):
- Add `conda_packages: ["package-name"]` to metadata
- Packages are auto-installed during validation
- **Script naming**: Choose descriptive, unique names. Examples:
- "BTC Weekly Analysis"
- "ETH Volume Profile"
- "Market Correlation Heatmap"
- **Error handling**: Wrap data fetching in try/except to provide helpful error messages
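A minimal sketch of that error-handling pattern (ticker and date range are illustrative):
```python
from dexorder.api import get_api
import asyncio

api = get_api()

try:
    df = asyncio.run(api.data.historical_ohlc(
        ticker="BINANCE:BTC/USDT",
        period_seconds=3600,
        start_time="2021-12-20",
        end_time="2021-12-21",
    ))
    if df.empty:
        print("No data returned for the requested ticker and range")
    else:
        print(f"Loaded {len(df)} candles")
except Exception as exc:
    print(f"Data fetch failed: {exc}")
```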
## Example Workflow
User: "Show me BTC price action for the last 7 days with volume"
You:
1. Call `category_write` with:
- name: "BTC 7-Day Price Action"
- description: "BTC/USDT price and volume analysis for the last 7 days"
- code: (Python script that fetches data and creates chart)
2. Check execution results
3. If successful, respond: "I've created a 7-day BTC price chart with volume analysis. The chart shows [brief summary of what the script does]."
4. User receives: Your text response + the actual chart image
## Response Format
When reporting results:
- Be concise and factual
- Mention what data was fetched and what analysis was performed
- Don't try to interpret the charts (user can see them)
- If errors occurred and you fixed them, briefly mention the resolution
- Always confirm the script name for future reference
Remember: You're creating tools for the user, not just answering questions. Each research script becomes a reusable analysis tool.

View File

@@ -4,6 +4,7 @@ import * as yaml from 'js-yaml';
import * as fs from 'fs/promises'; import * as fs from 'fs/promises';
import * as path from 'path'; import * as path from 'path';
import { fileURLToPath } from 'url'; import { fileURLToPath } from 'url';
import type { K8sResources } from '../types/user.js';
const __filename = fileURLToPath(import.meta.url); const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename); const __dirname = path.dirname(__filename);
@@ -18,14 +19,15 @@ export interface K8sClientConfig {
export interface DeploymentSpec { export interface DeploymentSpec {
userId: string; userId: string;
licenseType: 'free' | 'pro' | 'enterprise'; licenseType: 'free' | 'pro' | 'enterprise';
agentImage: string; k8sResources: K8sResources;
sandboxImage: string;
sidecarImage: string; sidecarImage: string;
storageClass: string; storageClass: string;
imagePullPolicy?: string; imagePullPolicy?: string;
} }
/** /**
* Kubernetes client wrapper for managing agent deployments * Kubernetes client wrapper for managing sandbox deployments
*/ */
export class KubernetesClient { export class KubernetesClient {
private config: K8sClientConfig; private config: K8sClientConfig;
@@ -59,7 +61,7 @@ export class KubernetesClient {
static getDeploymentName(userId: string): string { static getDeploymentName(userId: string): string {
// Sanitize userId to be k8s-compliant (lowercase alphanumeric + hyphens) // Sanitize userId to be k8s-compliant (lowercase alphanumeric + hyphens)
const sanitized = userId.toLowerCase().replace(/[^a-z0-9-]/g, '-'); const sanitized = userId.toLowerCase().replace(/[^a-z0-9-]/g, '-');
return `agent-${sanitized}`; return `sandbox-${sanitized}`;
} }
/** /**
@@ -104,7 +106,7 @@ export class KubernetesClient {
} }
/** /**
* Create agent deployment from template * Create sandbox deployment from template
*/ */
async createAgentDeployment(spec: DeploymentSpec): Promise<void> { async createAgentDeployment(spec: DeploymentSpec): Promise<void> {
const deploymentName = KubernetesClient.getDeploymentName(spec.userId); const deploymentName = KubernetesClient.getDeploymentName(spec.userId);
@@ -113,28 +115,31 @@ export class KubernetesClient {
this.config.logger.info( this.config.logger.info(
{ userId: spec.userId, licenseType: spec.licenseType, deploymentName }, { userId: spec.userId, licenseType: spec.licenseType, deploymentName },
'Creating agent deployment' 'Creating sandbox deployment'
);
// Load template based on license type
const templatePath = path.join(
__dirname,
'templates',
`${spec.licenseType}-tier.yaml`
); );
const templatePath = path.join(__dirname, 'templates', 'sandbox.yaml');
const templateContent = await fs.readFile(templatePath, 'utf-8'); const templateContent = await fs.readFile(templatePath, 'utf-8');
// Substitute variables const r = spec.k8sResources;
const rendered = templateContent const rendered = templateContent
.replace(/\{\{userId\}\}/g, spec.userId) .replace(/\{\{userId\}\}/g, spec.userId)
.replace(/\{\{deploymentName\}\}/g, deploymentName) .replace(/\{\{deploymentName\}\}/g, deploymentName)
.replace(/\{\{serviceName\}\}/g, serviceName) .replace(/\{\{serviceName\}\}/g, serviceName)
.replace(/\{\{pvcName\}\}/g, pvcName) .replace(/\{\{pvcName\}\}/g, pvcName)
.replace(/\{\{agentImage\}\}/g, spec.agentImage) .replace(/\{\{sandboxImage\}\}/g, spec.sandboxImage)
.replace(/\{\{sidecarImage\}\}/g, spec.sidecarImage) .replace(/\{\{sidecarImage\}\}/g, spec.sidecarImage)
.replace(/\{\{storageClass\}\}/g, spec.storageClass) .replace(/\{\{storageClass\}\}/g, spec.storageClass)
.replace(/\{\{imagePullPolicy\}\}/g, spec.imagePullPolicy || 'Always'); .replace(/\{\{imagePullPolicy\}\}/g, spec.imagePullPolicy || 'Always')
.replace(/\{\{licenseType\}\}/g, spec.licenseType)
.replace(/\{\{memoryRequest\}\}/g, r.memoryRequest)
.replace(/\{\{memoryLimit\}\}/g, r.memoryLimit)
.replace(/\{\{cpuRequest\}\}/g, r.cpuRequest)
.replace(/\{\{cpuLimit\}\}/g, r.cpuLimit)
.replace(/\{\{storage\}\}/g, r.storage)
.replace(/\{\{tmpSizeLimit\}\}/g, r.tmpSizeLimit)
.replace(/\{\{enableIdleShutdown\}\}/g, String(r.enableIdleShutdown))
.replace(/\{\{idleTimeoutMinutes\}\}/g, String(r.idleTimeoutMinutes));
// Parse YAML documents (deployment, pvc, service) // Parse YAML documents (deployment, pvc, service)
const documents = yaml.loadAll(rendered) as any[]; const documents = yaml.loadAll(rendered) as any[];
@@ -186,7 +191,7 @@ export class KubernetesClient {
} }
} }
this.config.logger.info({ deploymentName }, 'Agent deployment created successfully'); this.config.logger.info({ deploymentName }, 'Sandbox deployment created successfully');
} }
/** /**
@@ -302,7 +307,7 @@ export class KubernetesClient {
const serviceName = KubernetesClient.getServiceName(userId); const serviceName = KubernetesClient.getServiceName(userId);
const pvcName = KubernetesClient.getPvcName(userId); const pvcName = KubernetesClient.getPvcName(userId);
this.config.logger.info({ userId, deploymentName }, 'Deleting agent deployment'); this.config.logger.info({ userId, deploymentName }, 'Deleting sandbox deployment');
// Delete deployment // Delete deployment
try { try {

View File

@@ -1,10 +1,10 @@
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import { KubernetesClient, type DeploymentSpec } from './client.js'; import { KubernetesClient, type DeploymentSpec } from './client.js';
import type { UserLicense } from '../types/user.js'; import type { License } from '../types/user.js';
export interface ContainerManagerConfig { export interface ContainerManagerConfig {
k8sClient: KubernetesClient; k8sClient: KubernetesClient;
agentImage: string; sandboxImage: string;
sidecarImage: string; sidecarImage: string;
storageClass: string; storageClass: string;
imagePullPolicy?: string; imagePullPolicy?: string;
@@ -25,7 +25,7 @@ export interface EnsureContainerResult {
} }
/** /**
* Container manager orchestrates agent container lifecycle * Container manager orchestrates sandbox container lifecycle
*/ */
export class ContainerManager { export class ContainerManager {
private config: ContainerManagerConfig; private config: ContainerManagerConfig;
@@ -41,7 +41,7 @@ export class ContainerManager {
*/ */
async ensureContainerRunning( async ensureContainerRunning(
userId: string, userId: string,
license: UserLicense, license: License,
waitForReady: boolean = true waitForReady: boolean = true
): Promise<EnsureContainerResult> { ): Promise<EnsureContainerResult> {
const deploymentName = KubernetesClient.getDeploymentName(userId); const deploymentName = KubernetesClient.getDeploymentName(userId);
@@ -80,7 +80,8 @@ export class ContainerManager {
const spec: DeploymentSpec = { const spec: DeploymentSpec = {
userId, userId,
licenseType: license.licenseType, licenseType: license.licenseType,
agentImage: this.config.agentImage, k8sResources: license.k8sResources,
sandboxImage: this.config.sandboxImage,
sidecarImage: this.config.sidecarImage, sidecarImage: this.config.sidecarImage,
storageClass: this.config.storageClass, storageClass: this.config.storageClass,
imagePullPolicy: this.config.imagePullPolicy, imagePullPolicy: this.config.imagePullPolicy,

View File

@@ -1,206 +0,0 @@
# Free tier agent deployment template
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{deploymentName}}
namespace: dexorder-agents
labels:
app.kubernetes.io/name: agent
app.kubernetes.io/component: user-agent
dexorder.io/component: agent
dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: free
spec:
replicas: 1
selector:
matchLabels:
dexorder.io/user-id: {{userId}}
template:
metadata:
labels:
dexorder.io/component: agent
dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: free
spec:
serviceAccountName: agent-lifecycle
shareProcessNamespace: true
securityContext:
runAsNonRoot: true
runAsUser: 1000
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
containers:
- name: agent
image: {{agentImage}}
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 1000
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
env:
- name: USER_ID
value: {{userId}}
- name: IDLE_TIMEOUT_MINUTES
value: "15"
- name: IDLE_CHECK_INTERVAL_SECONDS
value: "60"
- name: ENABLE_IDLE_SHUTDOWN
value: "true"
- name: MCP_SERVER_PORT
value: "3000"
- name: ZMQ_CONTROL_PORT
value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571"
ports:
- name: mcp
containerPort: 3000
protocol: TCP
- name: zmq-control
containerPort: 5555
protocol: TCP
volumeMounts:
- name: agent-data
mountPath: /app/data
- name: agent-config
mountPath: /app/config
readOnly: true
- name: tmp
mountPath: /tmp
- name: shared-run
mountPath: /var/run/agent
livenessProbe:
httpGet:
path: /health
port: mcp
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /health
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 1000
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
resources:
requests:
memory: "32Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "50m"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: DEPLOYMENT_NAME
valueFrom:
fieldRef:
fieldPath: metadata.labels['dexorder.io/deployment']
- name: USER_TYPE
value: "free"
- name: MAIN_CONTAINER_PID
value: "1"
volumeMounts:
- name: shared-run
mountPath: /var/run/agent
readOnly: true
volumes:
- name: agent-data
persistentVolumeClaim:
claimName: {{pvcName}}
- name: agent-config
configMap:
name: agent-config
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 128Mi
- name: shared-run
emptyDir:
medium: Memory
sizeLimit: 1Mi
restartPolicy: Always
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{pvcName}}
namespace: dexorder-agents
labels:
dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: free
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: {{storageClass}}
---
apiVersion: v1
kind: Service
metadata:
name: {{serviceName}}
namespace: dexorder-agents
labels:
dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: free
spec:
type: ClusterIP
selector:
dexorder.io/user-id: {{userId}}
ports:
- name: mcp
port: 3000
targetPort: mcp
protocol: TCP
- name: zmq-control
port: 5555
targetPort: zmq-control
protocol: TCP

View File

@@ -1,206 +0,0 @@
# Pro tier agent deployment template
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{deploymentName}}
namespace: dexorder-agents
labels:
app.kubernetes.io/name: agent
app.kubernetes.io/component: user-agent
dexorder.io/component: agent
dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: pro
spec:
replicas: 1
selector:
matchLabels:
dexorder.io/user-id: {{userId}}
template:
metadata:
labels:
dexorder.io/component: agent
dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: pro
spec:
serviceAccountName: agent-lifecycle
shareProcessNamespace: true
securityContext:
runAsNonRoot: true
runAsUser: 1000
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
containers:
- name: agent
image: {{agentImage}}
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 1000
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "2Gi"
cpu: "2000m"
env:
- name: USER_ID
value: {{userId}}
- name: IDLE_TIMEOUT_MINUTES
value: "60"
- name: IDLE_CHECK_INTERVAL_SECONDS
value: "60"
- name: ENABLE_IDLE_SHUTDOWN
value: "true"
- name: MCP_SERVER_PORT
value: "3000"
- name: ZMQ_CONTROL_PORT
value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571"
ports:
- name: mcp
containerPort: 3000
protocol: TCP
- name: zmq-control
containerPort: 5555
protocol: TCP
volumeMounts:
- name: agent-data
mountPath: /app/data
- name: agent-config
mountPath: /app/config
readOnly: true
- name: tmp
mountPath: /tmp
- name: shared-run
mountPath: /var/run/agent
livenessProbe:
httpGet:
path: /health
port: mcp
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /health
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 1000
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
resources:
requests:
memory: "32Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "50m"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: DEPLOYMENT_NAME
valueFrom:
fieldRef:
fieldPath: metadata.labels['dexorder.io/deployment']
- name: USER_TYPE
value: "pro"
- name: MAIN_CONTAINER_PID
value: "1"
volumeMounts:
- name: shared-run
mountPath: /var/run/agent
readOnly: true
volumes:
- name: agent-data
persistentVolumeClaim:
claimName: {{pvcName}}
- name: agent-config
configMap:
name: agent-config
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 256Mi
- name: shared-run
emptyDir:
medium: Memory
sizeLimit: 1Mi
restartPolicy: Always
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{pvcName}}
namespace: dexorder-agents
labels:
dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: pro
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: {{storageClass}}
---
apiVersion: v1
kind: Service
metadata:
name: {{serviceName}}
namespace: dexorder-agents
labels:
dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: pro
spec:
type: ClusterIP
selector:
dexorder.io/user-id: {{userId}}
ports:
- name: mcp
port: 3000
targetPort: mcp
protocol: TCP
- name: zmq-control
port: 5555
targetPort: zmq-control
protocol: TCP

View File

@@ -1,19 +1,23 @@
# Enterprise tier agent deployment template # Sandbox deployment template — variables are populated from the user's License k8sResources.
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}} # Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}},
# Enterprise: No idle shutdown, larger resources # {{sandboxImage}}, {{sidecarImage}}, {{imagePullPolicy}}, {{storageClass}},
# {{licenseType}},
# {{memoryRequest}}, {{memoryLimit}}, {{cpuRequest}}, {{cpuLimit}},
# {{storage}}, {{tmpSizeLimit}},
# {{enableIdleShutdown}}, {{idleTimeoutMinutes}}
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: {{deploymentName}} name: {{deploymentName}}
namespace: dexorder-agents namespace: dexorder-sandboxes
labels: labels:
app.kubernetes.io/name: agent app.kubernetes.io/name: sandbox
app.kubernetes.io/component: user-agent app.kubernetes.io/component: user-sandbox
dexorder.io/component: agent dexorder.io/component: sandbox
dexorder.io/user-id: {{userId}} dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}} dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: enterprise dexorder.io/license-tier: {{licenseType}}
spec: spec:
replicas: 1 replicas: 1
selector: selector:
@@ -22,26 +26,26 @@ spec:
template: template:
metadata: metadata:
labels: labels:
dexorder.io/component: agent dexorder.io/component: sandbox
dexorder.io/user-id: {{userId}} dexorder.io/user-id: {{userId}}
dexorder.io/deployment: {{deploymentName}} dexorder.io/deployment: {{deploymentName}}
dexorder.io/license-tier: enterprise dexorder.io/license-tier: {{licenseType}}
spec: spec:
serviceAccountName: agent-lifecycle serviceAccountName: sandbox-lifecycle
shareProcessNamespace: true shareProcessNamespace: true
securityContext: securityContext:
runAsNonRoot: true runAsNonRoot: true
runAsUser: 1000 runAsUser: 1000
fsGroup: 1000 fsGroup: 1000
seccompProfile: seccompProfile:
type: RuntimeDefault type: RuntimeDefault
containers: containers:
- name: agent - name: sandbox
image: {{agentImage}} image: {{sandboxImage}}
imagePullPolicy: {{imagePullPolicy}} imagePullPolicy: {{imagePullPolicy}}
securityContext: securityContext:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
runAsNonRoot: true runAsNonRoot: true
@@ -50,31 +54,39 @@ spec:
capabilities: capabilities:
drop: drop:
- ALL - ALL
resources: resources:
requests: requests:
memory: "1Gi" memory: "{{memoryRequest}}"
cpu: "500m" cpu: "{{cpuRequest}}"
limits: limits:
memory: "4Gi" memory: "{{memoryLimit}}"
cpu: "4000m" cpu: "{{cpuLimit}}"
env: env:
- name: USER_ID - name: USER_ID
value: {{userId}} value: {{userId}}
- name: IDLE_TIMEOUT_MINUTES - name: IDLE_TIMEOUT_MINUTES
value: "0" value: "{{idleTimeoutMinutes}}"
- name: IDLE_CHECK_INTERVAL_SECONDS - name: IDLE_CHECK_INTERVAL_SECONDS
value: "60" value: "60"
- name: ENABLE_IDLE_SHUTDOWN - name: ENABLE_IDLE_SHUTDOWN
value: "false" value: "{{enableIdleShutdown}}"
- name: MCP_SERVER_PORT - name: MCP_SERVER_PORT
value: "3000" value: "3000"
- name: ZMQ_CONTROL_PORT - name: ZMQ_CONTROL_PORT
value: "5555" value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT - name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571" value: "tcp://gateway.default.svc.cluster.local:5571"
- name: ICEBERG_CATALOG_URI
value: "http://iceberg-catalog.default.svc.cluster.local:8181"
- name: ICEBERG_NAMESPACE
value: "trading"
- name: S3_ENDPOINT
value: "http://minio.default.svc.cluster.local:9000"
- name: RELAY_ENDPOINT
value: "tcp://relay.default.svc.cluster.local:5559"
ports: ports:
- name: mcp - name: mcp
containerPort: 3000 containerPort: 3000
@@ -82,17 +94,17 @@ spec:
- name: zmq-control - name: zmq-control
containerPort: 5555 containerPort: 5555
protocol: TCP protocol: TCP
volumeMounts: volumeMounts:
- name: agent-data - name: sandbox-data
mountPath: /app/data mountPath: /app/data
- name: agent-config - name: sandbox-config
mountPath: /app/config mountPath: /app/config
readOnly: true readOnly: true
- name: tmp - name: tmp
mountPath: /tmp mountPath: /tmp
- name: shared-run - name: shared-run
mountPath: /var/run/agent mountPath: /var/run/sandbox
livenessProbe: livenessProbe:
httpGet: httpGet:
@@ -112,7 +124,7 @@ spec:
- name: lifecycle-sidecar - name: lifecycle-sidecar
image: {{sidecarImage}} image: {{sidecarImage}}
imagePullPolicy: {{imagePullPolicy}} imagePullPolicy: {{imagePullPolicy}}
securityContext: securityContext:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
runAsNonRoot: true runAsNonRoot: true
@@ -121,7 +133,7 @@ spec:
capabilities: capabilities:
drop: drop:
- ALL - ALL
resources: resources:
requests: requests:
memory: "32Mi" memory: "32Mi"
@@ -129,7 +141,7 @@ spec:
limits: limits:
memory: "64Mi" memory: "64Mi"
cpu: "50m" cpu: "50m"
env: env:
- name: NAMESPACE - name: NAMESPACE
valueFrom: valueFrom:
@@ -140,26 +152,30 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.labels['dexorder.io/deployment'] fieldPath: metadata.labels['dexorder.io/deployment']
- name: USER_TYPE - name: USER_TYPE
value: "enterprise" value: "{{licenseType}}"
- name: MAIN_CONTAINER_PID - name: MAIN_CONTAINER_PID
value: "1" value: "1"
volumeMounts: volumeMounts:
- name: shared-run - name: shared-run
mountPath: /var/run/agent mountPath: /var/run/sandbox
readOnly: true readOnly: true
volumes: volumes:
- name: agent-data - name: sandbox-data
persistentVolumeClaim: persistentVolumeClaim:
claimName: {{pvcName}} claimName: {{pvcName}}
- name: agent-config - name: sandbox-config
configMap: projected:
name: agent-config sources:
- configMap:
name: sandbox-config
- secret:
name: sandbox-secrets
- name: tmp - name: tmp
emptyDir: emptyDir:
medium: Memory medium: Memory
sizeLimit: 512Mi sizeLimit: {{tmpSizeLimit}}
- name: shared-run - name: shared-run
emptyDir: emptyDir:
medium: Memory medium: Memory
@@ -172,26 +188,26 @@ apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: {{pvcName}} name: {{pvcName}}
namespace: dexorder-agents namespace: dexorder-sandboxes
labels: labels:
dexorder.io/user-id: {{userId}} dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: enterprise dexorder.io/license-tier: {{licenseType}}
spec: spec:
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 50Gi storage: {{storage}}
storageClassName: {{storageClass}} storageClassName: {{storageClass}}
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: {{serviceName}} name: {{serviceName}}
namespace: dexorder-agents namespace: dexorder-sandboxes
labels: labels:
dexorder.io/user-id: {{userId}} dexorder.io/user-id: {{userId}}
dexorder.io/license-tier: enterprise dexorder.io/license-tier: {{licenseType}}
spec: spec:
type: ClusterIP type: ClusterIP
selector: selector:

View File

@@ -1,7 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import { LLMProviderFactory, type ModelConfig, LLMProvider, type LicenseModelsConfig } from './provider.js'; import { LLMProviderFactory, type ModelConfig, LLMProvider, type LicenseModelsConfig } from './provider.js';
import type { UserLicense } from '../types/user.js'; import type { License } from '../types/user.js';
/** /**
* Model routing strategies * Model routing strategies
@@ -39,8 +39,9 @@ export class ModelRouter {
*/ */
async route( async route(
message: string, message: string,
license: UserLicense, license: License,
strategy: RoutingStrategy = RoutingStrategy.USER_PREFERENCE strategy: RoutingStrategy = RoutingStrategy.USER_PREFERENCE,
userId?: string
): Promise<BaseChatModel> { ): Promise<BaseChatModel> {
let modelConfig: ModelConfig; let modelConfig: ModelConfig;
@@ -67,7 +68,7 @@ export class ModelRouter {
this.logger.info( this.logger.info(
{ {
userId: license.userId, userId,
strategy, strategy,
provider: modelConfig.provider, provider: modelConfig.provider,
model: modelConfig.model, model: modelConfig.model,
@@ -81,9 +82,9 @@ export class ModelRouter {
/** /**
* Route based on user's preferred model (if set in license) * Route based on user's preferred model (if set in license)
*/ */
private routeByUserPreference(license: UserLicense): ModelConfig { private routeByUserPreference(license: License): ModelConfig {
// Check if user has custom model preference // Check if user has custom model preference
const preferredModel = (license as any).preferredModel as ModelConfig | undefined; const preferredModel = license.preferredModel as ModelConfig | undefined;
if (preferredModel && this.isModelAllowed(preferredModel, license)) { if (preferredModel && this.isModelAllowed(preferredModel, license)) {
return preferredModel; return preferredModel;
@@ -96,7 +97,7 @@ export class ModelRouter {
/** /**
* Route based on query complexity * Route based on query complexity
*/ */
private routeByComplexity(message: string, license: UserLicense): ModelConfig { private routeByComplexity(message: string, license: License): ModelConfig {
const isComplex = this.isComplexQuery(message); const isComplex = this.isComplexQuery(message);
// Use configuration if available // Use configuration if available
@@ -127,7 +128,7 @@ export class ModelRouter {
/** /**
* Route based on license tier * Route based on license tier
*/ */
private routeByLicenseTier(license: UserLicense): ModelConfig { private routeByLicenseTier(license: License): ModelConfig {
// Use configuration if available // Use configuration if available
if (this.licenseModels) { if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType]; const tierConfig = this.licenseModels[license.licenseType];
@@ -155,7 +156,7 @@ export class ModelRouter {
/** /**
* Route to cheapest available model * Route to cheapest available model
*/ */
private routeByCost(license: UserLicense): ModelConfig { private routeByCost(license: License): ModelConfig {
// Use configuration if available // Use configuration if available
if (this.licenseModels) { if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType]; const tierConfig = this.licenseModels[license.licenseType];
@@ -171,7 +172,7 @@ export class ModelRouter {
/** /**
* Check if model is allowed for user's license * Check if model is allowed for user's license
*/ */
private isModelAllowed(model: ModelConfig, license: UserLicense): boolean { private isModelAllowed(model: ModelConfig, license: License): boolean {
// Use configuration if available // Use configuration if available
if (this.licenseModels) { if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType]; const tierConfig = this.licenseModels[license.licenseType];

View File

@@ -15,6 +15,8 @@ import { KubernetesClient } from './k8s/client.js';
import { ContainerManager } from './k8s/container-manager.js'; import { ContainerManager } from './k8s/container-manager.js';
import { ZMQRelayClient } from './clients/zmq-relay-client.js'; import { ZMQRelayClient } from './clients/zmq-relay-client.js';
import { IcebergClient } from './clients/iceberg-client.js'; import { IcebergClient } from './clients/iceberg-client.js';
import { ConversationStore } from './harness/memory/conversation-store.js';
import { AgentHarness, type HarnessSessionConfig } from './harness/agent-harness.js';
import { OHLCService } from './services/ohlc-service.js'; import { OHLCService } from './services/ohlc-service.js';
import { SymbolIndexService } from './services/symbol-index-service.js'; import { SymbolIndexService } from './services/symbol-index-service.js';
import { SymbolRoutes } from './routes/symbol-routes.js'; import { SymbolRoutes } from './routes/symbol-routes.js';
@@ -38,6 +40,7 @@ import {
} from './events/index.js'; } from './events/index.js';
import { QdrantClient } from './clients/qdrant-client.js'; import { QdrantClient } from './clients/qdrant-client.js';
import { EmbeddingService, RAGRetriever, DocumentLoader } from './harness/memory/index.js'; import { EmbeddingService, RAGRetriever, DocumentLoader } from './harness/memory/index.js';
import { initializeToolRegistry } from './tools/tool-registry.js';
import { join } from 'path'; import { join } from 'path';
import { fileURLToPath } from 'url'; import { fileURLToPath } from 'url';
import { dirname } from 'path'; import { dirname } from 'path';
@@ -131,6 +134,9 @@ function loadConfig() {
// Redis configuration (for harness memory layer) // Redis configuration (for harness memory layer)
redisUrl: configData.redis?.url || process.env.REDIS_URL || 'redis://localhost:6379', redisUrl: configData.redis?.url || process.env.REDIS_URL || 'redis://localhost:6379',
// Conversation history limit: number of prior turns loaded as LLM context and flushed to Iceberg
conversationHistoryLimit: configData.agent?.conversation_history_limit || parseInt(process.env.CONVERSATION_HISTORY_LIMIT || '20'),
// Qdrant configuration (for RAG) // Qdrant configuration (for RAG)
qdrant: { qdrant: {
url: configData.qdrant?.url || process.env.QDRANT_URL || 'http://localhost:6333', url: configData.qdrant?.url || process.env.QDRANT_URL || 'http://localhost:6333',
@@ -147,6 +153,7 @@ function loadConfig() {
s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT, s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT,
s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY, s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY,
s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY, s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY,
conversationsBucket: configData.iceberg?.conversations_bucket || process.env.CONVERSATIONS_S3_BUCKET,
}, },
// Relay configuration (for historical data) // Relay configuration (for historical data)
@@ -165,12 +172,12 @@ function loadConfig() {
// Kubernetes configuration // Kubernetes configuration
kubernetes: { kubernetes: {
namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'dexorder-agents', namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'dexorder-sandboxes',
inCluster: configData.kubernetes?.in_cluster ?? (process.env.KUBERNETES_IN_CLUSTER === 'true'), inCluster: configData.kubernetes?.in_cluster ?? (process.env.KUBERNETES_IN_CLUSTER === 'true'),
context: configData.kubernetes?.context || process.env.KUBERNETES_CONTEXT, context: configData.kubernetes?.context || process.env.KUBERNETES_CONTEXT,
agentImage: configData.kubernetes?.agent_image || process.env.AGENT_IMAGE || 'ghcr.io/dexorder/agent:latest', sandboxImage: configData.kubernetes?.sandbox_image || process.env.SANDBOX_IMAGE || 'ghcr.io/dexorder/sandbox:latest',
sidecarImage: configData.kubernetes?.sidecar_image || process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest', sidecarImage: configData.kubernetes?.sidecar_image || process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest',
storageClass: configData.kubernetes?.storage_class || process.env.AGENT_STORAGE_CLASS || 'standard', storageClass: configData.kubernetes?.storage_class || process.env.SANDBOX_STORAGE_CLASS || 'standard',
imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always', imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always',
}, },
}; };
@@ -261,11 +268,25 @@ const qdrantClient = new QdrantClient(config.qdrant, app.log);
// Initialize Iceberg client (for durable storage) // Initialize Iceberg client (for durable storage)
// const icebergClient = new IcebergClient(config.iceberg, app.log); // const icebergClient = new IcebergClient(config.iceberg, app.log);
// Create metadata update callback that will be wired up when SymbolIndexService initializes
// This ensures we don't miss notifications sent before the service is ready
let symbolIndexService: SymbolIndexService | undefined;
const onMetadataUpdate = async () => {
if (symbolIndexService) {
app.log.info('Reloading symbol metadata from Iceberg');
await symbolIndexService.initialize();
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol metadata reloaded');
} else {
app.log.warn('Received METADATA_UPDATE before SymbolIndexService initialized, ignoring');
}
};
// Initialize ZMQ Relay client (for historical data) // Initialize ZMQ Relay client (for historical data)
// Note: onMetadataUpdate callback will be set after symbolIndexService is initialized // Pass onMetadataUpdate callback so it's registered before connection
const zmqRelayClient = new ZMQRelayClient({ const zmqRelayClient = new ZMQRelayClient({
relayRequestEndpoint: config.relay.requestEndpoint, relayRequestEndpoint: config.relay.requestEndpoint,
relayNotificationEndpoint: config.relay.notificationEndpoint, relayNotificationEndpoint: config.relay.notificationEndpoint,
onMetadataUpdate,
}, app.log); }, app.log);
app.log.info({ app.log.info({
@@ -286,7 +307,7 @@ const k8sClient = new KubernetesClient({
const containerManager = new ContainerManager({ const containerManager = new ContainerManager({
k8sClient, k8sClient,
agentImage: config.kubernetes.agentImage, sandboxImage: config.kubernetes.sandboxImage,
sidecarImage: config.kubernetes.sidecarImage, sidecarImage: config.kubernetes.sidecarImage,
storageClass: config.kubernetes.storageClass, storageClass: config.kubernetes.storageClass,
imagePullPolicy: config.kubernetes.imagePullPolicy, imagePullPolicy: config.kubernetes.imagePullPolicy,
@@ -326,10 +347,13 @@ const eventRouter = new EventRouter({
}); });
app.log.debug('Event router initialized'); app.log.debug('Event router initialized');
// Initialize shared Iceberg client (used by both OHLC service and conversation store)
const icebergClient = new IcebergClient(config.iceberg, app.log);
app.log.debug('Iceberg client initialized');
// Initialize OHLC service (optional - only if relay is available) // Initialize OHLC service (optional - only if relay is available)
let ohlcService: OHLCService | undefined; let ohlcService: OHLCService | undefined;
try { try {
const icebergClient = new IcebergClient(config.iceberg, app.log);
ohlcService = new OHLCService({ ohlcService = new OHLCService({
icebergClient, icebergClient,
relayClient: zmqRelayClient, relayClient: zmqRelayClient,
@@ -340,16 +364,30 @@ try {
app.log.warn({ error }, 'Failed to initialize OHLC service - historical data will not be available'); app.log.warn({ error }, 'Failed to initialize OHLC service - historical data will not be available');
} }
// Initialize Symbol Index Service (deferred to after server starts) // Initialize conversation store (Redis hot path + Iceberg cold path)
let symbolIndexService: SymbolIndexService | undefined; const conversationStore = new ConversationStore(redis, app.log, icebergClient);
app.log.debug('Conversation store initialized');
// Harness factory: captures infrastructure deps; channel handlers stay infrastructure-free
function createHarness(sessionConfig: HarnessSessionConfig): AgentHarness {
return new AgentHarness({
...sessionConfig,
providerConfig: config.providerConfig,
conversationStore,
historyLimit: config.conversationHistoryLimit,
});
}
// Symbol Index Service will be initialized after server starts
// (declared above near ZMQ client initialization)
// Initialize channel handlers // Initialize channel handlers
const websocketHandler = new WebSocketHandler({ const websocketHandler = new WebSocketHandler({
authenticator, authenticator,
containerManager, containerManager,
providerConfig: config.providerConfig,
sessionRegistry, sessionRegistry,
eventSubscriber, eventSubscriber,
createHarness,
ohlcService, // Optional ohlcService, // Optional
symbolIndexService, // Optional symbolIndexService, // Optional
}); });
@@ -357,8 +395,8 @@ app.log.debug('WebSocket handler initialized');
const telegramHandler = new TelegramHandler({ const telegramHandler = new TelegramHandler({
authenticator, authenticator,
providerConfig: config.providerConfig,
telegramBotToken: config.telegramBotToken, telegramBotToken: config.telegramBotToken,
createHarness,
}); });
app.log.debug('Telegram handler initialized'); app.log.debug('Telegram handler initialized');
@@ -477,6 +515,10 @@ app.get('/admin/knowledge-stats', async (_request, reply) => {
const shutdown = async () => { const shutdown = async () => {
app.log.info('Shutting down gracefully...'); app.log.info('Shutting down gracefully...');
try { try {
// Flush all active sessions to Iceberg before shutdown
await websocketHandler.endAllSessions();
await telegramHandler.endAllSessions();
// Stop event system first // Stop event system first
await eventSubscriber.stop(); await eventSubscriber.stop();
await eventRouter.stop(); await eventRouter.stop();
@@ -529,6 +571,53 @@ try {
app.log.warn({ error }, 'Qdrant initialization failed - RAG will not be available'); app.log.warn({ error }, 'Qdrant initialization failed - RAG will not be available');
} }
// Initialize tool registry
app.log.debug('Initializing tool registry...');
try {
const toolRegistry = initializeToolRegistry(app.log, {
// Use getter functions to support lazy initialization
ohlcService: () => ohlcService,
symbolIndexService: () => symbolIndexService,
workspaceManager: undefined, // Will be set per-session
});
// Register agent tool configurations
// Main agent: platform tools + user's general MCP tools
toolRegistry.registerAgentTools({
agentName: 'main',
platformTools: ['symbol_lookup', 'get_chart_data'],
mcpTools: [], // No MCP tools for main agent by default (can be extended later)
});
// Research subagent: only MCP tools for script creation/execution
toolRegistry.registerAgentTools({
agentName: 'research',
platformTools: [], // No platform tools (works at script level)
mcpTools: ['category_*', 'execute_research'],
});
// Code reviewer subagent: no tools by default
toolRegistry.registerAgentTools({
agentName: 'code-reviewer',
platformTools: [],
mcpTools: [],
});
app.log.info(
{
agents: toolRegistry.getRegisteredAgents(),
configs: toolRegistry.getRegisteredAgents().map(name => ({
name,
config: toolRegistry.getAgentToolConfig(name),
})),
},
'Tool registry initialized'
);
} catch (error) {
app.log.error({ error }, 'Failed to initialize tool registry');
// Non-fatal - continue without tools
}
// Initialize RAG system and load global knowledge // Initialize RAG system and load global knowledge
app.log.debug('Initializing RAG system...'); app.log.debug('Initializing RAG system...');
try { try {
@@ -586,6 +675,7 @@ try {
// Initialize Symbol Index Service (after server is running) // Initialize Symbol Index Service (after server is running)
// This is done asynchronously to not block server startup // This is done asynchronously to not block server startup
// The onMetadataUpdate callback is already registered with zmqRelayClient
(async () => { (async () => {
try { try {
const icebergClient = new IcebergClient(config.iceberg, app.log); const icebergClient = new IcebergClient(config.iceberg, app.log);
@@ -594,18 +684,13 @@ try {
logger: app.log, logger: app.log,
}); });
await indexService.initialize(); await indexService.initialize();
// Assign to module-level variable so onMetadataUpdate callback can use it
symbolIndexService = indexService; symbolIndexService = indexService;
// Update websocket handler's config so it can use the service // Update websocket handler's config so it can use the service
(websocketHandler as any).config.symbolIndexService = indexService; (websocketHandler as any).config.symbolIndexService = indexService;
// Configure ZMQ relay to reload symbol metadata on updates
(zmqRelayClient as any).config.onMetadataUpdate = async () => {
app.log.info('Reloading symbol metadata from Iceberg');
await indexService.initialize();
app.log.info({ stats: indexService.getStats() }, 'Symbol metadata reloaded');
};
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol index service initialized'); app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol index service initialized');
} catch (error) { } catch (error) {
app.log.warn({ error }, 'Failed to initialize symbol index service - symbol search will not be available'); app.log.warn({ error }, 'Failed to initialize symbol index service - symbol search will not be available');

View File

@@ -1,7 +1,7 @@
/** /**
* OHLC Service - High-level API for historical market data * OHLC Service - High-level API for historical market data
* *
* Workflow (mirroring client-py/dexorder/ohlc_client.py): * Workflow (mirroring sandbox/dexorder/ohlc_client.py):
* 1. Check Iceberg for existing data * 1. Check Iceberg for existing data
* 2. Identify missing ranges * 2. Identify missing ranges
* 3. If complete, return immediately * 3. If complete, return immediately

View File

@@ -0,0 +1,11 @@
// Tools exports
export * from './platform/index.js';
export * from './mcp/index.js';
export {
ToolRegistry,
initializeToolRegistry,
getToolRegistry,
type AgentToolConfig,
type PlatformServices,
} from './tool-registry.js';

View File

@@ -0,0 +1,7 @@
// MCP tool wrappers exports
export {
createMCPToolWrapper,
createMCPToolWrappers,
type MCPToolInfo,
} from './mcp-tool-wrapper.js';

View File

@@ -0,0 +1,186 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../harness/mcp-client.js';
/**
* MCP Tool Wrapper
*
* Wraps remote MCP server tools as standard LangChain tools.
* Provides dynamic tool creation based on MCP tool definitions.
*/
export interface MCPToolInfo {
name: string;
description?: string;
inputSchema?: {
type: string;
properties?: Record<string, any>;
required?: string[];
};
}
/**
* Create a LangChain tool from an MCP tool definition
*/
export function createMCPToolWrapper(
toolInfo: MCPToolInfo,
mcpClient: MCPClientConnector,
logger: FastifyBaseLogger,
onImage?: (image: { data: string; mimeType: string }) => void
): DynamicStructuredTool {
// Convert MCP input schema to Zod schema
const zodSchema = mcpInputSchemaToZod(toolInfo.inputSchema);
return new DynamicStructuredTool({
name: toolInfo.name,
description: toolInfo.description || `MCP tool: ${toolInfo.name}`,
schema: zodSchema,
func: async (input: Record<string, unknown>) => {
try {
const result = await mcpClient.callTool(toolInfo.name, input);
logger.info({ tool: toolInfo.name }, 'MCP tool call completed');
// Handle different MCP result formats
if (typeof result === 'string') {
return result;
}
// Handle structured MCP responses with content arrays
if (result && typeof result === 'object') {
// Extract text content from MCP response
const textParts: string[] = [];
// Check for content array (standard MCP format)
if (Array.isArray((result as any).content)) {
logger.debug({ tool: toolInfo.name, itemCount: (result as any).content.length }, 'Processing MCP content array');
for (const item of (result as any).content) {
if (item.type === 'text' && item.text) {
textParts.push(item.text);
} else if (item.type === 'image' && item.data && item.mimeType) {
logger.info({ tool: toolInfo.name, mimeType: item.mimeType }, 'Capturing image from MCP response');
onImage?.({ data: item.data, mimeType: item.mimeType });
}
}
if (textParts.length > 0) {
return textParts.join('\n\n');
}
}
// Check for nested execution.content
if ((result as any).execution && Array.isArray((result as any).execution.content)) {
for (const item of (result as any).execution.content) {
if (item.type === 'text' && item.text) {
textParts.push(item.text);
} else if (item.type === 'image' && item.data && item.mimeType) {
onImage?.({ data: item.data, mimeType: item.mimeType });
}
}
if (textParts.length > 0) {
return textParts.join('\n\n');
}
}
// Fallback: stringify the result
return JSON.stringify(result, null, 2);
}
return String(result || '');
} catch (error) {
logger.error({ error, tool: toolInfo.name, input }, 'MCP tool call failed');
return `Error calling MCP tool ${toolInfo.name}: ${error instanceof Error ? error.message : String(error)}`;
}
},
});
}
/**
* Convert MCP input schema to Zod schema
*/
function mcpInputSchemaToZod(inputSchema?: MCPToolInfo['inputSchema']): z.ZodObject<any> {
if (!inputSchema || !inputSchema.properties) {
// Generic schema that accepts any properties
return z.object({}).passthrough();
}
const properties = inputSchema.properties;
const required = inputSchema.required || [];
const zodFields: Record<string, z.ZodTypeAny> = {};
for (const [key, prop] of Object.entries(properties)) {
let zodType: z.ZodTypeAny;
// Map JSON Schema types to Zod types
switch (prop.type) {
case 'string':
zodType = z.string().describe(prop.description || '');
break;
case 'number':
zodType = z.number().describe(prop.description || '');
break;
case 'integer':
zodType = z.number().int().describe(prop.description || '');
break;
case 'boolean':
zodType = z.boolean().describe(prop.description || '');
break;
case 'array':
// Handle array items
if (prop.items) {
const itemType = getZodTypeForProperty(prop.items);
zodType = z.array(itemType).describe(prop.description || '');
} else {
zodType = z.array(z.any()).describe(prop.description || '');
}
break;
case 'object':
zodType = z.object({}).passthrough().describe(prop.description || '');
break;
default:
zodType = z.any().describe(prop.description || '');
}
// Make optional if not required
if (!required.includes(key)) {
zodType = zodType.optional();
}
zodFields[key] = zodType;
}
return z.object(zodFields);
}
/**
* Helper to get Zod type for a property definition
*/
function getZodTypeForProperty(prop: any): z.ZodTypeAny {
switch (prop.type) {
case 'string':
return z.string();
case 'number':
return z.number();
case 'integer':
return z.number().int();
case 'boolean':
return z.boolean();
case 'object':
return z.object({}).passthrough();
default:
return z.any();
}
}
/**
* Create multiple MCP tool wrappers from tool list
*/
export function createMCPToolWrappers(
toolInfos: MCPToolInfo[],
mcpClient: MCPClientConnector,
logger: FastifyBaseLogger,
onImage?: (image: { data: string; mimeType: string }) => void
): DynamicStructuredTool[] {
return toolInfos.map(toolInfo => createMCPToolWrapper(toolInfo, mcpClient, logger, onImage));
}

View File

@@ -0,0 +1,253 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { OHLCService } from '../../services/ohlc-service.js';
import type { WorkspaceManager } from '../../workspace/workspace-manager.js';
import type { ChartState } from '../../workspace/types.js';
import * as chrono from 'chrono-node';
/**
* Get Chart Data Tool
*
* Standard LangChain tool for fetching OHLCV+ data with workspace defaults.
* Allows agent to override any parameter for historical or alternative ticker queries.
*/
export interface GetChartDataToolConfig {
ohlcService: OHLCService;
workspaceManager: WorkspaceManager;
logger: FastifyBaseLogger;
}
export function createGetChartDataTool(config: GetChartDataToolConfig): DynamicStructuredTool {
const { ohlcService, workspaceManager, logger } = config;
return new DynamicStructuredTool({
name: 'get_chart_data',
description: `Fetch OHLCV+ data for current chart or any ticker/timeframe. All parameters are optional and default to workspace chart state.
**IMPORTANT: Use this tool ONLY for quick, casual data viewing. For any analysis, plotting, statistics, or deep research, use the 'research' tool instead.**
Parameters:
- ticker (optional): Market symbol (defaults to workspace chartState.symbol)
- period (optional): OHLC period in seconds (defaults to workspace chartState.period)
- from_time (optional): Start time as Unix timestamp (number or string like "1774126800") OR date string like "2 days ago", "2024-01-01" (defaults to workspace chartState.start_time)
- to_time (optional): End time as Unix timestamp (number or string like "1774732500") OR date string like "now", "yesterday" (defaults to workspace chartState.end_time)
- countback (optional): Limit number of bars returned
- columns (optional): Extra columns beyond OHLC: ["volume", "buy_vol", "sell_vol", "open_time", "high_time", "low_time", "close_time", "open_interest"]`,
schema: z.object({
ticker: z.string().optional().describe('Market symbol (defaults to workspace chartState.symbol)'),
period: z.number().optional().describe('OHLC period in seconds (defaults to workspace chartState.period)'),
from_time: z.union([z.number(), z.string()]).optional().describe('Start time: Unix seconds OR date string (defaults to workspace chartState.start_time)'),
to_time: z.union([z.number(), z.string()]).optional().describe('End time: Unix seconds OR date string (defaults to workspace chartState.end_time)'),
countback: z.number().optional().describe('Limit number of bars returned'),
columns: z.array(z.enum(['volume', 'buy_vol', 'sell_vol', 'open_time', 'high_time', 'low_time', 'close_time', 'open_interest'])).optional().describe('Extra columns beyond OHLC'),
}),
func: async ({ ticker, period, from_time, to_time, countback, columns }) => {
logger.debug({ ticker, period, from_time, to_time, countback, columns }, 'Executing get_chart_data tool');
try {
// Get workspace chart state
const chartState = await getChartState(workspaceManager, logger);
// Build request with workspace defaults
const finalTicker = ticker ?? chartState.symbol;
const finalPeriod = period ?? parsePeriod(chartState.period);
const finalFromTime = await parseTime(from_time, chartState.start_time, logger);
const finalToTime = await parseTime(to_time, chartState.end_time, logger);
const requestedColumns = columns ?? [];
// Validate we have all required parameters
if (!finalTicker) {
return JSON.stringify({ error: 'Ticker not specified and not available in workspace' });
}
if (!finalPeriod) {
return JSON.stringify({ error: 'Period not specified and not available in workspace' });
}
if (!finalFromTime) {
return JSON.stringify({ error: 'from_time not specified and not available in workspace' });
}
if (!finalToTime) {
return JSON.stringify({ error: 'to_time not specified and not available in workspace' });
}
logger.debug({
ticker: finalTicker,
period: finalPeriod,
from_time: finalFromTime,
to_time: finalToTime,
countback,
columns: requestedColumns,
}, 'Fetching OHLC data');
// Fetch data from OHLCService
const historyResult = await ohlcService.fetchOHLC(
finalTicker,
finalPeriod.toString(),
finalFromTime,
finalToTime,
countback
);
if (historyResult.noData || !historyResult.bars || historyResult.bars.length === 0) {
return JSON.stringify({
ticker: finalTicker,
period: finalPeriod,
timeRange: { start: finalFromTime, end: finalToTime },
bars: [],
});
}
// Filter/format bars with requested columns
const bars = historyResult.bars.map(bar => {
const result: any = {
time: bar.time,
open: bar.open,
high: bar.high,
low: bar.low,
close: bar.close,
ticker: finalTicker,
};
// Add optional columns if requested
for (const col of requestedColumns) {
if (col === 'volume' && bar.volume !== undefined) {
result.volume = bar.volume;
} else if (col === 'buy_vol' && bar.buy_vol !== undefined) {
result.buy_vol = bar.buy_vol;
} else if (col === 'sell_vol' && bar.sell_vol !== undefined) {
result.sell_vol = bar.sell_vol;
} else if (col === 'open_time' && bar.open_time !== undefined) {
result.open_time = bar.open_time;
} else if (col === 'high_time' && bar.high_time !== undefined) {
result.high_time = bar.high_time;
} else if (col === 'low_time' && bar.low_time !== undefined) {
result.low_time = bar.low_time;
} else if (col === 'close_time' && bar.close_time !== undefined) {
result.close_time = bar.close_time;
} else if (col === 'open_interest' && bar.open_interest !== undefined) {
result.open_interest = bar.open_interest;
}
}
return result;
});
logger.info({ ticker: finalTicker, barCount: bars.length }, 'Chart data fetched successfully');
return JSON.stringify({
ticker: finalTicker,
period: finalPeriod,
timeRange: {
start: finalFromTime,
end: finalToTime,
},
bars,
});
} catch (error) {
logger.error({ error }, 'Get chart data tool failed');
return JSON.stringify({
error: error instanceof Error ? error.message : String(error),
});
}
},
});
}
/**
* Get chart state from workspace
*/
async function getChartState(workspaceManager: WorkspaceManager, logger: FastifyBaseLogger): Promise<ChartState> {
try {
const chartState = workspaceManager.getState<ChartState>('chartState');
if (!chartState) {
// Return default chart state
return {
symbol: 'BINANCE:BTC/USDT',
start_time: null,
end_time: null,
period: '15',
selected_shapes: [],
};
}
return chartState;
} catch (error) {
logger.error({ error }, 'Failed to get chart state from workspace');
// Return default chart state
return {
symbol: 'BINANCE:BTC/USDT',
start_time: null,
end_time: null,
period: '15',
selected_shapes: [],
};
}
}
/**
* Parse period string to seconds
* Handles period as either a number (already in seconds) or string (minutes)
*/
function parsePeriod(period: string | number | null): number | null {
if (period === null) {
return null;
}
if (typeof period === 'number') {
return period;
}
// Period in workspace is stored as string representing minutes
// Convert to seconds
const minutes = parseInt(period, 10);
if (isNaN(minutes)) {
return null;
}
return minutes * 60;
}
/**
* Parse time parameter (Unix seconds, date string, or null)
* Returns Unix timestamp in seconds
*/
async function parseTime(
timeParam: number | string | null | undefined,
workspaceDefault: number | null,
logger: FastifyBaseLogger
): Promise<number | null> {
// Use workspace default if param not provided
if (timeParam === undefined || timeParam === null) {
return workspaceDefault;
}
// If it's already a number, assume Unix seconds
if (typeof timeParam === 'number') {
return timeParam;
}
// Try to parse string as numeric Unix timestamp first
const numericTimestamp = parseInt(timeParam, 10);
if (!isNaN(numericTimestamp) && numericTimestamp.toString() === timeParam) {
// String is a valid integer - treat as Unix seconds
logger.debug({ timeParam, parsedTimestamp: numericTimestamp }, 'Parsed string as Unix timestamp');
return numericTimestamp;
}
// Parse date string using chrono
try {
const parsed = chrono.parseDate(timeParam);
if (!parsed) {
logger.warn({ timeParam }, 'Failed to parse time string');
return null;
}
// Convert to Unix seconds
return Math.floor(parsed.getTime() / 1000);
} catch (error) {
logger.error({ error, timeParam }, 'Error parsing time string');
return null;
}
}

View File

@@ -0,0 +1,11 @@
// Platform tools exports
export {
createSymbolLookupTool,
type SymbolLookupToolConfig,
} from './symbol-lookup.tool.js';
export {
createGetChartDataTool,
type GetChartDataToolConfig,
} from './get-chart-data.tool.js';

View File

@@ -0,0 +1,53 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { ResearchSubagent } from '../../harness/subagents/research/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
export interface ResearchAgentToolConfig {
researchSubagent: ResearchSubagent;
context: SubagentContext;
logger: FastifyBaseLogger;
}
/**
* Creates a LangChain tool that delegates to the research subagent.
* This is the standard LangChain pattern for exposing a subagent as a tool
* to a parent agent.
*/
export function createResearchAgentTool(config: ResearchAgentToolConfig): DynamicStructuredTool {
const { researchSubagent, context, logger } = config;
return new DynamicStructuredTool({
name: 'research',
description: `Delegate to the research subagent for data analysis, charting, statistics, and Python script execution.
Use this tool for:
- Plotting charts with technical indicators (EMA, RSI, MACD, Bollinger Bands, etc.)
- Statistical analysis of price data
- Custom research scripts using the DataAPI and ChartingAPI
- Any task requiring code execution or matplotlib charts
The research subagent will write and execute Python scripts, capture output and charts, and return results.`,
schema: z.object({
instruction: z.string().describe('The research task or analysis to perform. Be specific about what data, indicators, timeframes, and output you want.'),
}),
func: async ({ instruction }: { instruction: string }): Promise<string> => {
logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to research subagent');
try {
const result = await researchSubagent.executeWithImages(context, instruction);
// Return in the format that AgentHarness.processToolResult() knows how to handle
// (extracts images and passes them to channelAdapter)
return JSON.stringify({
text: result.text,
images: result.images,
});
} catch (error) {
logger.error({ error, errorMessage: (error as Error)?.message }, 'Research subagent failed');
throw error;
}
},
});
}

View File

@@ -0,0 +1,78 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { SymbolIndexService } from '../../services/symbol-index-service.js';
/**
* Symbol Lookup Tool
*
* Standard LangChain tool for symbol search and resolution.
* Supports two modes:
* - search: Find symbols matching a query
* - resolve: Get detailed metadata for a specific symbol
*/
export interface SymbolLookupToolConfig {
symbolIndexService: SymbolIndexService;
logger: FastifyBaseLogger;
}
export function createSymbolLookupTool(config: SymbolLookupToolConfig): DynamicStructuredTool {
const { symbolIndexService, logger } = config;
return new DynamicStructuredTool({
name: 'symbol_lookup',
description: `Search for market symbols or resolve symbol metadata. Use 'search' mode to find symbols matching a query, or 'resolve' mode to get detailed metadata for a specific symbol.
Parameters:
- mode (required): Either 'search' or 'resolve'
- query (required): Search query (for search mode) or symbol ticker (for resolve mode)
- limit (optional): Maximum number of search results (search mode only, default: 30)`,
schema: z.object({
mode: z.enum(['search', 'resolve']).describe('Operation mode: search for symbols or resolve a specific symbol'),
query: z.string().describe('Search query (for search mode) or symbol ticker (for resolve mode)'),
limit: z.number().optional().default(30).describe('Maximum number of search results (search mode only, default: 30)'),
}),
func: async ({ mode, query, limit }) => {
logger.debug({ mode, query, limit }, 'Executing symbol_lookup tool');
try {
if (mode === 'search') {
const results = await symbolIndexService.search(query, limit);
logger.info({ query, resultCount: results.length }, 'Symbol search completed');
return JSON.stringify({
mode: 'search',
query,
count: results.length,
results,
});
} else {
const symbolInfo = await symbolIndexService.resolveSymbol(query);
if (!symbolInfo) {
logger.warn({ symbol: query }, 'Symbol not found');
return JSON.stringify({
error: `Symbol not found: ${query}`,
symbol: query,
});
}
logger.info({ symbol: query }, 'Symbol resolved');
return JSON.stringify({
mode: 'resolve',
symbol: query,
symbolInfo,
});
}
} catch (error) {
logger.error({ error, mode, query }, 'Symbol lookup tool failed');
return JSON.stringify({
error: error instanceof Error ? error.message : String(error),
});
}
},
});
}

View File

@@ -0,0 +1,291 @@
import type { DynamicStructuredTool } from '@langchain/core/tools';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../harness/mcp-client.js';
import type { OHLCService } from '../services/ohlc-service.js';
import type { SymbolIndexService } from '../services/symbol-index-service.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import { createSymbolLookupTool } from './platform/symbol-lookup.tool.js';
import { createGetChartDataTool } from './platform/get-chart-data.tool.js';
import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.js';
/**
* Agent tool configuration
* Specifies which tools are available to which agent
*/
export interface AgentToolConfig {
/** Agent name (e.g., 'main', 'research', 'code-reviewer') */
agentName: string;
/** Platform tool names to include */
platformTools: string[];
/** MCP tool patterns/names to include (supports wildcards like 'category_*') */
mcpTools: string[];
}
/**
* Platform services required for creating platform tools
* Can be provided as direct references or getter functions (for lazy initialization)
*/
export interface PlatformServices {
ohlcService?: OHLCService | (() => OHLCService | undefined);
symbolIndexService?: SymbolIndexService | (() => SymbolIndexService | undefined);
workspaceManager?: WorkspaceManager | (() => WorkspaceManager | undefined);
}
/**
* Tool Registry
*
* Manages tool creation and agent-to-tool mappings.
* Supports:
* - Platform tools (local services like symbol lookup, chart data)
* - Remote MCP tools (per-user, session-scoped)
* - Configurable tool routing (which tools for which agents)
*/
export class ToolRegistry {
private logger: FastifyBaseLogger;
private platformServices: PlatformServices;
private agentToolConfigs: Map<string, AgentToolConfig> = new Map();
constructor(logger: FastifyBaseLogger, platformServices: PlatformServices) {
this.logger = logger;
this.platformServices = platformServices;
}
/**
* Register agent tool configuration
*/
registerAgentTools(config: AgentToolConfig): void {
this.agentToolConfigs.set(config.agentName, config);
this.logger.debug(
{
agent: config.agentName,
platformTools: config.platformTools,
mcpTools: config.mcpTools,
},
'Registered agent tool configuration'
);
}
/**
* Get tools for a specific agent
*
* @param agentName - Name of the agent ('main', 'research', etc.)
* @param mcpClient - MCP client for remote tools (optional)
* @param availableMCPTools - List of available MCP tools from user's server (optional)
* @param workspaceManager - Workspace manager for this session (optional, used by some platform tools)
* @returns Array of tools for this agent
*/
async getToolsForAgent(
agentName: string,
mcpClient?: MCPClientConnector,
availableMCPTools?: MCPToolInfo[],
workspaceManager?: WorkspaceManager,
onImage?: (image: { data: string; mimeType: string }) => void
): Promise<DynamicStructuredTool[]> {
const config = this.agentToolConfigs.get(agentName);
if (!config) {
this.logger.warn({ agent: agentName }, 'No tool configuration found for agent');
return [];
}
const tools: DynamicStructuredTool[] = [];
// Add platform tools
for (const toolName of config.platformTools) {
const tool = await this.getPlatformTool(toolName, workspaceManager);
if (tool) {
tools.push(tool);
} else {
this.logger.warn({ agent: agentName, tool: toolName }, 'Platform tool not found');
}
}
// Add MCP tools (if MCP client and tools are available)
if (mcpClient && availableMCPTools && availableMCPTools.length > 0) {
const filteredMCPTools = this.filterMCPTools(availableMCPTools, config.mcpTools);
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage);
tools.push(...mcpToolInstances);
this.logger.debug(
{
agent: agentName,
mcpToolCount: mcpToolInstances.length,
mcpToolNames: mcpToolInstances.map(t => t.name),
},
'Added MCP tools for agent'
);
}
this.logger.info(
{
agent: agentName,
toolCount: tools.length,
toolNames: tools.map(t => t.name),
},
'Retrieved tools for agent'
);
return tools;
}
/**
* Get a platform tool by name
*
* @param toolName - Name of the tool to create
* @param sessionWorkspaceManager - Optional session-specific workspace manager
*/
private async getPlatformTool(
toolName: string,
sessionWorkspaceManager?: WorkspaceManager
): Promise<DynamicStructuredTool | null> {
// Don't cache tools - recreate each time to get latest services
// (services might be initialized asynchronously after registry creation)
// Create tool based on name
let tool: DynamicStructuredTool | null = null;
switch (toolName) {
case 'symbol_lookup': {
const symbolIndexService = this.resolveService(this.platformServices.symbolIndexService);
if (symbolIndexService) {
tool = createSymbolLookupTool({
symbolIndexService,
logger: this.logger,
});
} else {
this.logger.warn('SymbolIndexService not available for symbol_lookup tool');
}
break;
}
case 'get_chart_data': {
const ohlcService = this.resolveService(this.platformServices.ohlcService);
// Use session workspace manager if provided, otherwise try global
const workspaceManager = sessionWorkspaceManager ||
this.resolveService(this.platformServices.workspaceManager);
if (ohlcService && workspaceManager) {
tool = createGetChartDataTool({
ohlcService,
workspaceManager,
logger: this.logger,
});
} else {
this.logger.warn(
{ hasOHLC: !!ohlcService, hasWorkspace: !!workspaceManager },
'OHLCService or WorkspaceManager not available for get_chart_data tool'
);
}
break;
}
default:
this.logger.warn({ tool: toolName }, 'Unknown platform tool');
return null;
}
return tool;
}
/**
* Resolve a service (handle both direct references and getter functions)
*/
private resolveService<T>(service: T | (() => T | undefined) | undefined): T | undefined {
// Check if it's a function by checking the type more carefully
if (service && typeof (service as any) === 'function' && !(service as any).prototype) {
// It's a getter function (an arrow function has no .prototype, unlike classes and regular function expressions)
return (service as () => T | undefined)();
}
return service as T | undefined;
}
/**
* Filter MCP tools based on patterns/names
* Supports wildcards like 'category_*' or exact names like 'execute_research'
*/
private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] {
if (patterns.length === 0) {
return [];
}
return availableTools.filter(tool => {
for (const pattern of patterns) {
if (this.matchesPattern(tool.name, pattern)) {
return true;
}
}
return false;
});
}
/**
* Check if a tool name matches a pattern
* Supports wildcards: 'category_*' matches 'category_write', 'category_read', etc.
*/
private matchesPattern(toolName: string, pattern: string): boolean {
if (pattern === toolName) {
return true; // Exact match
}
if (pattern.includes('*')) {
// Convert wildcard pattern to regex
const regexPattern = pattern
.replace(/\*/g, '.*')
.replace(/\?/g, '.');
const regex = new RegExp(`^${regexPattern}$`);
return regex.test(toolName);
}
return false;
}
/**
* Get all registered agent names
*/
getRegisteredAgents(): string[] {
return Array.from(this.agentToolConfigs.keys());
}
/**
* Get tool configuration for an agent
*/
getAgentToolConfig(agentName: string): AgentToolConfig | null {
return this.agentToolConfigs.get(agentName) || null;
}
}
/**
* Global registry instance (initialized at gateway startup)
*/
let globalToolRegistry: ToolRegistry | null = null;
/**
* Initialize the global tool registry
*/
export function initializeToolRegistry(
logger: FastifyBaseLogger,
platformServices: PlatformServices
): ToolRegistry {
if (globalToolRegistry) {
logger.warn('Global tool registry already initialized');
return globalToolRegistry;
}
globalToolRegistry = new ToolRegistry(logger, platformServices);
logger.info('Tool registry initialized');
return globalToolRegistry;
}
/**
* Get the global tool registry
*/
export function getToolRegistry(): ToolRegistry {
if (!globalToolRegistry) {
throw new Error('Tool registry not initialized. Call initializeToolRegistry() first.');
}
return globalToolRegistry;
}

View File

@@ -16,7 +16,15 @@ export interface TradingViewBar {
high: number; high: number;
low: number; low: number;
close: number; close: number;
volume: number; volume?: number;
// Optional extra columns from ohlc.proto
buy_vol?: number;
sell_vol?: number;
open_time?: number;
high_time?: number;
low_time?: number;
close_time?: number;
open_interest?: number;
} }
/** /**

View File

@@ -12,11 +12,31 @@ export const ModelPreferenceSchema = z.object({
export type ModelPreference = z.infer<typeof ModelPreferenceSchema>; export type ModelPreference = z.infer<typeof ModelPreferenceSchema>;
/** /**
* User license and feature authorization * Kubernetes resource allocations — stored per-user so they can be customized
* beyond the standard tier defaults.
*/ */
export const UserLicenseSchema = z.object({ export const K8sResourcesSchema = z.object({
userId: z.string(), memoryRequest: z.string(), // e.g. "256Mi"
email: z.string().email().optional(), memoryLimit: z.string(), // e.g. "512Mi"
cpuRequest: z.string(), // e.g. "100m"
cpuLimit: z.string(), // e.g. "500m"
storage: z.string(), // e.g. "1Gi"
tmpSizeLimit: z.string(), // e.g. "128Mi"
enableIdleShutdown: z.boolean(),
idleTimeoutMinutes: z.number(),
});
export type K8sResources = z.infer<typeof K8sResourcesSchema>;
/**
* The portable License dict — stored as a single JSONB blob per user in the DB,
* passable over-the-wire to any service that needs to enforce or inspect
* feature access, resource limits, or preferences.
*
* Standard tier templates define the defaults; per-user rows are copies that
* can be customised independently without schema changes.
*/
export const LicenseSchema = z.object({
licenseType: z.enum(['free', 'pro', 'enterprise']), licenseType: z.enum(['free', 'pro', 'enterprise']),
features: z.object({ features: z.object({
maxIndicators: z.number(), maxIndicators: z.number(),
@@ -32,8 +52,82 @@ export const UserLicenseSchema = z.object({
maxTokensPerMessage: z.number(), maxTokensPerMessage: z.number(),
rateLimitPerMinute: z.number(), rateLimitPerMinute: z.number(),
}), }),
mcpServerUrl: z.string(), // Allow any string including 'pending', URL validation happens later k8sResources: K8sResourcesSchema,
preferredModel: ModelPreferenceSchema.optional(), preferredModel: ModelPreferenceSchema.optional(),
});
export type License = z.infer<typeof LicenseSchema>;
export type LicenseTier = License['licenseType'];
/**
* Standard tier templates — single source of truth for default License values.
* Used when creating new user accounts (copy the tier template into the user's
* license row) and anywhere tier-specific defaults are needed.
*/
export const LICENSE_TIER_TEMPLATES: Record<LicenseTier, License> = {
free: {
licenseType: 'free',
features: {
maxIndicators: 5, maxStrategies: 3, maxBacktestDays: 30,
realtimeData: false, customExecutors: false, apiAccess: false,
},
resourceLimits: {
maxConcurrentSessions: 1, maxMessagesPerDay: 100,
maxTokensPerMessage: 4096, rateLimitPerMinute: 10,
},
k8sResources: {
memoryRequest: '256Mi', memoryLimit: '512Mi',
cpuRequest: '100m', cpuLimit: '500m',
storage: '1Gi', tmpSizeLimit: '128Mi',
enableIdleShutdown: true, idleTimeoutMinutes: 15,
},
},
pro: {
licenseType: 'pro',
features: {
maxIndicators: 50, maxStrategies: 20, maxBacktestDays: 365,
realtimeData: true, customExecutors: true, apiAccess: true,
},
resourceLimits: {
maxConcurrentSessions: 5, maxMessagesPerDay: 1000,
maxTokensPerMessage: 8192, rateLimitPerMinute: 60,
},
k8sResources: {
memoryRequest: '512Mi', memoryLimit: '2Gi',
cpuRequest: '250m', cpuLimit: '2000m',
storage: '10Gi', tmpSizeLimit: '256Mi',
enableIdleShutdown: true, idleTimeoutMinutes: 60,
},
},
enterprise: {
licenseType: 'enterprise',
features: {
maxIndicators: 999, maxStrategies: 999, maxBacktestDays: 3650,
realtimeData: true, customExecutors: true, apiAccess: true,
},
resourceLimits: {
maxConcurrentSessions: 20, maxMessagesPerDay: 10000,
maxTokensPerMessage: 32768, rateLimitPerMinute: 300,
},
k8sResources: {
memoryRequest: '1Gi', memoryLimit: '4Gi',
cpuRequest: '500m', cpuLimit: '4000m',
storage: '50Gi', tmpSizeLimit: '512Mi',
enableIdleShutdown: false, idleTimeoutMinutes: 0,
},
},
};
/**
* UserLicense — DB row envelope. Wraps the portable License dict with account
* identity and metadata. Not intended to be sent over-the-wire directly;
* use the nested `license` field for cross-service communication.
*/
export const UserLicenseSchema = z.object({
userId: z.string(),
email: z.string().email().optional(),
license: LicenseSchema,
mcpServerUrl: z.string(), // Allow any string including 'pending'; validated at use time
expiresAt: z.union([z.date(), z.string(), z.null()]).optional().transform(val => { expiresAt: z.union([z.date(), z.string(), z.null()]).optional().transform(val => {
if (!val || val === null) return undefined; if (!val || val === null) return undefined;
return val instanceof Date ? val : new Date(val); return val instanceof Date ? val : new Date(val);
@@ -59,14 +153,17 @@ export enum ChannelType {
} }
/** /**
* Authentication context per channel * Authentication context per channel session.
* `license` is the portable License dict (not the full UserLicense row).
* `mcpServerUrl` is the runtime container endpoint, resolved at auth time.
*/ */
export const AuthContextSchema = z.object({ export const AuthContextSchema = z.object({
userId: z.string(), userId: z.string(),
channelType: z.nativeEnum(ChannelType), channelType: z.nativeEnum(ChannelType),
channelUserId: z.string(), // Platform-specific ID (telegram_id, discord_id, etc) channelUserId: z.string(), // Platform-specific ID (telegram_id, discord_id, etc)
sessionId: z.string(), sessionId: z.string(),
license: UserLicenseSchema, license: LicenseSchema,
mcpServerUrl: z.string(),
authenticatedAt: z.date(), authenticatedAt: z.date(),
}); });

View File

@@ -62,6 +62,8 @@ export type {
StoreConfig, StoreConfig,
ChannelAdapter, ChannelAdapter,
ChannelCapabilities, ChannelCapabilities,
ImageMessage,
TextMessage,
PathTrigger, PathTrigger,
PathTriggerHandler, PathTriggerHandler,
PathTriggerContext, PathTriggerContext,

View File

@@ -131,6 +131,29 @@ export interface ChannelCapabilities {
supportsTradingViewEmbed: boolean; supportsTradingViewEmbed: boolean;
} }
/**
* Image message for channel adapters.
* Contains base64-encoded image data from MCP tools.
*/
export interface ImageMessage {
/** Base64-encoded image data */
data: string;
/** MIME type (e.g., 'image/png', 'image/jpeg') */
mimeType: string;
/** Optional caption/description */
caption?: string;
}
/**
* Text message for channel adapters.
*/
export interface TextMessage {
/** Text content */
text: string;
}
/** /**
* Adapter interface for communication channels. * Adapter interface for communication channels.
* Implemented by WebSocket handler, Telegram handler, etc. * Implemented by WebSocket handler, Telegram handler, etc.
@@ -142,6 +165,18 @@ export interface ChannelAdapter {
/** Send an incremental patch to the client */ /** Send an incremental patch to the client */
sendPatch(msg: PatchMessage): void; sendPatch(msg: PatchMessage): void;
/** Send a text message to the client */
sendText(msg: TextMessage): void;
/** Send a streaming text chunk to the client */
sendChunk(content: string): void;
/** Send an image to the client */
sendImage(msg: ImageMessage): void;
/** Notify client that a tool call is being executed */
sendToolCall?(toolName: string, label?: string): void;
/** Get channel capabilities */ /** Get channel capabilities */
getCapabilities(): ChannelCapabilities; getCapabilities(): ChannelCapabilities;
} }

View File

@@ -89,6 +89,6 @@ See `deploy/k8s/base/agent-deployment-example.yaml` for a complete example of ho
1. **Self-delete only**: The sidecar can only delete the deployment it's part of (enforced by label matching in admission policy) 1. **Self-delete only**: The sidecar can only delete the deployment it's part of (enforced by label matching in admission policy)
2. **Non-privileged**: Runs as non-root user (UID 1000) 2. **Non-privileged**: Runs as non-root user (UID 1000)
3. **Minimal permissions**: Only has `get` and `delete` on deployments/PVCs in the agents namespace 3. **Minimal permissions**: Only has `get` and `delete` on deployments/PVCs in the sandboxes namespace
4. **No cross-namespace access**: Scoped to `dexorder-agents` namespace only 4. **No cross-namespace access**: Scoped to `dexorder-sandboxes` namespace only
5. **Crash-safe**: Only triggers cleanup on exit code 42, never on crashes 5. **Crash-safe**: Only triggers cleanup on exit code 42, never on crashes

View File

@@ -1,5 +1,5 @@
# Multi-stage build for DexOrder user container # Multi-stage build for DexOrder user container
FROM python:3.11-slim AS builder FROM continuumio/miniconda3:latest AS builder
WORKDIR /build WORKDIR /build
@@ -12,6 +12,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
# Copy dependency specifications # Copy dependency specifications
COPY setup.py . COPY setup.py .
COPY environment.yml .
COPY dexorder/ dexorder/ COPY dexorder/ dexorder/
# Copy protobuf definitions (copied by bin/build from canonical /protobuf/) # Copy protobuf definitions (copied by bin/build from canonical /protobuf/)
@@ -22,13 +23,17 @@ RUN mkdir -p dexorder/generated && \
protoc --python_out=dexorder/generated --proto_path=protobuf protobuf/*.proto && \ protoc --python_out=dexorder/generated --proto_path=protobuf protobuf/*.proto && \
touch dexorder/generated/__init__.py touch dexorder/generated/__init__.py
# Install dependencies to a target directory # Create conda environment and install dependencies
RUN pip install --no-cache-dir --target=/build/deps . RUN conda env create -f environment.yml -p /build/env && \
conda clean -afy
# Install the local package into the conda environment
RUN /build/env/bin/pip install --no-cache-dir .
# ============================================================================= # =============================================================================
# Runtime stage # Runtime stage
# ============================================================================= # =============================================================================
FROM python:3.11-slim FROM continuumio/miniconda3:latest
WORKDIR /app WORKDIR /app
@@ -40,8 +45,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
# Create non-root user # Create non-root user
RUN groupadd -r dexorder && useradd -r -g dexorder -u 1000 dexorder RUN groupadd -r dexorder && useradd -r -g dexorder -u 1000 dexorder
# Copy installed Python packages from builder # Copy conda environment from builder
COPY --from=builder /build/deps /usr/local/lib/python3.11/site-packages/ COPY --from=builder /build/env /opt/conda/envs/dexorder
# Copy application code # Copy application code
COPY dexorder/ /app/dexorder/ COPY dexorder/ /app/dexorder/
@@ -51,17 +56,26 @@ COPY main.py /app/
COPY --from=builder /build/dexorder/generated/ /app/dexorder/generated/ COPY --from=builder /build/dexorder/generated/ /app/dexorder/generated/
# Create directories for config, secrets, and data # Create directories for config, secrets, and data
# Note: /app will be read-only at runtime except for /app/data (mounted volume)
RUN mkdir -p /app/config /app/secrets /app/data && \ RUN mkdir -p /app/config /app/secrets /app/data && \
chown -R dexorder:dexorder /app chown -R root:root /app && \
chmod -R 755 /app && \
chown dexorder:dexorder /app/data && \
chmod 700 /app/data
# Create writable tmp directory (read-only rootfs requirement) # Create writable tmp directory (read-only rootfs requirement)
RUN mkdir -p /tmp && chmod 1777 /tmp RUN mkdir -p /tmp && chmod 1777 /tmp
# Copy entrypoint script
COPY entrypoint.sh /app/
RUN chmod 755 /app/entrypoint.sh && chown root:root /app/entrypoint.sh
# Switch to non-root user # Switch to non-root user
USER dexorder USER dexorder
# Environment variables (can be overridden in k8s) # Environment variables (can be overridden in k8s)
ENV PYTHONUNBUFFERED=1 \ ENV PYTHONUNBUFFERED=1 \
MPLCONFIGDIR=/tmp \
LOG_LEVEL=INFO \ LOG_LEVEL=INFO \
CONFIG_PATH=/app/config/config.yaml \ CONFIG_PATH=/app/config/config.yaml \
SECRETS_PATH=/app/config/secrets.yaml \ SECRETS_PATH=/app/config/secrets.yaml \
@@ -76,7 +90,7 @@ ENV PYTHONUNBUFFERED=1 \
# Health check endpoint (simple check if process is running) # Health check endpoint (simple check if process is running)
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD python -c "import sys; sys.exit(0)" CMD /opt/conda/envs/dexorder/bin/python -c "import sys; sys.exit(0)"
# Run the main application # Run the main application using conda environment via entrypoint
ENTRYPOINT ["python", "/app/main.py"] ENTRYPOINT ["/app/entrypoint.sh"]

View File

@@ -12,7 +12,7 @@ High-level Python API for accessing historical OHLC data from the DexOrder tradi
## Installation ## Installation
```bash ```bash
cd redesign/client-py cd redesign/sandbox
pip install -e . pip install -e .
``` ```
@@ -202,7 +202,7 @@ The client requires the following endpoints:
```bash ```bash
cd redesign/protobuf cd redesign/protobuf
protoc -I . --python_out=../client-py/dexorder ingestor.proto ohlc.proto protoc -I . --python_out=../sandbox/dexorder ingestor.proto ohlc.proto
``` ```
### Run Tests ### Run Tests

View File

@@ -0,0 +1,221 @@
# Research Script API Usage
Research scripts executed via the `execute_research` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
## Accessing the API
```python
from dexorder.api import get_api
import asyncio
# Get the global API instance
api = get_api()
```
## Using the Data API
The data API provides access to historical OHLC (Open, High, Low, Close) market data with smart caching via Iceberg.
### Fetching Historical Data
The API accepts flexible timestamp formats for convenience:
```python
from dexorder.api import get_api
import asyncio
from datetime import datetime
api = get_api()
# Method 1: Using Unix timestamps (seconds)
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600, # 1 hour candles
start_time=1640000000, # Unix timestamp in seconds
end_time=1640086400,
extra_columns=["volume"]
))
# Method 2: Using date strings
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20", # Simple date string
end_time="2021-12-21",
extra_columns=["volume"]
))
# Method 3: Using date strings with time
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20 00:00:00",
end_time="2021-12-20 23:59:59",
extra_columns=["volume"]
))
# Method 4: Using datetime objects
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time=datetime(2021, 12, 20),
end_time=datetime(2021, 12, 21),
extra_columns=["volume"]
))
print(f"Loaded {len(df)} candles")
print(df.head())
```
### Available Extra Columns
- `"volume"` - Total volume
- `"buy_vol"` - Buy-side volume
- `"sell_vol"` - Sell-side volume
- `"open_time"`, `"high_time"`, `"low_time"`, `"close_time"` - Timestamps for each price point
- `"open_interest"` - Open interest (for futures)
- `"ticker"` - Market identifier
- `"period_seconds"` - Period in seconds
## Using the Charting API
The charting API provides styled financial charts with OHLC candlesticks and technical indicators.
### Creating a Basic Candlestick Chart
```python
from dexorder.api import get_api
import asyncio
from datetime import datetime
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
))
# Create candlestick chart (synchronous)
fig, ax = api.charting.plot_ohlc(
df,
title="BTC/USDT 1H",
volume=True, # Show volume bars
style="charles" # Chart style
)
# The figure is automatically captured and returned to the MCP client
```
### Adding Indicator Panels
```python
from dexorder.api import get_api
import asyncio
import pandas as pd
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Calculate a simple moving average
df['sma_20'] = df['close'].rolling(window=20).mean()
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT with SMA")
# Overlay the SMA on the price chart
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=2)
ax.legend()
# Add RSI indicator panel below
df['rsi'] = calculate_rsi(df['close'], 14)  # Your RSI calculation (a sample implementation follows this example)
rsi_ax = api.charting.add_indicator_panel(
fig, df,
columns=["rsi"],
ylabel="RSI",
ylim=(0, 100)
)
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
```
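The `calculate_rsi` helper above is not provided by the API; it stands in for whatever indicator calculation your script needs. A minimal pandas sketch using Wilder-style smoothing could look like this:
```python
import pandas as pd

def calculate_rsi(close: pd.Series, period: int = 14) -> pd.Series:
    """Relative Strength Index via Wilder's exponential smoothing."""
    delta = close.diff()
    gain = delta.clip(lower=0)
    loss = -delta.clip(upper=0)
    avg_gain = gain.ewm(alpha=1 / period, min_periods=period, adjust=False).mean()
    avg_loss = loss.ewm(alpha=1 / period, min_periods=period, adjust=False).mean()
    rs = avg_gain / avg_loss
    return 100 - 100 / (1 + rs)
```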
## Complete Example
```python
from dexorder.api import get_api
import asyncio
import pandas as pd
# Get API instance
api = get_api()
# Fetch historical data using date strings (easiest for research)
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600, # 1 hour
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
))
# Add some analysis
df['sma_20'] = df['close'].rolling(window=20).mean()
df['sma_50'] = df['close'].rolling(window=50).mean()
# Create chart with volume
fig, ax = api.charting.plot_ohlc(
df,
title="BTC/USDT Analysis",
volume=True,
style="charles"
)
# Overlay moving averages
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=1.5)
ax.plot(df.index, df['sma_50'], label="SMA 50", color="red", linewidth=1.5)
ax.legend()
# Print summary statistics
print(f"Period: {len(df)} candles")
print(f"High: {df['high'].max()}")
print(f"Low: {df['low'].min()}")
print(f"Mean Volume: {df['volume'].mean():.2f}")
```
## Notes
- **Async vs Sync**: Data API methods are async and require `asyncio.run()`. Charting API methods are synchronous.
- **Figure Capture**: All matplotlib figures created during script execution are automatically captured and returned as PNG images.
- **Print Statements**: All `print()` output is captured and returned as text content.
- **Errors**: Exceptions are caught and reported in the execution results.
- **Timestamps**: The API accepts flexible timestamp formats:
- Unix timestamps in **seconds** (int or float) - e.g., `1640000000`
- Date strings - e.g., `"2021-12-20"` or `"2021-12-20 12:00:00"`
- datetime objects - e.g., `datetime(2021, 12, 20)`
- pandas Timestamp objects
- Internally, the system uses microseconds since epoch, but you don't need to worry about this conversion.
- **Price/Volume Values**: All prices and volumes are returned as decimal floats, automatically converted from internal storage format using market metadata. No manual conversion is needed.
## Available Chart Styles
- `"charles"` (default)
- `"binance"`
- `"blueskies"`
- `"brasil"`
- `"checkers"`
- `"classic"`
- `"mike"`
- `"nightclouds"`
- `"sas"`
- `"starsandstripes"`
- `"yahoo"`

View File

@@ -0,0 +1,67 @@
"""
DexOrder API - market data and charting for research and trading.
For research scripts, import and use get_api() to access the API:
from dexorder.api import get_api
import asyncio
api = get_api()
df = asyncio.run(api.data.historical_ohlc(...))
fig, ax = api.charting.plot_ohlc(df)
"""
import logging
from typing import Optional
from dexorder.api.api import API
from dexorder.api.charting_api import ChartingAPI
from dexorder.api.data_api import DataAPI
log = logging.getLogger(__name__)
# Global API instance - managed by main.py
_global_api: Optional[API] = None
def get_api() -> API:
"""
Get the global API instance for accessing market data and charts.
Use this in research scripts to access the data and charting APIs.
Returns:
API instance with data and charting capabilities
Raises:
RuntimeError: If called before API initialization (should not happen in research scripts)
Example:
from dexorder.api import get_api
import asyncio
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
"""
if _global_api is None:
raise RuntimeError("API not initialized")
return _global_api
def set_api(api: API) -> None:
"""Set the global API instance. Internal use only."""
global _global_api
_global_api = api
__all__ = ['API', 'ChartingAPI', 'DataAPI', 'get_api', 'set_api']
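As a hypothetical illustration of the `set_api` side (the concrete `DataAPI`/`ChartingAPI` implementations and the exact `main.py` layout are assumptions, not shown in this commit), the container entrypoint would construct the API once and register it before any research script runs:
```python
# Hypothetical wiring sketch - names other than API/set_api are placeholders
from dexorder.api import API, set_api

def bootstrap_api(data_impl, charting_impl) -> None:
    """Register the global API before executing research scripts."""
    set_api(API(charting=charting_impl, data=data_impl))
```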

View File

@@ -0,0 +1,44 @@
"""
Main DexOrder API - provides access to market data and charting.
"""
import logging
from .charting_api import ChartingAPI
from .data_api import DataAPI
log = logging.getLogger(__name__)
class API:
"""
Main API for accessing market data and creating charts.
This is the primary interface for research scripts and trading strategies.
Access this via get_api() in research scripts.
Attributes:
data: DataAPI for fetching historical and current market data
charting: ChartingAPI for creating candlestick charts and visualizations
Example:
from dexorder.api import get_api
import asyncio
api = get_api()
# Fetch data
df = asyncio.run(api.data.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21"
))
# Create chart
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT 1H")
"""
def __init__(self, charting: ChartingAPI, data: DataAPI):
self.charting: ChartingAPI = charting
self.data: DataAPI = data

View File

@@ -0,0 +1,155 @@
import logging
from abc import abstractmethod, ABC
from typing import Optional, Tuple, List
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
class ChartingAPI(ABC):
"""
API for creating financial charts and visualizations.
Provides methods to create candlestick charts, add technical indicator panels,
and build custom visualizations. All figures are automatically captured and
returned to the client as images.
Basic workflow:
1. Create a chart with plot_ohlc() → returns Figure and Axes
2. Optionally overlay indicators on the main axes (e.g., moving averages)
3. Optionally add indicator panels below with add_indicator_panel()
4. Figures are automatically captured (no need to save manually)
"""
@abstractmethod
def plot_ohlc(
self,
df: pd.DataFrame,
title: Optional[str] = None,
volume: bool = False,
style: str = "charles",
figsize: Tuple[int, int] = (12, 8),
**kwargs
) -> Tuple[Figure, plt.Axes]:
"""
Create a candlestick chart from OHLC data.
Args:
df: DataFrame with OHLC data. Required columns: open, high, low, close.
Column names are case-insensitive.
title: Chart title (optional)
volume: If True, shows volume bars below the candlesticks (requires 'volume' column)
style: Visual style for the chart. Available styles:
"charles" (default), "binance", "blueskies", "brasil", "checkers",
"classic", "mike", "nightclouds", "sas", "starsandstripes", "yahoo"
figsize: Figure size as (width, height) in inches. Default: (12, 8)
**kwargs: Additional styling arguments
Returns:
Tuple of (Figure, Axes):
- Figure: matplotlib Figure object
- Axes: Main candlestick axes (use for overlaying indicators)
Examples:
# Basic chart
fig, ax = api.plot_ohlc(df)
# With volume and title
fig, ax = api.plot_ohlc(
df,
title="BTC/USDT 1H",
volume=True,
style="binance"
)
# Overlay moving average
fig, ax = api.plot_ohlc(df)
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue")
ax.legend()
"""
pass
@abstractmethod
def add_indicator_panel(
self,
fig: Figure,
df: pd.DataFrame,
columns: Optional[List[str]] = None,
ylabel: Optional[str] = None,
height_ratio: float = 0.3,
ylim: Optional[Tuple[float, float]] = None,
**kwargs
) -> plt.Axes:
"""
Add an indicator panel below the chart with time-aligned x-axis.
Use this to display indicators that should be shown separately from the
price chart (e.g., RSI, MACD, volume).
Args:
fig: Figure object from plot_ohlc()
df: DataFrame with indicator data (must have same index as OHLC data)
columns: Column names to plot. If None, plots all numeric columns.
ylabel: Y-axis label (e.g., "RSI", "MACD")
height_ratio: Panel height relative to main chart (default: 0.3 = 30%)
ylim: Y-axis limits as (min, max). If None, auto-scales.
**kwargs: Line styling options (color, linewidth, linestyle, alpha)
Returns:
Axes object for the new panel (use for further customization)
Examples:
# Add RSI panel with reference lines
fig, ax = api.plot_ohlc(df)
rsi_ax = api.add_indicator_panel(
fig, df,
columns=["rsi"],
ylabel="RSI",
ylim=(0, 100)
)
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
# Add MACD panel
fig, ax = api.plot_ohlc(df)
api.add_indicator_panel(
fig, df,
columns=["macd", "macd_signal"],
ylabel="MACD"
)
"""
pass
@abstractmethod
def create_figure(
self,
figsize: Tuple[int, int] = (12, 8),
style: str = "charles"
) -> Tuple[Figure, plt.Axes]:
"""
Create a styled figure for custom visualizations.
Use this when you want to create charts other than candlesticks
(e.g., histograms, scatter plots, heatmaps).
Args:
figsize: Figure size as (width, height) in inches. Default: (12, 8)
style: Style name for consistent theming. Default: "charles"
Returns:
Tuple of (Figure, Axes) ready for plotting
Examples:
# Histogram
fig, ax = api.create_figure()
ax.hist(returns, bins=50)
ax.set_title("Return Distribution")
# Heatmap
fig, ax = api.create_figure(figsize=(10, 10))
import seaborn as sns
sns.heatmap(correlation_matrix, ax=ax)
ax.set_title("Correlation Matrix")
"""
pass
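Putting the two charting methods together: the sketch below overlays a moving average on the main axes and adds an RSI panel underneath. The indicator math is the script's own (a simple rolling-mean RSI), not part of the API, and the ticker and date range are illustrative:

```python
import asyncio
from dexorder.api import get_api

api = get_api()

df = asyncio.run(api.data.historical_ohlc(
    ticker="BINANCE:BTC/USDT",
    period_seconds=3600,
    start_time="2021-12-01",
    end_time="2021-12-21",
))

# Indicators are computed by the research script; the API only plots them.
df["sma_20"] = df["close"].rolling(20).mean()
delta = df["close"].diff()
gain = delta.clip(lower=0).rolling(14).mean()
loss = (-delta.clip(upper=0)).rolling(14).mean()
df["rsi"] = 100 - 100 / (1 + gain / loss)

fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT 1H", style="binance")
ax.plot(df.index, df["sma_20"], label="SMA 20", color="blue")
ax.legend()

rsi_ax = api.charting.add_indicator_panel(
    fig, df, columns=["rsi"], ylabel="RSI", ylim=(0, 100)
)
rsi_ax.axhline(30, color="green", linestyle="--", alpha=0.5)
rsi_ax.axhline(70, color="red", linestyle="--", alpha=0.5)
```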

dexorder/api/data_api.py
@@ -0,0 +1,162 @@
from abc import ABC, abstractmethod
from typing import Optional, List
import pandas as pd
from dexorder.utils import TimestampInput
class DataAPI(ABC):
"""
API for accessing market data.
Provides methods to query OHLC (Open, High, Low, Close) candlestick data
for cryptocurrency markets.
"""
@abstractmethod
async def historical_ohlc(
self,
ticker: str,
period_seconds: int,
start_time: TimestampInput,
end_time: TimestampInput,
extra_columns: Optional[List[str]] = None,
) -> pd.DataFrame:
"""
Fetch historical OHLC candlestick data for a market.
Args:
ticker: Market identifier in format "EXCHANGE:SYMBOL"
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
period_seconds: Candle period in seconds
Common values:
- 60 (1 minute)
- 300 (5 minutes)
- 900 (15 minutes)
- 3600 (1 hour)
- 86400 (1 day)
- 604800 (1 week)
start_time: Start of time range. Accepts:
- Unix timestamp in seconds (int/float): 1640000000
- Date string: "2021-12-20" or "2021-12-20 12:00:00"
- datetime object: datetime(2021, 12, 20)
- pandas Timestamp: pd.Timestamp("2021-12-20")
end_time: End of time range. Same formats as start_time.
extra_columns: Optional additional columns to include beyond the standard
OHLC columns. Available options:
- "volume" - Total volume (decimal float)
- "buy_vol" - Buy-side volume (decimal float)
- "sell_vol" - Sell-side volume (decimal float)
- "open_time", "high_time", "low_time", "close_time" (timestamps)
- "open_interest" (for futures markets)
- "ticker", "period_seconds"
Returns:
DataFrame with candlestick data sorted by timestamp (ascending).
Standard columns (always included):
- timestamp: Period start time in microseconds
- open: Opening price (decimal float)
- high: Highest price (decimal float)
- low: Lowest price (decimal float)
- close: Closing price (decimal float)
Plus any columns specified in extra_columns.
All prices and volumes are automatically converted to decimal floats
using market metadata. No manual conversion is needed.
Returns empty DataFrame if no data is available.
Examples:
# Basic OHLC with Unix timestamp
df = await api.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time=1640000000,
end_time=1640086400
)
# Using date strings with volume
df = await api.historical_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600,
start_time="2021-12-20",
end_time="2021-12-21",
extra_columns=["volume"]
)
# Using datetime objects
from datetime import datetime
df = await api.historical_ohlc(
ticker="COINBASE:ETH/USD",
period_seconds=300,
start_time=datetime(2021, 12, 20, 9, 30),
end_time=datetime(2021, 12, 20, 16, 30),
extra_columns=["volume", "buy_vol", "sell_vol"]
)
"""
pass
@abstractmethod
async def latest_ohlc(
self,
ticker: str,
period_seconds: int,
length: int = 1,
extra_columns: Optional[List[str]] = None,
) -> pd.DataFrame:
"""
Query the most recent OHLC candles for a ticker.
This method fetches the latest N completed candles without needing to
specify exact timestamps. Useful for real-time analysis and indicators.
Args:
ticker: Market identifier in format "EXCHANGE:SYMBOL"
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
period_seconds: OHLC candle period in seconds
Common values: 60 (1m), 300 (5m), 900 (15m), 3600 (1h),
86400 (1d), 604800 (1w)
length: Number of most recent candles to return (default: 1)
extra_columns: Optional list of additional column names to include.
Same column options as historical_ohlc:
- "volume", "buy_vol", "sell_vol"
- "open_time", "high_time", "low_time", "close_time"
- "open_interest", "ticker", "period_seconds"
Returns:
Pandas DataFrame with the same column structure as historical_ohlc,
containing the N most recent completed candles sorted by timestamp.
Returns empty DataFrame if no data is available.
Examples:
# Get the last candle
df = await api.latest_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=3600
)
# Returns: timestamp, open, high, low, close
# Get the last 50 5-minute candles with volume
df = await api.latest_ohlc(
ticker="COINBASE:ETH/USD",
period_seconds=300,
length=50,
extra_columns=["volume", "buy_vol", "sell_vol"]
)
# Get recent candles with all timing data
df = await api.latest_ohlc(
ticker="BINANCE:BTC/USDT",
period_seconds=60,
length=100,
extra_columns=["open_time", "high_time", "low_time", "close_time"]
)
Note:
This method returns only completed candles. The current (incomplete)
candle is not included.
"""
pass
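Since `latest_ohlc()` needs no timestamps, it is convenient for quick rolling checks. A small illustrative helper (the function name, ticker, and summary fields are this sketch's own choices):

```python
import asyncio
from dexorder.api import get_api

async def recent_summary(ticker: str = "COINBASE:ETH/USD") -> dict:
    """Summarize the last 50 completed 5-minute candles."""
    api = get_api()
    df = await api.data.latest_ohlc(
        ticker=ticker,
        period_seconds=300,
        length=50,
        extra_columns=["volume", "buy_vol", "sell_vol"],
    )
    if df.empty:
        return {"ticker": ticker, "candles": 0}
    total_volume = df["volume"].sum()
    return {
        "ticker": ticker,
        "candles": len(df),
        "last_close": float(df["close"].iloc[-1]),
        "total_volume": float(total_volume),
        "buy_ratio": float(df["buy_vol"].sum() / total_volume),
    }

print(asyncio.run(recent_summary()))
```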

@@ -0,0 +1,400 @@
"""
Conda Package Manager
Manages dynamic installation and cleanup of conda packages for user components.
Scans metadata files to determine required packages and syncs the conda environment.
"""
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import Optional, Set
log = logging.getLogger(__name__)
# =============================================================================
# Conda Environment Detection
# =============================================================================
def get_conda_env_path() -> Optional[Path]:
"""
Detect the active conda environment path.
Returns:
Path to conda environment, or None if not in a conda environment
"""
# Check for CONDA_PREFIX environment variable
import os
conda_prefix = os.getenv("CONDA_PREFIX")
if conda_prefix:
return Path(conda_prefix)
# Check if python executable is in a conda environment
python_path = Path(sys.executable)
# Look for conda-meta directory (indicates conda environment)
for parent in [python_path.parent, python_path.parent.parent]:
if (parent / "conda-meta").exists():
return parent
return None
def get_conda_executable() -> Optional[Path]:
"""
Find the conda executable.
Returns:
Path to conda executable, or None if not found
"""
env_path = get_conda_env_path()
if not env_path:
return None
# Try common locations
for conda_name in ["conda", "mamba"]:
# Look in environment bin
conda_bin = env_path / "bin" / conda_name
if conda_bin.exists():
return conda_bin
# Look in parent conda installation
parent_conda = env_path.parent.parent / "bin" / conda_name
if parent_conda.exists():
return parent_conda
return None
# =============================================================================
# Package Management
# =============================================================================
def get_installed_packages() -> Set[str]:
"""
Get set of currently installed conda packages.
Returns:
Set of package names
"""
try:
result = subprocess.run(
[sys.executable, "-m", "conda", "list", "--json"],
capture_output=True,
text=True,
timeout=30,
)
if result.returncode == 0:
packages = json.loads(result.stdout)
return {pkg["name"] for pkg in packages}
else:
log.error(f"Failed to list conda packages: {result.stderr}")
return set()
except subprocess.TimeoutExpired:
log.error("Timeout while listing conda packages")
return set()
except Exception as e:
log.error(f"Error listing conda packages: {e}")
return set()
def install_packages(packages: list[str]) -> dict:
"""
Install conda packages if not already installed.
Args:
packages: List of package names to install
Returns:
dict with:
- success: bool
- installed: list[str] - packages that were installed
- skipped: list[str] - packages already installed
- failed: list[str] - packages that failed to install
- error: str (if any)
"""
if not packages:
return {
"success": True,
"installed": [],
"skipped": [],
"failed": [],
}
# Get currently installed packages
installed = get_installed_packages()
# Filter out already installed packages
to_install = [pkg for pkg in packages if pkg not in installed]
skipped = [pkg for pkg in packages if pkg in installed]
if not to_install:
log.info(f"All packages already installed: {skipped}")
return {
"success": True,
"installed": [],
"skipped": skipped,
"failed": [],
}
# Install missing packages
log.info(f"Installing conda packages: {to_install}")
try:
result = subprocess.run(
[sys.executable, "-m", "conda", "install", "-y", "-c", "conda-forge"] + to_install,
capture_output=True,
text=True,
timeout=300, # 5 minute timeout
)
if result.returncode == 0:
log.info(f"Successfully installed packages: {to_install}")
return {
"success": True,
"installed": to_install,
"skipped": skipped,
"failed": [],
}
else:
log.error(f"Failed to install packages: {result.stderr}")
return {
"success": False,
"installed": [],
"skipped": skipped,
"failed": to_install,
"error": result.stderr,
}
except subprocess.TimeoutExpired:
log.error("Timeout while installing conda packages")
return {
"success": False,
"installed": [],
"skipped": skipped,
"failed": to_install,
"error": "Installation timeout",
}
except Exception as e:
log.error(f"Error installing conda packages: {e}")
return {
"success": False,
"installed": [],
"skipped": skipped,
"failed": to_install,
"error": str(e),
}
def remove_packages(packages: list[str]) -> dict:
"""
Remove conda packages.
Args:
packages: List of package names to remove
Returns:
dict with:
- success: bool
- removed: list[str] - packages that were removed
- error: str (if any)
"""
if not packages:
return {
"success": True,
"removed": [],
}
log.info(f"Removing conda packages: {packages}")
try:
result = subprocess.run(
[sys.executable, "-m", "conda", "remove", "-y"] + packages,
capture_output=True,
text=True,
timeout=120,
)
if result.returncode == 0:
log.info(f"Successfully removed packages: {packages}")
return {
"success": True,
"removed": packages,
}
else:
log.error(f"Failed to remove packages: {result.stderr}")
return {
"success": False,
"removed": [],
"error": result.stderr,
}
except subprocess.TimeoutExpired:
log.error("Timeout while removing conda packages")
return {
"success": False,
"removed": [],
"error": "Removal timeout",
}
except Exception as e:
log.error(f"Error removing conda packages: {e}")
return {
"success": False,
"removed": [],
"error": str(e),
}
# =============================================================================
# Metadata Scanning
# =============================================================================
def scan_metadata_packages(data_dir: Path) -> Set[str]:
"""
Scan all metadata files to find required conda packages.
Args:
data_dir: Base data directory containing category subdirectories
Returns:
Set of all required package names
"""
packages = set()
# Scan all category directories
for category_dir in data_dir.iterdir():
if not category_dir.is_dir():
continue
# Scan all items in this category
for item_dir in category_dir.iterdir():
if not item_dir.is_dir():
continue
metadata_path = item_dir / "metadata.json"
if not metadata_path.exists():
continue
try:
metadata = json.loads(metadata_path.read_text())
conda_packages = metadata.get("conda_packages", [])
if conda_packages:
packages.update(conda_packages)
log.debug(f"Found packages in {item_dir.name}: {conda_packages}")
except Exception as e:
log.error(f"Failed to read metadata from {metadata_path}: {e}")
return packages
def get_base_packages(environment_yml: Path) -> Set[str]:
"""
Get base packages from environment.yml.
Args:
environment_yml: Path to environment.yml file
Returns:
Set of base package names
"""
if not environment_yml.exists():
log.warning(f"environment.yml not found at {environment_yml}")
return set()
try:
import yaml
with open(environment_yml) as f:
env_spec = yaml.safe_load(f)
packages = set()
# Add conda packages
for dep in env_spec.get("dependencies", []):
if isinstance(dep, str):
# Extract package name (before version spec)
pkg_name = dep.split(">=")[0].split("=")[0].split("<")[0].split(">")[0].strip()
packages.add(pkg_name)
return packages
except Exception as e:
log.error(f"Failed to parse environment.yml: {e}")
return set()
# =============================================================================
# Sync Operation
# =============================================================================
def sync_packages(data_dir: Path, environment_yml: Optional[Path] = None) -> dict:
"""
Sync conda packages with metadata requirements.
Scans all metadata files, computes desired package set, and removes
packages that are no longer needed (excluding base environment packages).
Args:
data_dir: Base data directory
environment_yml: Path to environment.yml (optional)
Returns:
dict with:
- success: bool
- required: list[str] - packages required by metadata
- base: list[str] - base packages from environment.yml
- installed: list[str] - currently installed packages
- to_remove: list[str] - packages to be removed
- removed: list[str] - packages that were removed
- error: str (if any)
"""
log.info("Starting conda package sync")
# Get required packages from metadata
required_packages = scan_metadata_packages(data_dir)
log.info(f"Required packages from metadata: {required_packages}")
# Get base packages from environment.yml
base_packages = set()
if environment_yml and environment_yml.exists():
base_packages = get_base_packages(environment_yml)
log.info(f"Base packages from environment.yml: {base_packages}")
# Get currently installed packages
installed_packages = get_installed_packages()
log.info(f"Currently installed packages: {len(installed_packages)} total")
# Compute packages to remove
# Remove packages that are:
# - Currently installed
# - Not in base packages
# - Not in required packages
protected = base_packages | required_packages
to_remove = [pkg for pkg in installed_packages if pkg not in protected]
# Filter out critical system packages (be conservative)
system_prefixes = ["python", "conda", "pip", "setuptools", "wheel", "_"]
to_remove = [pkg for pkg in to_remove if not any(pkg.startswith(prefix) for prefix in system_prefixes)]
log.info(f"Packages to remove: {to_remove}")
result = {
"success": True,
"required": sorted(required_packages),
"base": sorted(base_packages),
"installed": sorted(installed_packages),
"to_remove": to_remove,
"removed": [],
}
# Remove packages if any
if to_remove:
remove_result = remove_packages(to_remove)
result["success"] = remove_result["success"]
result["removed"] = remove_result.get("removed", [])
if not remove_result["success"]:
result["error"] = remove_result.get("error", "Unknown error")
log.info(f"Conda package sync complete: {len(result['removed'])} packages removed")
return result
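How these pieces fit together, as a hedged sketch: a component declares its conda dependencies in a `metadata.json` under the data directory, the scanner collects them, and install/sync keep the environment in step. The module name `conda_manager`, the `data/indicators/my_indicator` layout, and the package name are assumptions for illustration; note that `sync_packages()` really does uninstall packages from the active environment.

```python
import json
import logging
from pathlib import Path

import conda_manager  # hypothetical module name for the file above

logging.basicConfig(level=logging.INFO)

data_dir = Path("data")

# A component declares its conda dependencies in
# data/<category>/<item>/metadata.json, which scan_metadata_packages() reads.
item_dir = data_dir / "indicators" / "my_indicator"
item_dir.mkdir(parents=True, exist_ok=True)
(item_dir / "metadata.json").write_text(json.dumps({"conda_packages": ["ta-lib"]}))

required = conda_manager.scan_metadata_packages(data_dir)  # -> {"ta-lib"}
result = conda_manager.install_packages(sorted(required))
if not result["success"]:
    raise SystemExit(f"conda install failed: {result.get('error')}")

# Prune packages no longer referenced by any metadata file, protecting
# everything listed in environment.yml. This modifies the live environment.
sync = conda_manager.sync_packages(data_dir, environment_yml=Path("environment.yml"))
print("removed:", sync["removed"])
```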

Some files were not shown because too many files have changed in this diff.