data fixes, partial custom indicator support

This commit is contained in:
2026-04-08 21:28:31 -04:00
parent b701554996
commit a70dcd954f
81 changed files with 5438 additions and 1852 deletions

81
bin/dev
View File

@@ -217,16 +217,18 @@ deploy_services() {
# Update configs # Update configs
echo -e "${GREEN}→${NC} Updating configs..." echo -e "${GREEN}→${NC} Updating configs..."
# Template the gateway-config.yaml with actual image tags # Template gateway-config.yaml with actual image tags (backup first for safe restore)
sed -i "s/SANDBOX_TAG_PLACEHOLDER/$SANDBOX_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" local _gw_bak
sed -i "s/SIDECAR_TAG_PLACEHOLDER/$SIDECAR_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" _gw_bak=$(mktemp)
cp "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" "$_gw_bak"
sed -i "s|sandbox_image: dexorder/ai-sandbox:.*|sandbox_image: dexorder/ai-sandbox:$SANDBOX_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s|sidecar_image: dexorder/ai-lifecycle-sidecar:.*|sidecar_image: dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev "$SCRIPT_DIR/config-update" dev
# Create a temporary kustomization overlay with image tags # Create a temporary kustomization overlay with image tags
echo -e "${GREEN}→${NC} Setting image tags in kustomization..." echo -e "${GREEN}→${NC} Setting image tags in kustomization..."
cat >> kustomization.yaml <<EOF cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev) # Image tags (added by bin/dev)
images: images:
- name: dexorder/ai-relay - name: dexorder/ai-relay
@@ -258,9 +260,9 @@ EOF
# Clean up the appended image tags from kustomization.yaml # Clean up the appended image tags from kustomization.yaml
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
# Restore gateway-config.yaml placeholders # Restore gateway-config.yaml from backup
sed -i "s/$SANDBOX_TAG/SANDBOX_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" cp "$_gw_bak" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s/$SIDECAR_TAG/SIDECAR_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" rm "$_gw_bak"
echo -e "${GREEN}✓ Services deployed${NC}" echo -e "${GREEN}✓ Services deployed${NC}"
@@ -583,9 +585,11 @@ deploy_service() {
gateway) gateway)
image_name="dexorder/ai-gateway" image_name="dexorder/ai-gateway"
image_tag="$GATEWAY_TAG" image_tag="$GATEWAY_TAG"
# Also need to template gateway-config.yaml # Also need to template gateway-config.yaml (backup for safe restore)
sed -i "s/SANDBOX_TAG_PLACEHOLDER/$SANDBOX_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" _gw_bak_single=$(mktemp)
sed -i "s/SIDECAR_TAG_PLACEHOLDER/$SIDECAR_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" cp "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" "$_gw_bak_single"
sed -i "s|sandbox_image: dexorder/ai-sandbox:.*|sandbox_image: dexorder/ai-sandbox:$SANDBOX_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s|sidecar_image: dexorder/ai-lifecycle-sidecar:.*|sidecar_image: dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev "$SCRIPT_DIR/config-update" dev
;; ;;
web) web)
@@ -604,7 +608,6 @@ deploy_service() {
# Create a temporary kustomization overlay with ONLY this service's image tag # Create a temporary kustomization overlay with ONLY this service's image tag
cat >> kustomization.yaml <<EOF cat >> kustomization.yaml <<EOF
# Image tags (added by bin/dev) # Image tags (added by bin/dev)
images: images:
- name: $image_name - name: $image_name
@@ -616,10 +619,10 @@ EOF
# Clean up the appended image tags from kustomization.yaml # Clean up the appended image tags from kustomization.yaml
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
# Restore gateway-config.yaml placeholders if we modified it # Restore gateway-config.yaml from backup if we modified it
if [ "$service" == "gateway" ]; then if [ "$service" == "gateway" ]; then
sed -i "s/$SANDBOX_TAG/SANDBOX_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" cp "$_gw_bak_single" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s/$SIDECAR_TAG/SIDECAR_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" rm "$_gw_bak_single"
fi fi
echo -e "${GREEN}✓ $service deployed${NC}" echo -e "${GREEN}✓ $service deployed${NC}"
@@ -685,6 +688,19 @@ case "$COMMAND" in
fi fi
done done
# Sandbox restart requires gateway to redeploy with the new sandbox image tag.
# If gateway wasn't explicitly listed, rebuild and deploy it automatically.
if [ "$sandbox_requested" == "1" ]; then
gateway_in_list=0
for svc in "${deploy_services_list[@]}"; do
[ "$svc" == "gateway" ] && gateway_in_list=1 && break
done
if [ "$gateway_in_list" == "0" ]; then
rebuild_images "gateway"
deploy_services_list+=("gateway")
fi
fi
# Deploy all non-sandbox services together in one kustomize apply # Deploy all non-sandbox services together in one kustomize apply
if [ ${#deploy_services_list[@]} -gt 0 ]; then if [ ${#deploy_services_list[@]} -gt 0 ]; then
if [ -f "$ROOT_DIR/.dev-image-tag" ]; then if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
@@ -693,18 +709,20 @@ case "$COMMAND" in
cd "$ROOT_DIR/deploy/k8s/dev" cd "$ROOT_DIR/deploy/k8s/dev"
# Template gateway-config if gateway is in the list # Template gateway-config if gateway is in the list (backup for safe restore)
_ms_gw_bak=""
for svc in "${deploy_services_list[@]}"; do for svc in "${deploy_services_list[@]}"; do
if [ "$svc" == "gateway" ]; then if [ "$svc" == "gateway" ]; then
sed -i "s/SANDBOX_TAG_PLACEHOLDER/$SANDBOX_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" _ms_gw_bak=$(mktemp)
sed -i "s/SIDECAR_TAG_PLACEHOLDER/$SIDECAR_TAG/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" cp "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" "$_ms_gw_bak"
sed -i "s|sandbox_image: dexorder/ai-sandbox:.*|sandbox_image: dexorder/ai-sandbox:$SANDBOX_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s|sidecar_image: dexorder/ai-lifecycle-sidecar:.*|sidecar_image: dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev "$SCRIPT_DIR/config-update" dev
break break
fi fi
done done
# Build the images stanza for all services at once # Build the images stanza for all services at once
echo "" >> kustomization.yaml
echo "# Image tags (added by bin/dev)" >> kustomization.yaml echo "# Image tags (added by bin/dev)" >> kustomization.yaml
echo "images:" >> kustomization.yaml echo "images:" >> kustomization.yaml
for svc in "${deploy_services_list[@]}"; do for svc in "${deploy_services_list[@]}"; do
@@ -722,18 +740,29 @@ case "$COMMAND" in
sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml sed -i '/# Image tags (added by bin\/dev)/,$d' kustomization.yaml
# Restore gateway-config placeholders if gateway was deployed # Restore gateway-config from backup if we modified it
for svc in "${deploy_services_list[@]}"; do if [ -n "$_ms_gw_bak" ]; then
if [ "$svc" == "gateway" ]; then cp "$_ms_gw_bak" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s/$SANDBOX_TAG/SANDBOX_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" rm "$_ms_gw_bak"
sed -i "s/$SIDECAR_TAG/SIDECAR_TAG_PLACEHOLDER/g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" fi
break
fi
done
fi fi
# Handle sandbox separately # Handle sandbox separately
if [ "$sandbox_requested" == "1" ]; then if [ "$sandbox_requested" == "1" ]; then
if [ -f "$ROOT_DIR/.dev-image-tag" ]; then
source "$ROOT_DIR/.dev-image-tag"
fi
echo -e "${GREEN}→${NC} Updating gateway config with new sandbox image tag ($SANDBOX_TAG)..."
cd "$ROOT_DIR/deploy/k8s/dev"
_sb_bak=$(mktemp)
cp "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml" "$_sb_bak"
sed -i "s|sandbox_image: dexorder/ai-sandbox:.*|sandbox_image: dexorder/ai-sandbox:$SANDBOX_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
sed -i "s|sidecar_image: dexorder/ai-lifecycle-sidecar:.*|sidecar_image: dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG|g" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
"$SCRIPT_DIR/config-update" dev
cp "$_sb_bak" "$ROOT_DIR/deploy/k8s/dev/configs/gateway-config.yaml"
rm "$_sb_bak"
echo -e "${GREEN}→${NC} Restarting gateway to pick up new sandbox image tag..."
kubectl rollout restart deployment/gateway
echo -e "${GREEN}→${NC} Deleting user container deployments in sandbox namespace..." echo -e "${GREEN}→${NC} Deleting user container deployments in sandbox namespace..."
kubectl delete deployments --all -n sandbox 2>/dev/null || true kubectl delete deployments --all -n sandbox 2>/dev/null || true
echo -e "${GREEN}✓ User containers will be recreated by gateway on next login${NC}" echo -e "${GREEN}✓ User containers will be recreated by gateway on next login${NC}"

View File

@@ -92,18 +92,16 @@ create_item "MinIO" \
"secret_key[password]=REPLACE_WITH_STRONG_SECRET_KEY" "secret_key[password]=REPLACE_WITH_STRONG_SECRET_KEY"
# --- Gateway --- # --- Gateway ---
# Used by: ai-secrets (anthropic_api_key), gateway-secrets (all LLM keys + jwt_secret) # Used by: gateway-secrets (LLM keys + jwt_secret + search keys)
# jwt_secret: used to sign user sessions — generate with: openssl rand -base64 48 # jwt_secret: used to sign user sessions — generate with: openssl rand -base64 48
# anthropic_api_key: Anthropic Console → API Keys (https://console.anthropic.com) # deepinfra_api_key: Deep Infra Console → API Keys (https://deepinfra.com)
# openai_api_key: OpenAI Platform → API Keys (https://platform.openai.com) # anthropic_api_key: Anthropic Console → API Keys (https://console.anthropic.com) — kept for potential future use
# google_api_key: Google AI Studio (https://aistudio.google.com) # tavily_api_key: Tavily Console → API Keys (https://app.tavily.com)
# openrouter_api_key: OpenRouter (https://openrouter.ai)
create_item "Gateway" \ create_item "Gateway" \
"anthropic_api_key[password]=sk-ant-REPLACE_ME" \ "deepinfra_api_key[password]=REPLACE_ME" \
"jwt_secret[password]=REPLACE_WITH_RANDOM_64_CHAR_SECRET" \ "jwt_secret[password]=REPLACE_WITH_RANDOM_64_CHAR_SECRET" \
"openai_api_key[password]=sk-REPLACE_ME" \ "anthropic_api_key[password]=sk-ant-REPLACE_ME" \
"google_api_key[password]=REPLACE_ME" \ "tavily_api_key[password]=tvly-REPLACE_ME"
"openrouter_api_key[password]=sk-or-REPLACE_ME"
# --- Telegram --- # --- Telegram ---
# Used by: gateway-secrets (optional Telegram bot integration) # Used by: gateway-secrets (optional Telegram bot integration)

View File

@@ -24,40 +24,40 @@ data:
# Default model (if user has no preference) # Default model (if user has no preference)
defaults: defaults:
model_provider: anthropic model_provider: deepinfra
model: claude-sonnet-4-6 model: zai-org/GLM-5
# License tier model configuration # License tier model configuration
license_models: license_models:
# Free tier models # Free tier models
free: free:
default: claude-haiku-4-5-20251001 default: zai-org/GLM-5
cost_optimized: claude-haiku-4-5-20251001 cost_optimized: zai-org/GLM-5
complex: claude-haiku-4-5-20251001 complex: zai-org/GLM-5
allowed_models: allowed_models:
- claude-haiku-4-5-20251001 - zai-org/GLM-5
# Pro tier models # Pro tier models
pro: pro:
default: claude-sonnet-4-6 default: zai-org/GLM-5
cost_optimized: claude-haiku-4-5-20251001 cost_optimized: zai-org/GLM-5
complex: claude-sonnet-4-6 complex: zai-org/GLM-5
blocked_models: blocked_models:
- claude-opus-4-6 - Qwen/Qwen3-235B-A22B-Instruct-2507
# Enterprise tier models # Enterprise tier models
enterprise: enterprise:
default: claude-sonnet-4-6 default: zai-org/GLM-5
cost_optimized: claude-haiku-4-5-20251001 cost_optimized: zai-org/GLM-5
complex: claude-opus-4-6 complex: Qwen/Qwen3-235B-A22B-Instruct-2507
# Kubernetes configuration # Kubernetes configuration
kubernetes: kubernetes:
namespace: sandbox namespace: sandbox
service_namespace: default service_namespace: default
in_cluster: true in_cluster: true
sandbox_image: dexorder/ai-sandbox:SANDBOX_TAG_PLACEHOLDER sandbox_image: dexorder/ai-sandbox:dev20260408140409
sidecar_image: dexorder/ai-lifecycle-sidecar:SIDECAR_TAG_PLACEHOLDER sidecar_image: dexorder/ai-lifecycle-sidecar:dev20260407185216
storage_class: standard storage_class: standard
image_pull_policy: Never # For minikube dev - use local images image_pull_policy: Never # For minikube dev - use local images

View File

@@ -43,248 +43,3 @@ secretGenerator: []
generatorOptions: generatorOptions:
disableNameSuffixHash: true disableNameSuffixHash: true

View File

@@ -4,4 +4,4 @@ metadata:
name: ai-secrets name: ai-secrets
type: Opaque type: Opaque
stringData: stringData:
anthropic-api-key: "sk-ant-YOUR_KEY_HERE" deepinfra-api-key: "YOUR_DEEPINFRA_KEY_HERE"

View File

@@ -22,8 +22,8 @@ data:
# Default model (if user has no preference) # Default model (if user has no preference)
defaults: defaults:
model_provider: anthropic model_provider: deepinfra
model: claude-sonnet-4-6 model: zai-org/GLM-5
# Kubernetes configuration # Kubernetes configuration
kubernetes: kubernetes:

View File

@@ -5,4 +5,4 @@ metadata:
namespace: ai namespace: ai
type: Opaque type: Opaque
stringData: stringData:
anthropic-api-key: "{{ op://AI Prod/Gateway/anthropic_api_key }}" deepinfra-api-key: "{{ op://AI Prod/Gateway/deepinfra_api_key }}"

View File

@@ -14,10 +14,11 @@ stringData:
# LLM Provider API Keys # LLM Provider API Keys
llm_providers: llm_providers:
anthropic_api_key: "{{ op://AI Prod/Gateway/anthropic_api_key }}" deepinfra_api_key: "{{ op://AI Prod/Gateway/deepinfra_api_key }}"
openai_api_key: "{{ op://AI Prod/Gateway/openai_api_key }}"
google_api_key: "{{ op://AI Prod/Gateway/google_api_key }}" # Search API Keys
openrouter_api_key: "{{ op://AI Prod/Gateway/openrouter_api_key }}" search:
tavily_api_key: "{{ op://AI Prod/Gateway/tavily_api_key }}"
# Telegram (optional) # Telegram (optional)
telegram: telegram:

View File

@@ -80,9 +80,8 @@ public class SchemaInitializer {
*/ */
// Bump this when the schema changes. Tables with a different (or missing) version // Bump this when the schema changes. Tables with a different (or missing) version
// will be dropped and recreated. Increment by 1 for each incompatible change. // will be dropped and recreated. Increment by 1 for each incompatible change.
// v2: open/high/low/close changed from required to optional to support null gap bars // v1: open/high/low/close required; ingestor forward-fills interior gaps with previous close
// v3: timestamps changed from microseconds to nanoseconds; ticker format changed to BTC/USDT.BINANCE private static final String OHLC_SCHEMA_VERSION = "1";
private static final String OHLC_SCHEMA_VERSION = "3";
private static final String SCHEMA_VERSION_PROP = "app.schema.version"; private static final String SCHEMA_VERSION_PROP = "app.schema.version";
private void initializeOhlcTable() { private void initializeOhlcTable() {
@@ -121,7 +120,7 @@ public class SchemaInitializer {
LOG.info("Creating OHLC table: {}", tableId); LOG.info("Creating OHLC table: {}", tableId);
// Define the OHLC schema. // Define the OHLC schema.
// timestamp is stored as BIGINT (microseconds since epoch), not a TIMESTAMP type, // timestamp is stored as BIGINT (nanoseconds since epoch), not a TIMESTAMP type,
// so that GenericRowData.setField() accepts a plain Long value. // so that GenericRowData.setField() accepts a plain Long value.
Schema schema = new Schema( Schema schema = new Schema(
// Primary key fields // Primary key fields
@@ -129,11 +128,11 @@ public class SchemaInitializer {
required(2, "period_seconds", Types.IntegerType.get(), "OHLC period in seconds"), required(2, "period_seconds", Types.IntegerType.get(), "OHLC period in seconds"),
required(3, "timestamp", Types.LongType.get(), "Candle timestamp in nanoseconds since epoch"), required(3, "timestamp", Types.LongType.get(), "Candle timestamp in nanoseconds since epoch"),
// OHLC price data — optional to support gap bars (null = no trades that period) // OHLC price data — required; ingestor forward-fills interior gaps with previous close
optional(4, "open", Types.LongType.get(), "Opening price"), required(4, "open", Types.LongType.get(), "Opening price (forward-filled for interior market gaps)"),
optional(5, "high", Types.LongType.get(), "Highest price"), required(5, "high", Types.LongType.get(), "Highest price"),
optional(6, "low", Types.LongType.get(), "Lowest price"), required(6, "low", Types.LongType.get(), "Lowest price"),
optional(7, "close", Types.LongType.get(), "Closing price"), required(7, "close", Types.LongType.get(), "Closing price"),
// Volume data // Volume data
optional(8, "volume", Types.LongType.get(), "Total volume"), optional(8, "volume", Types.LongType.get(), "Total volume"),

View File

@@ -16,22 +16,28 @@
"@fastify/jwt": "^9.0.1", "@fastify/jwt": "^9.0.1",
"@fastify/websocket": "^11.0.1", "@fastify/websocket": "^11.0.1",
"@kubernetes/client-node": "^1.0.0", "@kubernetes/client-node": "^1.0.0",
"@langchain/anthropic": "latest", "@langchain/community": "^1.1.27",
"@langchain/core": "latest", "@langchain/core": "latest",
"@langchain/langgraph": "latest", "@langchain/langgraph": "latest",
"@langchain/openai": "^1.4.2",
"@modelcontextprotocol/sdk": "^1.0.4", "@modelcontextprotocol/sdk": "^1.0.4",
"@qdrant/js-client-rest": "^1.17.0", "@qdrant/js-client-rest": "^1.17.0",
"@types/pdf-parse": "^1.1.5",
"argon2": "^0.41.1", "argon2": "^0.41.1",
"better-auth": "^1.5.3", "better-auth": "^1.5.3",
"cheerio": "^1.2.0",
"chrono-node": "^2.7.10", "chrono-node": "^2.7.10",
"duck-duck-scrape": "^2.2.7",
"duckdb": "^1.1.3", "duckdb": "^1.1.3",
"fast-json-patch": "^3.1.1", "fast-json-patch": "^3.1.1",
"fast-xml-parser": "^5.5.10",
"fastify": "^5.2.0", "fastify": "^5.2.0",
"gray-matter": "^4.0.3", "gray-matter": "^4.0.3",
"ioredis": "^5.4.2", "ioredis": "^5.4.2",
"js-yaml": "^4.1.0", "js-yaml": "^4.1.0",
"kysely": "^0.27.3", "kysely": "^0.27.3",
"ollama": "^0.5.10", "ollama": "^0.5.10",
"pdf-parse": "^2.4.5",
"pg": "^8.13.1", "pg": "^8.13.1",
"pino": "^9.6.0", "pino": "^9.6.0",
"pino-pretty": "^13.0.0", "pino-pretty": "^13.0.0",

View File

@@ -10,6 +10,7 @@ import type { SymbolIndexService } from '../services/symbol-index-service.js';
import type { ContainerManager } from '../k8s/container-manager.js'; import type { ContainerManager } from '../k8s/container-manager.js';
import { import {
WorkspaceManager, WorkspaceManager,
ContainerSync,
DEFAULT_STORES, DEFAULT_STORES,
type ChannelAdapter, type ChannelAdapter,
type ChannelCapabilities, type ChannelCapabilities,
@@ -120,15 +121,6 @@ export class WebSocketHandler {
sendStatus(socket, 'initializing', 'Starting your workspace...'); sendStatus(socket, 'initializing', 'Starting your workspace...');
// Create workspace manager for this session
const workspace = new WorkspaceManager({
userId: authContext.userId,
sessionId: authContext.sessionId,
stores: DEFAULT_STORES,
// containerSync will be added when MCP client is implemented
logger,
});
// Create WebSocket channel adapter // Create WebSocket channel adapter
const wsAdapter: ChannelAdapter = { const wsAdapter: ChannelAdapter = {
sendSnapshot: (msg: SnapshotMessage) => { sendSnapshot: (msg: SnapshotMessage) => {
@@ -174,31 +166,47 @@ export class WebSocketHandler {
}), }),
}; };
// Declare harness outside try block so it's available in catch // Declare harness and workspace outside try block so they're available in catch
let harness: AgentHarness | undefined; let harness: AgentHarness | undefined;
let workspace: WorkspaceManager | undefined;
try { try {
// Initialize workspace first // Create and connect harness first so MCP client is available for ContainerSync
await workspace.initialize();
workspace.setAdapter(wsAdapter);
this.workspaces.set(authContext.sessionId, workspace);
// Create agent harness via factory (storage deps injected by factory)
harness = this.config.createHarness({ harness = this.config.createHarness({
userId: authContext.userId, userId: authContext.userId,
sessionId: authContext.sessionId, sessionId: authContext.sessionId,
license: authContext.license, license: authContext.license,
mcpServerUrl: authContext.mcpServerUrl, mcpServerUrl: authContext.mcpServerUrl,
logger, logger,
workspaceManager: workspace,
channelAdapter: wsAdapter, channelAdapter: wsAdapter,
channelType: authContext.channelType, channelType: authContext.channelType,
channelUserId: authContext.channelUserId, channelUserId: authContext.channelUserId,
}); });
await harness.initialize(); await harness.initialize();
// Wire ContainerSync now that MCP client is connected, then initialize workspace
const containerSync = new ContainerSync(harness.getMcpClient(), logger);
workspace = new WorkspaceManager({
userId: authContext.userId,
sessionId: authContext.sessionId,
stores: DEFAULT_STORES,
containerSync,
logger,
});
await workspace.initialize();
workspace.setAdapter(wsAdapter);
harness.setWorkspaceManager(workspace);
this.workspaces.set(authContext.sessionId, workspace);
this.harnesses.set(authContext.sessionId, harness); this.harnesses.set(authContext.sessionId, harness);
// Push all store snapshots to the client now, before 'connected'.
// Empty seqs force full snapshots for every store, so the browser's
// message queue has the current workspace state (including persistent
// stores loaded from the container) before TradingView initializes.
await workspace.handleHello({});
// Register session for event system // Register session for event system
// Container endpoint is derived from the MCP server URL (same container, different port) // Container endpoint is derived from the MCP server URL (same container, different port)
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.mcpServerUrl); const containerEventEndpoint = this.getContainerEventEndpoint(authContext.mcpServerUrl);
@@ -287,15 +295,18 @@ export class WebSocketHandler {
} else if (payload.type === 'hello') { } else if (payload.type === 'hello') {
// Workspace sync: hello message // Workspace sync: hello message
logger.debug({ seqs: payload.seqs }, 'Handling workspace hello'); logger.debug({ seqs: payload.seqs }, 'Handling workspace hello');
await workspace.handleHello(payload.seqs || {}); await workspace!.handleHello(payload.seqs || {});
} else if (payload.type === 'patch') { } else if (payload.type === 'patch') {
// Workspace sync: patch message // Workspace sync: patch message
logger.debug({ store: payload.store, seq: payload.seq }, 'Handling workspace patch'); logger.debug({ store: payload.store, seq: payload.seq }, 'Handling workspace patch');
await workspace.handlePatch(payload.store, payload.seq, payload.patch || []); await workspace!.handlePatch(payload.store, payload.seq, payload.patch || []);
} else if (payload.type === 'agent_stop') {
logger.info('Agent stop requested');
harness?.interrupt();
} else if (this.isDatafeedMessage(payload)) { } else if (this.isDatafeedMessage(payload)) {
// Historical data request - send to OHLC service // Historical data request - send to OHLC service
logger.info({ type: payload.type }, 'Routing to datafeed handler'); logger.info({ type: payload.type }, 'Routing to datafeed handler');
await this.handleDatafeedMessage(socket, payload, logger); await this.handleDatafeedMessage(socket, payload, logger, authContext);
} else { } else {
logger.warn({ type: payload.type }, 'Unknown message type received'); logger.warn({ type: payload.type }, 'Unknown message type received');
} }
@@ -322,7 +333,7 @@ export class WebSocketHandler {
} }
// Cleanup workspace // Cleanup workspace
await workspace.shutdown(); await workspace!.shutdown();
this.workspaces.delete(authContext.sessionId); this.workspaces.delete(authContext.sessionId);
// Cleanup harness // Cleanup harness
@@ -346,8 +357,10 @@ export class WebSocketHandler {
} catch (error) { } catch (error) {
logger.error({ error }, 'Failed to initialize session'); logger.error({ error }, 'Failed to initialize session');
socket.close(1011, 'Internal server error'); socket.close(1011, 'Internal server error');
await workspace.shutdown(); if (workspace) {
this.workspaces.delete(authContext.sessionId); await workspace.shutdown();
this.workspaces.delete(authContext.sessionId);
}
if (harness) { if (harness) {
await harness.cleanup(); await harness.cleanup();
} }
@@ -382,6 +395,7 @@ export class WebSocketHandler {
'get_bars', 'get_bars',
'subscribe_bars', 'subscribe_bars',
'unsubscribe_bars', 'unsubscribe_bars',
'evaluate_indicator',
]; ];
return datafeedTypes.includes(payload.type); return datafeedTypes.includes(payload.type);
} }
@@ -392,7 +406,8 @@ export class WebSocketHandler {
private async handleDatafeedMessage( private async handleDatafeedMessage(
socket: WebSocket, socket: WebSocket,
payload: any, payload: any,
logger: any logger: any,
authContext?: any
): Promise<void> { ): Promise<void> {
logger.info({ type: payload.type, payload }, 'handleDatafeedMessage called'); logger.info({ type: payload.type, payload }, 'handleDatafeedMessage called');
const ohlcService = this.config.ohlcService; const ohlcService = this.config.ohlcService;
@@ -526,6 +541,69 @@ export class WebSocketHandler {
); );
break; break;
case 'evaluate_indicator': {
// Direct MCP call — bypasses the agent/LLM for performance
const harness = this.harnesses.get(authContext.sessionId);
if (!harness) {
socket.send(JSON.stringify({
type: 'evaluate_indicator_result',
request_id: requestId,
error: 'Session not initialized',
}));
break;
}
try {
const mcpResult = await harness.callMcpTool('evaluate_indicator', {
symbol: payload.symbol,
from_time: payload.from_time,
to_time: payload.to_time,
period_seconds: payload.period_seconds,
pandas_ta_name: payload.pandas_ta_name,
parameters: payload.parameters ?? {},
}) as any;
// MCP returns { content: [{type: 'text', text: '...json...'}] }
// When the tool raises an exception, the MCP framework sets isError: true
// and puts the raw exception text in content[0].text (not JSON-wrapped).
const rawText = mcpResult?.content?.[0]?.text ?? mcpResult?.[0]?.text;
if (mcpResult?.isError || rawText == null) {
const errMsg = rawText ?? 'evaluate_indicator returned no content';
logger.error({ pandas_ta_name: payload.pandas_ta_name, rawText }, 'evaluate_indicator sandbox error');
socket.send(JSON.stringify({
type: 'evaluate_indicator_result',
request_id: requestId,
error: errMsg,
}));
break;
}
let data: any;
try {
data = JSON.parse(rawText);
} catch {
// Sandbox returned non-JSON (e.g. bare exception text)
logger.error({ pandas_ta_name: payload.pandas_ta_name, rawText }, 'evaluate_indicator returned non-JSON');
socket.send(JSON.stringify({
type: 'evaluate_indicator_result',
request_id: requestId,
error: rawText,
}));
break;
}
socket.send(JSON.stringify({
type: 'evaluate_indicator_result',
request_id: requestId,
...data,
}));
} catch (err: any) {
logger.error({ err: err?.message, pandas_ta_name: payload.pandas_ta_name }, 'evaluate_indicator handler error');
socket.send(JSON.stringify({
type: 'evaluate_indicator_result',
request_id: requestId,
error: err?.message ?? String(err),
}));
}
break;
}
default: default:
logger.warn({ type: payload.type }, 'Unknown datafeed message type'); logger.warn({ type: payload.type }, 'Unknown datafeed message type');
} }

View File

@@ -504,7 +504,11 @@ export class DuckDBClient {
} }
/** /**
* Find missing OHLC data ranges * Find missing OHLC data ranges by checking for absent timestamps.
*
* Any timestamp slot in [start_time, min(end_time, now)) that has no row in
* Iceberg is treated as missing and collected into contiguous ranges that the
* caller should request from the relay/ingestor.
*/ */
async findMissingOHLCRanges( async findMissingOHLCRanges(
ticker: string, ticker: string,
@@ -517,32 +521,51 @@ export class DuckDBClient {
try { try {
const data = await this.queryOHLC(ticker, period_seconds, start_time, end_time); const data = await this.queryOHLC(ticker, period_seconds, start_time, end_time);
if (data.length === 0) {
// All data is missing
return [[start_time, end_time]];
}
// Check if we have continuous data
// For now, simple check: if we have any data, assume complete
// TODO: Implement proper gap detection by checking for missing periods
const periodNanos = BigInt(period_seconds) * 1_000_000_000n; const periodNanos = BigInt(period_seconds) * 1_000_000_000n;
// end_time is exclusive, so expected count = (end - start) / period (no +1)
const expectedBars = Number((end_time - start_time) / periodNanos);
if (data.length < expectedBars * 0.95) { // Allow 5% tolerance // Cap at current time — future slots are not "missing", they don't exist yet.
this.logger.debug({ const nowNanos = BigInt(Date.now()) * 1_000_000n;
ticker, const effectiveEnd = end_time < nowNanos ? end_time : nowNanos;
expected: expectedBars,
actual: data.length, // Build a set of timestamps we already have (all rows are non-null now).
}, 'Incomplete OHLC data detected'); const present = new Set(data.map((row: any) => row.timestamp));
return [[start_time, end_time]]; // Request full range
// Collect every expected slot that is absent.
const missing: bigint[] = [];
for (let t = start_time; t < effectiveEnd; t += periodNanos) {
if (!present.has(t)) {
missing.push(t);
}
} }
// Data appears complete if (missing.length === 0) {
return []; return [];
}
// Coalesce adjacent missing slots into contiguous [rangeStart, rangeEnd) intervals.
const ranges: Array<[bigint, bigint]> = [];
let rangeStart = missing[0];
let prev = missing[0];
for (let i = 1; i < missing.length; i++) {
if (missing[i] !== prev + periodNanos) {
ranges.push([rangeStart, prev + periodNanos]);
rangeStart = missing[i];
}
prev = missing[i];
}
ranges.push([rangeStart, prev + periodNanos]);
this.logger.debug({
ticker,
period_seconds,
missingSlots: missing.length,
ranges: ranges.length,
}, 'OHLC gap detection complete');
return ranges;
} catch (error: any) { } catch (error: any) {
this.logger.error({ error: error.message }, 'Failed to find missing OHLC ranges'); this.logger.error({ error: error.message }, 'Failed to find missing OHLC ranges');
// Return full range on error (safe default) // Return full range on error (safe default — triggers a backfill)
return [[start_time, end_time]]; return [[start_time, end_time]];
} }
} }

View File

@@ -90,7 +90,7 @@ subagents/
```yaml ```yaml
tools: tools:
platform: ['symbol_lookup'] # Platform tools platform: ['symbol_lookup'] # Platform tools
mcp: ['category_*'] # MCP tool patterns mcp: ['python_*'] # MCP tool patterns
``` ```
**Example:** **Example:**

View File

@@ -12,10 +12,14 @@ import type { ModelMiddleware } from '../llm/middleware.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js'; import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import type { ChannelAdapter, PathTriggerContext } from '../workspace/index.js'; import type { ChannelAdapter, PathTriggerContext } from '../workspace/index.js';
import type { ResearchSubagent } from './subagents/research/index.js'; import type { ResearchSubagent } from './subagents/research/index.js';
import type { IndicatorSubagent } from './subagents/indicator/index.js';
import type { WebExploreSubagent } from './subagents/web-explore/index.js';
import type { DynamicStructuredTool } from '@langchain/core/tools'; import type { DynamicStructuredTool } from '@langchain/core/tools';
import { getToolRegistry } from '../tools/tool-registry.js'; import { getToolRegistry } from '../tools/tool-registry.js';
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js'; import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js'; import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
import { createIndicatorAgentTool } from '../tools/platform/indicator-agent.tool.js';
import { createWebExploreAgentTool } from '../tools/platform/web-explore-agent.tool.js';
import { createUserContext } from './memory/session-context.js'; import { createUserContext } from './memory/session-context.js';
import { readFile } from 'fs/promises'; import { readFile } from 'fs/promises';
import { join, dirname } from 'path'; import { join, dirname } from 'path';
@@ -52,6 +56,8 @@ export interface AgentHarnessConfig extends HarnessSessionConfig {
conversationStore?: ConversationStore; conversationStore?: ConversationStore;
historyLimit: number; historyLimit: number;
researchSubagent?: ResearchSubagent; researchSubagent?: ResearchSubagent;
indicatorSubagent?: IndicatorSubagent;
webExploreSubagent?: WebExploreSubagent;
} }
/** /**
@@ -79,12 +85,17 @@ export class AgentHarness {
private availableMCPTools: MCPToolInfo[] = []; private availableMCPTools: MCPToolInfo[] = [];
private researchImageCapture: Array<{ data: string; mimeType: string }> = []; private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
private conversationStore?: ConversationStore; private conversationStore?: ConversationStore;
private indicatorSubagent?: IndicatorSubagent;
private webExploreSubagent?: WebExploreSubagent;
private abortController: AbortController | null = null;
constructor(config: AgentHarnessConfig) { constructor(config: AgentHarnessConfig) {
this.config = config; this.config = config;
this.workspaceManager = config.workspaceManager; this.workspaceManager = config.workspaceManager;
this.channelAdapter = config.channelAdapter; this.channelAdapter = config.channelAdapter;
this.researchSubagent = config.researchSubagent; this.researchSubagent = config.researchSubagent;
this.indicatorSubagent = config.indicatorSubagent;
this.webExploreSubagent = config.webExploreSubagent;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger); this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger); this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
@@ -117,6 +128,10 @@ export class AgentHarness {
this.channelAdapter = adapter; this.channelAdapter = adapter;
} }
interrupt(): void {
  // Signal cancellation to the active run; optional chaining makes this a
  // harmless no-op when no abortController is currently set.
  this.abortController?.abort();
}
/** /**
* Initialize harness and connect to user's MCP server * Initialize harness and connect to user's MCP server
*/ */
@@ -132,9 +147,15 @@ export class AgentHarness {
// Discover available MCP tools from user's server // Discover available MCP tools from user's server
await this.discoverMCPTools(); await this.discoverMCPTools();
// Initialize web explore subagent first — research and indicator subagents inject it as a tool
await this.initializeWebExploreSubagent();
// Initialize research subagent if not provided // Initialize research subagent if not provided
await this.initializeResearchSubagent(); await this.initializeResearchSubagent();
// Initialize indicator subagent if not provided
await this.initializeIndicatorSubagent();
this.config.logger.info('Agent harness initialized'); this.config.logger.info('Agent harness initialized');
} catch (error) { } catch (error) {
this.config.logger.error({ error }, 'Failed to initialize agent harness'); this.config.logger.error({ error }, 'Failed to initialize agent harness');
@@ -214,6 +235,24 @@ export class AgentHarness {
(img) => this.researchImageCapture.push(img) (img) => this.researchImageCapture.push(img)
); );
// Inject web_explore tool if the web-explore subagent is ready
if (this.webExploreSubagent) {
const webExploreContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
researchTools.push(createWebExploreAgentTool({
webExploreSubagent: this.webExploreSubagent,
context: webExploreContext,
logger: this.config.logger,
}));
}
// Path resolution: use the compiled output path // Path resolution: use the compiled output path
const researchSubagentPath = join(__dirname, 'subagents', 'research'); const researchSubagentPath = join(__dirname, 'subagents', 'research');
this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path'); this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
@@ -243,6 +282,143 @@ export class AgentHarness {
} }
} }
/**
 * Initialize indicator subagent
 *
 * Lazily builds the indicator subagent for this session unless one was
 * already injected via config. Any failure is logged and swallowed — the
 * harness keeps running without indicator support.
 */
private async initializeIndicatorSubagent(): Promise<void> {
  // An injected subagent (e.g. from tests or config) takes precedence.
  if (this.indicatorSubagent) {
    this.config.logger.debug('Indicator subagent already provided');
    return;
  }

  this.config.logger.debug('Creating indicator subagent for session');

  try {
    // Dynamic import keeps the subagent module out of the static import graph.
    const { createIndicatorSubagent } = await import('./subagents/indicator/index.js');

    // Route to a model suited for indicator-management complexity.
    const routed = await this.modelRouter.route(
      'indicator management',
      this.config.license,
      RoutingStrategy.COMPLEXITY,
      this.config.userId
    );

    const registry = getToolRegistry();
    const tools = await registry.getToolsForAgent(
      'indicator',
      this.mcpClient,
      this.availableMCPTools,
      this.workspaceManager,
      undefined, // no image callback
      (storeName, newState) => {
        // After a workspace_patch succeeds in the container, update the gateway's
        // WorkspaceManager so it pushes a WebSocket patch to the web client.
        this.workspaceManager?.setState(storeName, newState).catch((err) =>
          this.config.logger.error({ err, storeName }, 'Failed to sync workspace after indicator mutation')
        );
      }
    );

    // Inject web_explore tool if the web-explore subagent is ready
    if (this.webExploreSubagent) {
      tools.push(createWebExploreAgentTool({
        webExploreSubagent: this.webExploreSubagent,
        context: {
          userContext: createUserContext({
            userId: this.config.userId,
            sessionId: this.config.sessionId,
            license: this.config.license,
            channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
            channelUserId: this.config.channelUserId ?? this.config.userId,
          }),
        },
        logger: this.config.logger,
      }));
    }

    const indicatorSubagentPath = join(__dirname, 'subagents', 'indicator');
    this.config.logger.debug({ indicatorSubagentPath }, 'Using indicator subagent path');

    this.indicatorSubagent = await createIndicatorSubagent(
      routed.model,
      this.config.logger,
      indicatorSubagentPath,
      this.mcpClient,
      tools
    );

    this.config.logger.info(
      {
        toolCount: tools.length,
        toolNames: tools.map((t) => t.name),
      },
      'Indicator subagent created successfully'
    );
  } catch (error) {
    this.config.logger.error(
      { error, errorMessage: (error as Error).message, stack: (error as Error).stack },
      'Failed to create indicator subagent'
    );
    // Don't throw — indicator subagent is optional
  }
}
/**
 * Initialize web explore subagent
 *
 * Builds the web-explore subagent for this session when one was not supplied
 * via config. Failures are logged and swallowed — web exploration is optional.
 */
private async initializeWebExploreSubagent(): Promise<void> {
  // Respect an externally supplied subagent.
  if (this.webExploreSubagent) {
    this.config.logger.debug('Web explore subagent already provided');
    return;
  }

  this.config.logger.debug('Creating web explore subagent for session');

  try {
    // Load lazily so the subagent module is only pulled in when needed.
    const { createWebExploreSubagent } = await import('./subagents/web-explore/index.js');

    const routed = await this.modelRouter.route(
      'web research and summarization',
      this.config.license,
      RoutingStrategy.COMPLEXITY,
      this.config.userId
    );

    // Web exploration does not talk to the user's container.
    const tools = await getToolRegistry().getToolsForAgent(
      'web-explore',
      undefined, // no MCP client needed
      undefined,
      undefined
    );

    const webExploreSubagentPath = join(__dirname, 'subagents', 'web-explore');
    this.config.logger.debug({ webExploreSubagentPath }, 'Using web explore subagent path');

    this.webExploreSubagent = await createWebExploreSubagent(
      routed.model,
      this.config.logger,
      webExploreSubagentPath,
      tools
    );

    this.config.logger.info(
      {
        toolCount: tools.length,
        toolNames: tools.map((t) => t.name),
      },
      'Web explore subagent created successfully'
    );
  } catch (error) {
    this.config.logger.error(
      { error, errorMessage: (error as Error).message, stack: (error as Error).stack },
      'Failed to create web explore subagent'
    );
    // Don't throw — web explore subagent is optional
  }
}
/** /**
* Execute model with tool calling loop * Execute model with tool calling loop
* Handles multi-turn tool calls until the model produces a final text response * Handles multi-turn tool calls until the model produces a final text response
@@ -251,7 +427,8 @@ export class AgentHarness {
model: any, model: any,
messages: BaseMessage[], messages: BaseMessage[],
tools: DynamicStructuredTool[], tools: DynamicStructuredTool[],
maxIterations: number = 2 maxIterations: number = 2,
signal?: AbortSignal
): Promise<string> { ): Promise<string> {
this.config.logger.info( this.config.logger.info(
{ toolCount: tools.length, maxIterations }, { toolCount: tools.length, maxIterations },
@@ -262,6 +439,7 @@ export class AgentHarness {
let iterations = 0; let iterations = 0;
while (iterations < maxIterations) { while (iterations < maxIterations) {
if (signal?.aborted) break;
iterations++; iterations++;
this.config.logger.info( this.config.logger.info(
{ {
@@ -275,7 +453,7 @@ export class AgentHarness {
this.config.logger.debug('Streaming model response...'); this.config.logger.debug('Streaming model response...');
let response: any = null; let response: any = null;
try { try {
const stream = await model.stream(messagesCopy); const stream = await model.stream(messagesCopy, { signal });
for await (const chunk of stream) { for await (const chunk of stream) {
if (typeof chunk.content === 'string' && chunk.content.length > 0) { if (typeof chunk.content === 'string' && chunk.content.length > 0) {
this.channelAdapter?.sendChunk(chunk.content); this.channelAdapter?.sendChunk(chunk.content);
@@ -415,6 +593,29 @@ export class AgentHarness {
return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.'; return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.';
} }
/**
 * Call a tool on the user's MCP server directly (bypasses the agent/LLM).
 * Used by channel handlers for direct data requests (e.g. evaluate_indicator).
 *
 * @param name - MCP tool name to invoke
 * @param args - tool arguments, forwarded verbatim to the MCP client
 * @returns the raw result returned by the MCP server
 */
async callMcpTool(name: string, args: Record<string, unknown>): Promise<unknown> {
  const result = await this.mcpClient.callTool(name, args);
  return result;
}
/**
 * Expose MCP client so channel handlers can wire ContainerSync after harness init.
 *
 * @returns the harness's MCPClientConnector instance (the shared field, not a copy)
 */
getMcpClient(): MCPClientConnector {
  return this.mcpClient;
}
/**
 * Set workspace manager after construction (used when ContainerSync requires MCP to be connected first).
 *
 * @param workspace - the session's workspace manager; replaces any previously
 *   stored instance
 */
setWorkspaceManager(workspace: WorkspaceManager): void {
  this.workspaceManager = workspace;
  // NOTE(review): presumably re-registers path triggers against the new
  // workspace — registerWorkspaceTriggers() body not visible here; confirm.
  this.registerWorkspaceTriggers();
}
/** /**
* Handle incoming message from user * Handle incoming message from user
*/ */
@@ -480,18 +681,19 @@ export class AgentHarness {
this.workspaceManager // Pass session workspace manager this.workspaceManager // Pass session workspace manager
); );
// Build shared subagent context
const subagentContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
// Add research subagent as a tool if available // Add research subagent as a tool if available
if (this.researchSubagent) { if (this.researchSubagent) {
const subagentContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
tools.push(createResearchAgentTool({ tools.push(createResearchAgentTool({
researchSubagent: this.researchSubagent, researchSubagent: this.researchSubagent,
context: subagentContext, context: subagentContext,
@@ -499,6 +701,24 @@ export class AgentHarness {
})); }));
} }
// Add indicator subagent as a tool if available
if (this.indicatorSubagent) {
tools.push(createIndicatorAgentTool({
indicatorSubagent: this.indicatorSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
// Add web explore subagent as a tool if available
if (this.webExploreSubagent) {
tools.push(createWebExploreAgentTool({
webExploreSubagent: this.webExploreSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
this.config.logger.info( this.config.logger.info(
{ {
toolCount: tools.length, toolCount: tools.length,
@@ -524,7 +744,9 @@ export class AgentHarness {
// 8. Call LLM with tool calling loop // 8. Call LLM with tool calling loop
this.config.logger.info('Invoking LLM with tool support'); this.config.logger.info('Invoking LLM with tool support');
const assistantMessage = await this.executeWithToolCalling(modelWithTools, processedMessages, tools, 10); this.abortController = new AbortController();
const assistantMessage = await this.executeWithToolCalling(modelWithTools, processedMessages, tools, 10, this.abortController.signal);
this.abortController = null;
this.config.logger.info( this.config.logger.info(
{ responseLength: assistantMessage.length }, { responseLength: assistantMessage.length },
@@ -587,13 +809,17 @@ export class AgentHarness {
private getToolLabel(toolName: string): string { private getToolLabel(toolName: string): string {
const labels: Record<string, string> = { const labels: Record<string, string> = {
research: 'Researching...', research: 'Researching...',
indicator: 'Adjusting indicators...',
get_chart_data: 'Fetching chart data...', get_chart_data: 'Fetching chart data...',
symbol_lookup: 'Searching symbol...', symbol_lookup: 'Searching symbol...',
category_list: 'Seeing what we have...', python_list: 'Seeing what we have...',
category_edit: 'Coding...', python_edit: 'Coding...',
category_write: 'Coding...', python_write: 'Coding...',
category_read: 'Inspecting...', python_read: 'Inspecting...',
execute_research: 'Running script...', execute_research: 'Running script...',
backtest_strategy: 'Running backtest...',
list_active_strategies: 'Checking active strategies...',
web_explore: 'Searching the web...',
}; };
return labels[toolName] ?? `Running ${toolName}...`; return labels[toolName] ?? `Running ${toolName}...`;
} }

View File

@@ -1,5 +1,5 @@
import { Client } from '@modelcontextprotocol/sdk/client/index.js'; import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js'; import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
@@ -12,11 +12,12 @@ export interface MCPClientConfig {
/** /**
* MCP client connector for user's container * MCP client connector for user's container
* Manages connection to user-specific MCP server via SSE transport * Manages connection to user-specific MCP server via Streamable HTTP transport
*/ */
export class MCPClientConnector { export class MCPClientConnector {
private client: Client | null = null; private client: Client | null = null;
private connected = false; private connected = false;
private reconnectPromise: Promise<void> | null = null;
private config: MCPClientConfig; private config: MCPClientConfig;
constructor(config: MCPClientConfig) { constructor(config: MCPClientConfig) {
@@ -24,17 +25,42 @@ export class MCPClientConnector {
} }
/** /**
* Connect to user's MCP server via SSE transport * Connect to user's MCP server via Streamable HTTP transport.
* Safe to call when already connecting (concurrent callers wait for the same attempt).
*/ */
async connect(): Promise<void> { async connect(): Promise<void> {
if (this.connected) { if (this.connected) {
return; return;
} }
// If a reconnect is already in progress, wait for it rather than racing
if (this.reconnectPromise) {
return this.reconnectPromise;
}
this.reconnectPromise = this._doConnect();
try {
await this.reconnectPromise;
} finally {
this.reconnectPromise = null;
}
}
private async _doConnect(): Promise<void> {
// Close stale client if this is a reconnect attempt
if (this.client) {
try {
await this.client.close();
} catch {
// Ignore errors closing a stale/broken client
}
this.client = null;
}
try { try {
this.config.logger.info( this.config.logger.info(
{ userId: this.config.userId, url: this.config.mcpServerUrl }, { userId: this.config.userId, url: this.config.mcpServerUrl },
'Connecting to user MCP server via SSE' 'Connecting to user MCP server'
); );
this.client = new Client( this.client = new Client(
@@ -49,15 +75,32 @@ export class MCPClientConnector {
} }
); );
// Create SSE transport for HTTP connection to user container // Streamable HTTP: single /mcp endpoint, session tracked via mcp-session-id header
const transport = new SSEClientTransport( const transport = new StreamableHTTPClientTransport(
new URL(`${this.config.mcpServerUrl}/sse`) new URL(`${this.config.mcpServerUrl}/mcp`)
); );
await this.client.connect(transport); await this.client.connect(transport);
// Hook client.onerror to detect transport failures (e.g. sandbox restart returning
// 404 "session not found"). When fired, mark disconnected so the next callTool /
// listTools call triggers a full reconnect + initialize handshake.
const connectedClient = this.client;
const origOnError = this.client.onerror;
this.client.onerror = (error) => {
origOnError?.(error);
// Only act on the currently-active client (ignore stale closures after reconnect)
if (this.client === connectedClient && this.connected) {
this.config.logger.warn(
{ error },
'MCP transport error — marking disconnected for lazy reconnect'
);
this.connected = false;
}
};
this.connected = true; this.connected = true;
this.config.logger.info('Connected to user MCP server via SSE'); this.config.logger.info('Connected to user MCP server');
} catch (error) { } catch (error) {
this.config.logger.error( this.config.logger.error(
{ error, userId: this.config.userId }, { error, userId: this.config.userId },
@@ -67,18 +110,31 @@ export class MCPClientConnector {
} }
} }
/**
 * Ensure the client is connected, reconnecting if necessary.
 * Called at the top of every public method so that a sandbox restart is
 * healed transparently on the next tool call.
 */
private async ensureConnected(): Promise<void> {
  // Fast path: a live client with an established session needs nothing.
  if (this.client && this.connected) {
    return;
  }
  this.config.logger.info(
    { userId: this.config.userId },
    'MCP not connected, attempting reconnect'
  );
  await this.connect();
}
/** /**
* Call a tool on the user's MCP server * Call a tool on the user's MCP server
*/ */
async callTool(name: string, args: Record<string, unknown>): Promise<unknown> { async callTool(name: string, args: Record<string, unknown>): Promise<unknown> {
if (!this.client || !this.connected) { await this.ensureConnected();
throw new Error('MCP client not connected');
}
try { try {
this.config.logger.debug({ tool: name, args }, 'Calling MCP tool'); this.config.logger.debug({ tool: name, args }, 'Calling MCP tool');
const result = await this.client.callTool({ name, arguments: args }); const result = await this.client!.callTool({ name, arguments: args });
return result; return result;
} catch (error) { } catch (error) {
this.config.logger.error({ error, tool: name }, 'MCP tool call failed'); this.config.logger.error({ error, tool: name }, 'MCP tool call failed');
@@ -91,13 +147,11 @@ export class MCPClientConnector {
* Returns all available tools from the MCP server * Returns all available tools from the MCP server
*/ */
async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> { async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> {
if (!this.client || !this.connected) { await this.ensureConnected();
throw new Error('MCP client not connected');
}
try { try {
this.config.logger.debug('Requesting tool list from MCP server'); this.config.logger.debug('Requesting tool list from MCP server');
const response = await this.client.listTools(); const response = await this.client!.listTools();
this.config.logger.debug( this.config.logger.debug(
{ {
@@ -146,12 +200,10 @@ export class MCPClientConnector {
* Returns all available resources from the MCP server * Returns all available resources from the MCP server
*/ */
async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> { async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> {
if (!this.client || !this.connected) { await this.ensureConnected();
throw new Error('MCP client not connected');
}
try { try {
const response = await this.client.listResources(); const response = await this.client!.listResources();
// Return all resources - agent-to-resource binding is handled by the tool registry // Return all resources - agent-to-resource binding is handled by the tool registry
const resources = response.resources.map((resource: any) => ({ const resources = response.resources.map((resource: any) => ({
@@ -177,14 +229,12 @@ export class MCPClientConnector {
* Read a resource from user's MCP server * Read a resource from user's MCP server
*/ */
async readResource(uri: string): Promise<{ uri: string; mimeType?: string; text?: string; blob?: string }> { async readResource(uri: string): Promise<{ uri: string; mimeType?: string; text?: string; blob?: string }> {
if (!this.client || !this.connected) { await this.ensureConnected();
throw new Error('MCP client not connected');
}
try { try {
this.config.logger.debug({ uri }, 'Reading MCP resource'); this.config.logger.debug({ uri }, 'Reading MCP resource');
const response = await this.client.readResource({ uri }); const response = await this.client!.readResource({ uri });
// Extract the first content item (MCP returns array of contents) // Extract the first content item (MCP returns array of contents)
const content = response.contents[0]; const content = response.contents[0];
@@ -206,15 +256,19 @@ export class MCPClientConnector {
* Disconnect from MCP server * Disconnect from MCP server
*/ */
async disconnect(): Promise<void> { async disconnect(): Promise<void> {
if (this.client && this.connected) { if (this.client) {
try { try {
await this.client.close(); await this.client.close();
this.connected = false; if (this.connected) {
this.config.logger.info('Disconnected from user MCP server'); this.config.logger.info('Disconnected from user MCP server');
}
} catch (error) { } catch (error) {
this.config.logger.error({ error }, 'Error disconnecting from MCP server'); this.config.logger.error({ error }, 'Error disconnecting from MCP server');
} }
} }
this.connected = false;
this.client = null;
this.reconnectPromise = null;
} }
isConnected(): boolean { isConnected(): boolean {

View File

@@ -3,6 +3,8 @@
You are a helpful AI assistant for Dexorder, an AI-first trading platform. You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data. You help users research markets, develop indicators and strategies, and analyze trading data.
Your text responses should be markdown, using emojis, color, and formatting to create a visually appealing response.
**User License:** {{licenseType}} **User License:** {{licenseType}}
**Available Features:** **Available Features:**
@@ -10,21 +12,71 @@ You help users research markets, develop indicators and strategies, and analyze
--- ---
# Platform Capabilities
Dexorder trading platform provides OHLC data at a 1-minute resolution and supports strategies that read one or more OHLC feeds at a 1-minute resolution or coarser. It also offers a wide range of built-in indicators and allows users to create custom indicators for advanced analysis.
Dexorder does not support tick-by-tick trading or high-frequency strategies.
Dexorder does not support long-running computations like parameter optimizations or training machine learning models.
Dexorder does not support portfolio optimization or trading strategies that require a large number of symbols.
If the user asks for a capability not provided by Dexorder, decline and offer alternatives.
# Important Instructions # Important Instructions
## Investment Advice
**NEVER** recommend any specific ticker, trade, or strategy. You may suggest mechanical adjustments or improvements to strategies, but you must never recommend that the user adopt a specific trade or position.
## Task Delegation ## Task Delegation
- For ANY research questions, deep analysis, statistical analysis, charting requests, plotting, ML tasks, or market data queries that require computation, you MUST use the 'research' tool - For ANY research questions, deep analysis, statistical analysis, charting requests, or market data queries that require computation, you MUST use the 'research' tool
- The research tool creates and runs Python scripts that generate charts and perform analysis - For ANYTHING related to indicators on the chart — reading, adding, removing, modifying, or creating custom indicators — you MUST use the 'indicator' tool
- Use 'research' for anything involving: plotting, statistics, calculations, correlations, patterns, volume analysis, technical indicators, or any non-trivial data processing - For ANY backtesting request — running a strategy against historical data — you MUST use the 'backtest_strategy' tool directly; NEVER use the research tool for backtesting
- NEVER write Python code directly in your responses to the user - NEVER write Python code directly in your responses to the user
- NEVER show code to the user - delegate to the research tool instead - NEVER show code to the user delegate to the research or indicator tool instead
- NEVER attempt to do analysis yourself - let the research subagent handle it - NEVER attempt to do analysis yourself let the subagents handle it
## Available Tools ## Available Tools
You have access to the following tools:
### indicator
**Use this tool for all indicator-related requests.**
The indicator subagent manages the chart's indicators: it reads the current indicator set, adds or removes indicators, modifies parameters, and can create custom indicator scripts.
**ALWAYS use indicator for:**
- "What indicators do I have on the chart?" → read and describe current indicators
- "Show RSI" / "Add Bollinger Bands" → add indicators to chart
- "Change MACD fast period to 8" → modify indicator parameters
- "Remove all moving averages" → remove indicators
- "Create a custom volume-weighted RSI" → write custom indicator
- Any question about what an indicator means or how it's configured
- Recommending indicators for a given strategy
**Custom indicators vs. ad-hoc research scripts:**
When a user asks for a calculation (e.g. "volume-weighted RSI", "adaptive ATR", "sector relative strength"), prefer creating a **custom indicator** via this tool over writing a one-off pandas/Python script in the research tool. Custom indicators are better because:
1. **Reusable** — saved permanently and can be applied to any symbol at any time
2. **First-class UI** — appear in the chart's Indicator picker alongside built-in indicators
3. **Live chart display** — their values are plotted directly on the chart as the user browses
4. **Watchlist & trigger support** — can be used to filter symbols (watchlists) and fire alerts/triggers (coming soon)
Use the research tool for exploratory or one-off analysis. Use the indicator tool whenever the user wants to *track* or *reuse* a computed value.
**NEVER modify workspace indicators yourself** — always delegate to the indicator tool.
### web_explore
**Use this tool to search the web or academic databases.**
The web-explore subagent searches the web (or arXiv for academic topics), fetches relevant pages, and returns a markdown summary with cited sources.
**ALWAYS use web_explore for:**
- Questions about current events, news, or real-time information
- Documentation, tutorials, or how-to guides
- Academic papers, research findings, or scientific topics
- Any topic that requires up-to-date external sources
**NOT for market data or computation** — use the research tool for analysis, and get_chart_data for OHLC values.
### research ### research
**This is your PRIMARY tool for any analysis, computation, charting, or plotting tasks.** **This is your PRIMARY tool for data analysis, computation, and charting.**
Creates and runs Python research scripts via a specialized research subagent. Creates and runs Python research scripts via a specialized research subagent.
The subagent autonomously writes code, executes it, handles errors, and generates charts. The subagent autonomously writes code, executes it, handles errors, and generates charts.
@@ -32,7 +84,6 @@ The subagent autonomously writes code, executes it, handles errors, and generate
**ALWAYS use research for:** **ALWAYS use research for:**
- Any plotting, charting, or visualization requests - Any plotting, charting, or visualization requests
- Price action analysis and correlations - Price action analysis and correlations
- Technical indicators and overlays
- Statistical analysis of market data - Statistical analysis of market data
- Volume analysis and patterns - Volume analysis and patterns
- Machine learning or predictive modeling - Machine learning or predictive modeling
@@ -41,16 +92,11 @@ The subagent autonomously writes code, executes it, handles errors, and generate
- Custom calculations or transformations - Custom calculations or transformations
- Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.) - Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.)
**NOT for indicator management** — use the indicator tool for that.
**NEVER attempt to do analysis yourself in the chat.** **NEVER attempt to do analysis yourself in the chat.**
Let the research subagent write and execute the Python code. Let the research subagent write and execute the Python code.
**Examples of when to use research:**
- "Plot BTC with volume overlay" → use research
- "Calculate correlation between ETH and BTC" → use research
- "Show me RSI divergences" → use research
- "Analyze Monday price patterns" → use research
- "Does volume predict price movement?" → use research
Parameters: Parameters:
- instruction: Natural language description of the analysis to perform (be specific!) - instruction: Natural language description of the analysis to perform (be specific!)
- name: A unique name for the research script (e.g., "BTC Weekly Analysis") - name: A unique name for the research script (e.g., "BTC Weekly Analysis")
@@ -59,10 +105,37 @@ Example usage:
- User: "Does Friday price action correlate with Monday?" - User: "Does Friday price action correlate with Monday?"
- You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30-4:00 ET)", name="Friday-Monday Correlation" - You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30-4:00 ET)", name="Friday-Monday Correlation"
### category_list ### backtest_strategy
List existing research scripts (category="research"). **ALWAYS use this tool — and ONLY this tool — for any backtesting request.**
Runs a saved trading strategy against historical OHLC data using the Nautilus Trader backtesting engine.
Returns structured performance metrics and an equity curve. Any charts generated are automatically sent to the user.
**ALWAYS use backtest_strategy for:**
- "Backtest my RSI strategy over the last year"
- "How did this strategy perform on BTC?"
- "Run a backtest from January to June"
- Any request to test or evaluate a strategy on historical data
**NEVER use research for backtesting** — the research tool cannot run strategies through the backtesting engine.
After the tool returns, summarize the results clearly: total return, Sharpe ratio, max drawdown, win rate, and trade count. Present the equity curve description in plain language.
Parameters:
- strategy_name: Display name of the saved strategy (use python_list with category="strategy" to check existing strategies)
- feeds: Array of `{symbol, period_seconds}` feed objects (e.g. `[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]`)
- from_time / to_time: Date strings ("2024-01-01", "90 days ago", "now") or Unix timestamps
- initial_capital: Starting balance in quote currency (default 10,000)
### list_active_strategies
Lists all currently active (live or paper) strategies and their status.
Use this when the user asks what strategies are running.
### python_list
List existing scripts in a category ("strategy", "indicator", or "research").
Use this before calling the research tool to check whether a relevant script already exists. Use this before calling the research tool to check whether a relevant script already exists.
If one does, pass its exact name to the research tool so the subagent updates it rather than creating a new one. If one does, pass its exact name to the research tool so the subagent updates it rather than creating a new one.
Also use before calling backtest_strategy to confirm the strategy name.
### symbol-lookup ### symbol-lookup
Look up trading symbols and get metadata. Look up trading symbols and get metadata.
@@ -102,3 +175,4 @@ You also have access to workspace persistence tools via MCP:
- **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store - **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store
These are useful for persisting user preferences, analysis results, and custom data across sessions. These are useful for persisting user preferences, analysis results, and custom data across sessions.
For the `indicators` store specifically, always use the indicator tool rather than calling workspace tools directly.

View File

@@ -44,12 +44,9 @@ export interface SubagentContext {
* *
* Structure: * Structure:
* subagents/ * subagents/
* code-reviewer/ * research/
* config.yaml * config.yaml
* system-prompt.md * system-prompt.md
* memory/
* review-guidelines.md
* common-patterns.md
* index.ts * index.ts
*/ */
export abstract class BaseSubagent { export abstract class BaseSubagent {

View File

@@ -1,26 +0,0 @@
# Code Reviewer Subagent Configuration
name: code-reviewer
description: Reviews trading strategy code for bugs, performance issues, and best practices
# Model configuration (optional override)
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 4096
# Memory files to load from memory/ directory
memoryFiles:
- review-guidelines.md
- common-patterns.md
- best-practices.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- static_analysis
- performance_review
- security_audit
- code_quality
- best_practices

View File

@@ -1,93 +0,0 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
/**
* Code Reviewer Subagent
*
* Specialized agent for reviewing trading strategy code.
* Reviews for:
* - Logic errors and bugs
* - Performance issues
* - Security vulnerabilities
* - Trading best practices
* - Code quality
*
* Loads knowledge from multi-file memory:
* - review-guidelines.md: What to check for
* - common-patterns.md: Good and bad examples
* - best-practices.md: Industry standards
*/
export class CodeReviewerSubagent extends BaseSubagent {
  constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger, mcpClient?: any, tools?: any[]) {
    super(config, model, logger, mcpClient, tools);
  }

  /**
   * Build the message list for a code review request.
   *
   * Shared by execute() and stream() so the review prompt stays identical
   * between the two entry points (previously duplicated in both).
   */
  private buildReviewMessages(context: SubagentContext, code: string) {
    return this.buildMessages(
      context,
      `Review the following trading strategy code:\n\n\`\`\`typescript\n${code}\n\`\`\``
    );
  }

  /**
   * Review code and provide structured feedback.
   *
   * @param context - Subagent context (user identity etc.) used for prompt building and logging
   * @param code - Raw strategy source code to review
   * @returns The model's full review as a single string
   */
  async execute(context: SubagentContext, code: string): Promise<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        codeLength: code.length,
      },
      'Reviewing code'
    );
    const messages = this.buildReviewMessages(context, code);
    const response = await this.model.invoke(messages);
    return response.content as string;
  }

  /**
   * Stream a code review, yielding text chunks as the model produces them.
   *
   * @param context - Subagent context used for prompt building and logging
   * @param code - Raw strategy source code to review
   */
  async *stream(context: SubagentContext, code: string): AsyncGenerator<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        codeLength: code.length,
      },
      'Streaming code review'
    );
    const messages = this.buildReviewMessages(context, code);
    const stream = await this.model.stream(messages);
    for await (const chunk of stream) {
      yield chunk.content as string;
    }
  }
}
/**
* Factory function to create and initialize CodeReviewerSubagent
*/
export async function createCodeReviewerSubagent(
  model: BaseChatModel,
  logger: FastifyBaseLogger,
  basePath: string,
  mcpClient?: any,
  tools?: any[]
): Promise<CodeReviewerSubagent> {
  // Lazily load fs/path/yaml helpers so importing this module stays cheap.
  const [{ readFile }, { join }, yaml] = await Promise.all([
    import('fs/promises'),
    import('path'),
    import('js-yaml'),
  ]);

  // Parse config.yaml from the subagent's base directory.
  const rawConfig = await readFile(join(basePath, 'config.yaml'), 'utf-8');
  const parsedConfig = yaml.load(rawConfig) as SubagentConfig;

  // Construct the subagent and let it load its prompt and memory files.
  const reviewer = new CodeReviewerSubagent(parsedConfig, model, logger, mcpClient, tools);
  await reviewer.initialize(basePath);
  return reviewer;
}

View File

@@ -1,227 +0,0 @@
# Trading Strategy Best Practices
## Code Organization
### Separation of Concerns
```typescript
// Good: Clear separation
class Strategy {
async analyze(data: MarketData): Promise<Signal> { }
}
class RiskManager {
validateSignal(signal: Signal): boolean { }
}
class ExecutionEngine {
async execute(signal: Signal): Promise<Order> { }
}
// Bad: Everything in one function
async function trade() {
// Analysis, risk, execution all mixed
}
```
### Configuration Management
```typescript
// Good: External configuration
interface StrategyConfig {
stopLossPercent: number;
takeProfitPercent: number;
maxPositionSize: number;
riskPerTrade: number;
}
const config = loadConfig('strategy.yaml');
// Bad: Hardcoded values scattered throughout
const stopLoss = price * 0.95; // What if you want to change this?
```
## Testing Considerations
### Testable Code
```typescript
// Good: Pure functions, easy to test
function calculateRSI(prices: number[], period: number = 14): number {
// Pure calculation, no side effects
return rsi;
}
// Bad: Hard to test
async function strategy() {
const data = await fetchLiveData(); // Can't control in tests
const signal = analyze(data);
await executeTrade(signal); // Side effects
}
```
### Mock-Friendly Design
```typescript
// Good: Dependency injection
class Strategy {
constructor(
private dataProvider: DataProvider,
private executor: OrderExecutor
) {}
async run() {
const data = await this.dataProvider.getData();
// ...
}
}
// In tests: inject mocks
const strategy = new Strategy(mockDataProvider, mockExecutor);
```
## Performance Optimization
### Avoid Recalculation
```typescript
// Good: Cache indicator results
class IndicatorCache {
private cache = new Map<string, { value: number, timestamp: number }>();
get(key: string, ttl: number, calculator: () => number): number {
const cached = this.cache.get(key);
if (cached && Date.now() - cached.timestamp < ttl) {
return cached.value;
}
const value = calculator();
this.cache.set(key, { value, timestamp: Date.now() });
return value;
}
}
// Bad: Recalculate every time
for (const ticker of tickers) {
const rsi = calculateRSI(await getData(ticker)); // Slow
}
```
### Batch Operations
```typescript
// Good: Batch API calls
const results = await Promise.all(
tickers.map(ticker => dataProvider.getOHLC(ticker))
);
// Bad: Sequential API calls
const results = [];
for (const ticker of tickers) {
results.push(await dataProvider.getOHLC(ticker)); // Slow
}
```
## Error Handling
### Graceful Degradation
```typescript
// Good: Fallback behavior
async function getMarketData(ticker: string): Promise<OHLC[]> {
try {
return await primarySource.fetch(ticker);
} catch (error) {
logger.warn('Primary source failed, trying backup');
try {
return await backupSource.fetch(ticker);
} catch (backupError) {
logger.error('All sources failed');
return getCachedData(ticker); // Last resort
}
}
}
// Bad: Let it crash
async function getMarketData(ticker: string) {
return await api.fetch(ticker); // Uncaught errors
}
```
### Detailed Logging
```typescript
// Good: Structured logging with context
logger.info({
action: 'order_placed',
ticker: 'BTC/USDT',
side: 'buy',
size: 0.1,
price: 50000,
orderId: 'abc123',
strategy: 'mean-reversion'
});
// Bad: String concatenation
console.log('Placed order'); // No context
```
## Documentation
### Self-Documenting Code
```typescript
// Good: Clear naming and JSDoc
/**
* Calculate position size using Kelly Criterion
* @param winRate Probability of winning (0-1)
* @param avgWin Average win amount
* @param avgLoss Average loss amount
* @param capital Total available capital
* @returns Optimal position size in base currency
*/
function calculateKellyPosition(
winRate: number,
avgWin: number,
avgLoss: number,
capital: number
): number {
const kellyPercent = (winRate * avgWin - (1 - winRate) * avgLoss) / avgWin;
return Math.max(0, Math.min(kellyPercent * capital, capital * 0.25)); // Cap at 25%
}
// Bad: Cryptic names
function calc(w: number, a: number, b: number, c: number) {
return (w * a - (1 - w) * b) / a * c;
}
```
## Security
### Input Validation
```typescript
// Good: Validate all external inputs
function validateTicker(ticker: string): boolean {
return /^[A-Z]+:[A-Z]+\/[A-Z]+$/.test(ticker);
}
function validatePeriod(period: string): boolean {
return ['1m', '5m', '15m', '1h', '4h', '1d', '1w'].includes(period);
}
// Bad: Trust user input
function getOHLC(ticker: string, period: string) {
return db.query(`SELECT * FROM ohlc WHERE ticker='${ticker}'`); // SQL injection!
}
```
### Rate Limiting
```typescript
// Good: Prevent API abuse
class RateLimiter {
private calls: number[] = [];
async throttle(maxCallsPerMinute: number): Promise<void> {
const now = Date.now();
this.calls = this.calls.filter(t => now - t < 60000);
if (this.calls.length >= maxCallsPerMinute) {
const wait = 60000 - (now - this.calls[0]);
await sleep(wait);
}
this.calls.push(now);
}
}
```

View File

@@ -1,124 +0,0 @@
# Common Trading Strategy Patterns
## Pattern: Trend Following
```typescript
// Good: Clear trend detection with multiple confirmations
function detectTrend(prices: number[], period: number = 20): 'bull' | 'bear' | 'neutral' {
const sma = calculateSMA(prices, period);
const currentPrice = prices[prices.length - 1];
const priceVsSMA = (currentPrice - sma) / sma;
// Use threshold to avoid noise
if (priceVsSMA > 0.02) return 'bull';
if (priceVsSMA < -0.02) return 'bear';
return 'neutral';
}
// Bad: Single indicator, no confirmation
function detectTrend(prices: number[]): string {
return prices[prices.length - 1] > prices[prices.length - 2] ? 'bull' : 'bear';
}
```
## Pattern: Mean Reversion
```typescript
// Good: Proper boundary checks and position sizing
async function checkMeanReversion(ticker: string): Promise<TradeSignal | null> {
const data = await getOHLC(ticker, 100);
const mean = calculateMean(data.close);
const stdDev = calculateStdDev(data.close);
const current = data.close[data.close.length - 1];
const zScore = (current - mean) / stdDev;
// Only trade at extreme deviations
if (zScore < -2) {
return {
side: 'buy',
size: calculatePositionSize(Math.abs(zScore)), // Scale with confidence
stopLoss: current * 0.95,
};
}
return null;
}
// Bad: No risk management, arbitrary thresholds
function checkMeanReversion(price: number, avg: number): boolean {
return price < avg; // Too simplistic
}
```
## Pattern: Breakout Detection
```typescript
// Good: Volume confirmation and false breakout protection
function detectBreakout(ohlc: OHLC[], resistance: number): boolean {
const current = ohlc[ohlc.length - 1];
const previous = ohlc[ohlc.length - 2];
// Price breaks resistance
const priceBreak = current.close > resistance && previous.close <= resistance;
// Volume confirmation (at least 1.5x average)
const avgVolume = ohlc.slice(-20, -1).reduce((sum, c) => sum + c.volume, 0) / 19;
const volumeConfirm = current.volume > avgVolume * 1.5;
// Wait for candle close to avoid false breaks
const candleClosed = true; // Check if candle is complete
return priceBreak && volumeConfirm && candleClosed;
}
// Bad: No confirmation, premature signal
function detectBreakout(price: number, resistance: number): boolean {
return price > resistance; // False positives
}
```
## Pattern: Risk Management
```typescript
// Good: Comprehensive risk checks
class PositionManager {
private readonly MAX_POSITION_PERCENT = 0.05; // 5% of portfolio
private readonly MAX_DAILY_LOSS = 0.02; // 2% daily drawdown limit
async openPosition(signal: TradeSignal, accountBalance: number): Promise<boolean> {
// Check daily loss limit
if (this.getDailyPnL() / accountBalance < -this.MAX_DAILY_LOSS) {
logger.warn('Daily loss limit reached');
return false;
}
// Position size check
const maxSize = accountBalance * this.MAX_POSITION_PERCENT;
const actualSize = Math.min(signal.size, maxSize);
// Risk/reward check
const risk = Math.abs(signal.price - signal.stopLoss);
const reward = Math.abs(signal.takeProfit - signal.price);
if (reward / risk < 2) {
logger.info('Risk/reward ratio too low');
return false;
}
return await this.executeOrder(signal, actualSize);
}
}
// Bad: No risk checks
async function openPosition(signal: any) {
return await exchange.buy(signal.ticker, signal.size); // Dangerous
}
```
## Anti-Patterns to Avoid
1. **Magic Numbers**: Use named constants
2. **Global State**: Pass state explicitly
3. **Synchronous Blocking**: Use async for I/O
4. **No Error Handling**: Always wrap in try/catch
5. **Ignoring Slippage**: Factor in execution costs

View File

@@ -1,67 +0,0 @@
# Code Review Guidelines
## Trading Strategy Specific Checks
### Position Sizing
- ✅ Check for dynamic position sizing based on account balance
- ✅ Verify max position size limits
- ❌ Flag hardcoded position sizes
- ❌ Flag missing position size validation
### Order Handling
- ✅ Verify order type is appropriate (market vs limit)
- ✅ Check for order timeout handling
- ❌ Flag missing order confirmation checks
- ❌ Flag potential duplicate orders
### Risk Management
- ✅ Verify stop-loss is always set
- ✅ Check take-profit levels are realistic
- ❌ Flag missing drawdown protection
- ❌ Flag strategies without maximum daily loss limits
### Data Handling
- ✅ Check for proper OHLC data validation
- ✅ Verify timestamp handling (timezone, microseconds)
- ❌ Flag missing null/undefined checks
- ❌ Flag potential look-ahead bias
### Performance
- ✅ Verify indicators are calculated efficiently
- ✅ Check for unnecessary re-calculations
- ❌ Flag O(n²) or worse algorithms in hot paths
- ❌ Flag large memory allocations in loops
## Severity Levels
### Critical (🔴)
- Will cause financial loss or system crash
- Security vulnerabilities
- Data integrity issues
- Must be fixed before deployment
### High (🟠)
- Significant bugs or edge cases
- Performance issues that affect execution
- Risk management gaps
- Should be fixed before deployment
### Medium (🟡)
- Code quality issues
- Minor performance improvements
- Best practice violations
- Fix when convenient
### Low (🟢)
- Style preferences
- Documentation improvements
- Nice-to-have refactorings
- Optional improvements
## Common Pitfalls
1. **Look-Ahead Bias**: Using future data in backtests
2. **Overfitting**: Too many parameters, not enough data
3. **Slippage Ignorance**: Not accounting for execution costs
4. **Survivorship Bias**: Testing only on assets that survived
5. **Data Snooping**: Testing multiple strategies, reporting only the best

View File

@@ -1,51 +0,0 @@
# Code Reviewer System Prompt
You are an expert code reviewer specializing in trading strategies and financial algorithms.
## Your Role
Review trading strategy code with a focus on:
- **Correctness**: Logic errors, edge cases, off-by-one errors
- **Performance**: Inefficient loops, unnecessary calculations
- **Security**: Input validation, overflow risks, race conditions
- **Trading Best Practices**: Position sizing, risk management, order handling
- **Code Quality**: Readability, maintainability, documentation
## Review Approach
1. **Read the entire code** before providing feedback
2. **Identify critical issues first** (bugs, security, data loss)
3. **Suggest improvements** with specific code examples
4. **Explain the "why"** behind each recommendation
5. **Be constructive** - focus on helping, not criticizing
## Output Format
Structure your review as:
```
## Summary
Brief overview of code quality (1-2 sentences)
## Critical Issues
- Issue 1: Description with line number
- Issue 2: Description with line number
## Improvements
- Suggestion 1: Description with example
- Suggestion 2: Description with example
## Best Practices
- Practice 1: Why it matters
- Practice 2: Why it matters
## Overall Assessment
Pass / Needs Revision / Reject
```
## Important Notes
- Be specific with line numbers and code references
- Provide actionable feedback
- Consider the trading context (not just general coding)
- Flag any risk management issues immediately

View File

@@ -6,11 +6,6 @@ export {
type SubagentContext, type SubagentContext,
} from './base-subagent.js'; } from './base-subagent.js';
export {
CodeReviewerSubagent,
createCodeReviewerSubagent,
} from './code-reviewer/index.js';
export { export {
ResearchSubagent, ResearchSubagent,
createResearchSubagent, createResearchSubagent,

View File

@@ -0,0 +1,30 @@
# Indicator Subagent Configuration
name: indicator
description: Manages TradingView indicators in the workspace and creates custom indicator scripts
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# No memory files — all indicator knowledge is inline in the system prompt
memoryFiles: []
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- indicator_management
- workspace_manipulation
- custom_indicators
# Tools available to this subagent
tools:
platform: []
mcp:
- workspace_read # Read current indicators store
- workspace_patch # Add/update/remove indicators (no workspace_write — patch only)
- category_* # Write/edit/read/list custom indicator scripts
- evaluate_indicator # Evaluate any indicator against real OHLC data

View File

@@ -0,0 +1,111 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage } from '@langchain/core/messages';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../mcp-client.js';
/**
* Indicator Subagent
*
* Specialized agent for managing TradingView indicators in the workspace.
* Uses workspace_read/patch MCP tools to:
* - Read, add, modify, and remove indicators from the indicators store
* - Create custom indicator scripts via python_* tools
* - Validate indicators using the evaluate_indicator tool
*
* Simpler than ResearchSubagent — no image capture needed.
*/
export class IndicatorSubagent extends BaseSubagent {
  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger,
    mcpClient?: MCPClientConnector,
    tools?: any[]
  ) {
    super(config, model, logger, mcpClient, tools);
  }

  /**
   * Run an indicator-management instruction through a ReAct tool loop.
   *
   * Builds the prompt messages from the subagent config, hands the final
   * human turn to LangGraph's prebuilt ReAct agent together with this
   * subagent's tools, and returns the text of the last AI message.
   */
  async execute(context: SubagentContext, instruction: string): Promise<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        instruction: instruction.substring(0, 200),
        toolCount: this.tools.length,
        toolNames: this.tools.map(t => t.name),
      },
      'Indicator subagent starting'
    );

    if (!this.hasMCPClient()) {
      throw new Error('MCP client not available for indicator subagent');
    }
    if (this.tools.length === 0) {
      this.logger.warn('Indicator subagent has no tools — cannot read or patch workspace');
    }

    // First built message is treated as the system prompt, the last as the
    // human turn — presumably buildMessages() guarantees that ordering
    // (TODO confirm against BaseSubagent).
    const built = this.buildMessages(context, instruction);
    const systemPrompt = built[0];
    const humanTurn = built[built.length - 1];

    const agent = createReactAgent({
      llm: this.model,
      tools: this.tools,
      prompt: systemPrompt as SystemMessage,
    });

    const outcome = await agent.invoke(
      { messages: [humanTurn] },
      { recursionLimit: 25 }
    );
    const history: any[] = outcome.messages ?? [];

    this.logger.info(
      { messageCount: history.length },
      'Indicator subagent graph completed'
    );

    // Walk backwards to find the most recent AI message.
    let lastAI: any;
    for (let i = history.length - 1; i >= 0; i--) {
      const msg = history[i];
      if (msg.constructor?.name === 'AIMessage' || msg._getType?.() === 'ai') {
        lastAI = msg;
        break;
      }
    }

    let finalText = 'Indicator update completed.';
    if (lastAI) {
      finalText = typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content);
    }

    this.logger.info({ textLength: finalText.length }, 'Indicator subagent finished');
    return finalText;
  }
}
/**
* Factory function to create and initialize IndicatorSubagent
*/
export async function createIndicatorSubagent(
  model: BaseChatModel,
  logger: FastifyBaseLogger,
  basePath: string,
  mcpClient?: MCPClientConnector,
  tools?: any[]
): Promise<IndicatorSubagent> {
  // Dynamic imports keep these Node/yaml dependencies off the module's
  // top-level load path.
  const fsPromises = await import('fs/promises');
  const pathMod = await import('path');
  const yaml = await import('js-yaml');

  // Read and parse the subagent's config.yaml.
  const cfgText = await fsPromises.readFile(pathMod.join(basePath, 'config.yaml'), 'utf-8');
  const cfg = yaml.load(cfgText) as SubagentConfig;

  // Build the subagent, then let it load its system prompt / memory files.
  const subagent = new IndicatorSubagent(cfg, model, logger, mcpClient, tools);
  await subagent.initialize(basePath);
  return subagent;
}

View File

@@ -0,0 +1,467 @@
# Indicator Subagent
You are a specialized assistant that manages technical indicators on the Dexorder TradingView chart. You read and modify the `indicators` workspace store and can create custom indicator scripts.
---
## Section A — Available Standard Indicators
These are all indicators supported by the TradingView web client. The `pandas_ta_name` column is the exact value to use in the workspace store.
### Overlap / Moving Averages (plotted on price pane)
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `sma` | Simple MA | `length=20` | Arithmetic mean of close over `length` periods. Lags price; crossovers used as trend signals. |
| `ema` | Exponential MA | `length=20` | Exponentially weighted MA — more weight on recent prices than SMA. Reacts faster. |
| `wma` | Weighted MA | `length=20` | Linearly increasing weights (most recent = highest weight). Between SMA and EMA in responsiveness. |
| `dema` | Double EMA | `length=20` | Two layers of EMA to reduce lag. More responsive than EMA, more noise at extremes. |
| `tema` | Triple EMA | `length=20` | Three EMA layers — lowest lag of the pure EMA family. Very sensitive to recent price. |
| `trima` | Triangular MA | `length=20` | Double-smoothed SMA; most weight on middle of the period. Very smooth, significant lag. |
| `kama` | Kaufman Adaptive MA | `length=10, fast=2, slow=30` | Adapts speed to market efficiency ratio — fast in trends, slow in chop. |
| `t3` | T3 MA | `length=5, a=0.7` | Tillson's smooth, low-lag MA using six EMAs. `a` controls smoothing vs lag trade-off. |
| `hma` | Hull MA | `length=20` | Very low-lag MA using weighted MAs. Designed to minimize lag while maintaining smoothness. |
| `alma` | Arnaud Legoux MA | `length=20, sigma=6, offset=0.85` | Gaussian-weighted MA; `offset` shifts weight toward recent (1.0) or past (0.0). |
| `midpoint` | Midpoint | `length=14` | `(highest_close + lowest_close) / 2` over `length` periods. Simple center of range. |
| `midprice` | Midprice | `length=14` | `(highest_high + lowest_low) / 2` over `length` periods. True price range midpoint. |
| `supertrend` | SuperTrend | `length=7, multiplier=3.0` | ATR-based trend band that flips above/below price. Direction signal; not a smooth line. |
| `ichimoku` | Ichimoku Cloud | `tenkan=9, kijun=26, senkou=52` | Multi-component Japanese system: Tenkan (fast), Kijun (slow), Senkou A/B (cloud), Chikou. |
| `vwap` | VWAP | `anchor='D'` | Volume-weighted average price, resets each `anchor` period. Benchmark for intraday value. Requires datetime index. |
| `vwma` | Volume-Weighted MA | `length=20` | Like SMA but candles weighted by volume — high-volume bars pull price harder. |
| `bbands` | Bollinger Bands | `length=20, std=2.0` | SMA ± N standard deviations. Returns upper, mid, lower bands. Squeeze = low vol; expansion = breakout. |
### Momentum (plotted in separate pane)
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `rsi` | RSI | `length=14` | 0–100 oscillator. >70 overbought, <30 oversold. Divergences from price signal reversals. |
| `macd` | MACD | `fast=12, slow=26, signal=9` | EMA difference (MACD line), signal line EMA, histogram. Crossovers and zero-line crosses are signals. |
| `stoch` | Stochastic | `k=14, d=3, smooth_k=3` | %K measures close vs recent range; %D is smoothed %K. >80 overbought, <20 oversold. |
| `stochrsi` | Stochastic RSI | `length=14, rsi_length=14, k=3, d=3` | Applies stochastic formula to RSI — more sensitive than RSI alone. |
| `cci` | CCI | `length=20` | Deviation of price from statistical mean. ±100 are typical overbought/sold thresholds. |
| `willr` | Williams %R | `length=14` | Inverse stochastic, −100 to 0. Above −20 overbought, below −80 oversold. |
| `mom` | Momentum | `length=10` | Raw price difference: `close - close[n]`. Zero-line crossovers indicate direction change. |
| `roc` | Rate of Change | `length=10` | Percentage price change over `length` bars. Similar to momentum but normalized. |
| `trix` | TRIX | `length=18, signal=9` | 1-period % change of triple-smoothed EMA. Zero-line crossovers; filters noise well. |
| `cmo` | Chande MO | `length=14` | Ratio of up/down momentum, −100 to +100. Similar to RSI but uses all price changes. |
| `adx` | ADX | `length=14` | Trend strength 0–100 (direction-agnostic). >25 = trending, <20 = ranging. Includes +DI/−DI. |
| `aroon` | Aroon | `length=25` | Measures recency of highest/lowest prices. Aroon Up >70 and Down <30 = uptrend. |
| `ao` | Awesome Oscillator | *(no params)* | 5- vs 34-period SMA of midprice. Histogram above zero = bullish; below = bearish. |
| `bop` | Balance of Power | *(no params)* | `(close − open) / (high − low)`. Measures intrabar buying vs selling pressure. |
| `uo` | Ultimate Oscillator | `fast=7, medium=14, slow=28` | Weighted combo of three buying-pressure ratios. Divergences at extremes are key signals. |
| `apo` | APO | `fast=12, slow=26` | Absolute Price Oscillator — EMA difference without a signal line. Positive = upward momentum. |
| `mfi` | Money Flow Index | `length=14` | RSI-like but uses price × volume. >80 overbought, <20 oversold. |
| `coppock` | Coppock Curve | `length=10, fast=11, slow=14` | Long-term momentum from rate-of-change. Designed for monthly bottoms; works on any TF. |
| `dpo` | DPO | `length=20` | Detrended Price Oscillator — removes trend to expose cycles. Positive = above cycle average. |
| `fisher` | Fisher Transform | `length=9` | Converts price to Gaussian distribution. Sharp spikes at ±2 often signal reversals. |
| `rvgi` | RVGI | `length=14, swma_length=4` | Compares close−open to the high−low range. Signal line crossovers indicate momentum shifts. |
| `kst` | Know Sure Thing | `r1=10,r2=13,r3=15,r4=20,n1=10,n2=13,n3=15,n4=9,signal=9` | Four smoothed ROC values summed. Zero-line and signal-line crossovers are signals. |
### Volatility
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `atr` | ATR | `length=14` | Average True Range — normalized measure of bar-to-bar volatility. Used for stop sizing. |
| `kc` | Keltner Channels | `length=20, scalar=2.0` | EMA ± N × ATR. Price outside channel = trend extension; inside = consolidation. |
| `donchian` | Donchian Channels | `lower_length=20, upper_length=20` | Highest high / lowest low over `length`. Breakout above/below = momentum signal. |
### Volume (plotted in separate pane)
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `obv` | OBV | *(no params)* | Cumulative volume: added on up days, subtracted on down days. Divergence from price = leading signal. |
| `ad` | A/D Line | *(no params)* | Accumulation/Distribution — running total of money flow multiplier × volume. |
| `adosc` | Chaikin Oscillator | `fast=3, slow=10` | EMA difference of A/D line. Positive = accumulation; negative = distribution. |
| `cmf` | Chaikin MF | `length=20` | Sum of money flow volume / total volume. +0.25 = strong buy pressure; −0.25 = strong sell. |
| `eom` | Ease of Movement | `length=14` | Relates price change to volume. High value = price moved easily on low volume. |
| `efi` | Elder's Force Index | `length=13` | Price change × volume. Positive spikes = strong buying; negative = strong selling. |
| `kvo` | Klinger Oscillator | `fast=34, slow=55, signal=13` | EMA difference of a volume-force measure. Signal-line crossovers are trade signals. |
| `pvt` | PVT | *(no params)* | Cumulative volume × % price change. Similar to OBV but uses % change rather than direction. |
### Statistics / Price Transforms
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `stdev` | Std Deviation | `length=20` | Standard deviation of close. Rises in volatile periods; used for volatility regimes. |
| `linreg` | Lin Reg | `length=14` | Least-squares regression endpoint over `length` bars. Smooth trend line; not predictive. |
| `slope` | Lin Reg Slope | `length=14` | Gradient of the regression line. Positive = upward trend; magnitude = steepness. |
| `hl2` | HL2 | *(no params)* | `(high + low) / 2`. Simple midpoint of each bar. |
| `hlc3` | HLC3 | *(no params)* | `(high + low + close) / 3`. Typical price, used in many indicator calculations. |
| `ohlc4` | OHLC4 | *(no params)* | `(open + high + low + close) / 4`. Average price per bar. |
### Trend
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|------------------|--------------|----------------|-------------------------------|
| `psar` | Parabolic SAR | `af0=0.02, af=0.02, max_af=0.2` | Trailing stop dots that follow price and flip on reversal. `af` controls acceleration. |
| `vortex` | Vortex | `length=14` | VI+ and VI− measure upward vs downward movement. VI+ > VI− = uptrend and vice versa. |
| `chop` | Choppiness | `length=14` | 0100: high (>61.8) = choppy/sideways, low (<38.2) = strong trend. Does not give direction. |
---
## Section B — Workspace Format & Tools
### Indicators Store
The `indicators` workspace store has an `indicators` wrapper key containing a JSON object keyed by indicator ID:
```
{
"indicators": {
"ind_1234567890": {
"id": "ind_1234567890", // unique ID, use "ind_" + Date.now()
"pandas_ta_name": "rsi", // lowercase pandas-ta function name from Section A
"instance_name": "rsi_1234567890", // id without "ind_" prefix
"parameters": { "length": 14 }, // pandas-ta keyword args
"visible": true,
"pane": "chart", // "chart" = price pane; "indicator_pane_1" etc for separate
"symbol": "BTC/USDT.BINANCE", // optional, current chart symbol
"created_at": 1712345678, // optional unix timestamp
"modified_at": 1712345678 // optional unix timestamp
// These fields are managed by the web client — do NOT set them:
// "tv_study_id", "tv_indicator_name", "tv_inputs"
},
...
}
}
```
**Important**: All patch paths must start with `/indicators/`. The indicator objects live under the `indicators` key, not at the top level of the store.
**Pane values:**
- `"chart"` — price pane overlays (MAs, BBands, SuperTrend, Ichimoku, VWAP, etc.)
- `"indicator_pane_1"`, `"indicator_pane_2"`, etc. — separate sub-panes below the chart
**General rule**: Overlap/MA indicators go on `"chart"`. Momentum, Volume, Volatility (ATR, Donchian, Keltner), and Statistics indicators go on `"indicator_pane_N"`. When adding multiple separate-pane indicators, reuse the same pane number if they logically belong together, or use a new number.
### Reading Indicators
```
workspace_read("indicators")
```
Returns the full store object. Always read first before modifying so you know the current state. The indicator objects are under the `indicators` key: `result.data.indicators`.
When asked to list or describe current indicators, include:
- The display name and parameters
- A brief description of what each indicator measures and how to interpret it (from Section A)
- Which pane it's on
### Adding an Indicator
Generate a unique ID as `"ind_" + timestamp` (e.g. `"ind_1712345678123"`).
```
workspace_patch("indicators", [
{
"op": "add",
"path": "/indicators/ind_1712345678123",
"value": {
"id": "ind_1712345678123",
"pandas_ta_name": "rsi",
"instance_name": "rsi_1712345678123",
"parameters": { "length": 14 },
"visible": true,
"pane": "indicator_pane_1",
"created_at": 1712345678
}
}
])
```
### Modifying an Indicator
Read first to get the ID, then patch the specific field:
```
workspace_patch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters/length", "value": 21 }
])
```
To modify multiple parameters at once:
```
workspace_patch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters", "value": { "fast": 8, "slow": 21, "signal": 9 } }
])
```
### Removing an Indicator
```
workspace_patch("indicators", [
{ "op": "remove", "path": "/indicators/ind_1712345678123" }
])
```
### Visibility Toggle
```
workspace_patch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/visible", "value": false }
])
```
---
## Section C — Custom Indicators
Custom indicators are Python scripts in the `indicator` category. Use `python_write` / `python_edit` / `python_read` / `python_list` exactly as you would for research scripts, but with `category="indicator"`.
### Writing a Custom Indicator Script
A custom indicator must define a **top-level function whose name exactly matches the sanitized directory name** (the name you passed to `python_write`, after sanitization). It receives the OHLC columns it needs as positional arguments, matching `input_series` in the metadata. It must return a `pd.Series` (single output) or `pd.DataFrame` (multi-output, column names must match `output_columns`).
```python
# Example: volume-weighted RSI (function name = "vw_rsi", directory name = "vw_rsi")
import pandas as pd
import pandas_ta as ta
def vw_rsi(close: pd.Series, volume: pd.Series, length: int = 14) -> pd.Series:
"""Volume-weighted RSI: RSI scaled by relative volume."""
rsi = ta.rsi(close, length=length)
vol_weight = volume / volume.rolling(length).mean()
return (rsi * vol_weight).rolling(3).mean()
```
For multi-output (e.g. bands-style), return a `pd.DataFrame` with columns matching `output_columns`:
```python
import pandas as pd
import pandas_ta as ta
def vol_bands(close: pd.Series, volume: pd.Series, length: int = 20) -> pd.DataFrame:
"""Volatility bands based on volume-weighted std."""
mid = close.rolling(length).mean()
std = (close * (volume / volume.rolling(length).mean())).rolling(length).std()
return pd.DataFrame({"upper": mid + 2 * std, "mid": mid, "lower": mid - 2 * std})
```
After writing a custom indicator with `python_write`, add it to the workspace using `pandas_ta_name: "custom_<sanitized_name>"`.
### Metadata for Custom Indicators
When writing a custom indicator you **must** supply complete metadata so the web client can auto-construct the TradingView plotter. Pass these fields in the `metadata` argument to `python_write`:
| Field | Type | Required | Description |
|---|---|---|---|
| `parameters` | dict | yes | Parameter schema: `{param_name: {type, default, description?, min?, max?}}` |
| `input_series` | list[str] | yes | OHLCV columns passed to the function in order. Valid: `open`, `high`, `low`, `close`, `volume` |
| `output_columns` | list[dict] | yes | Per-series descriptors — see table below |
| `pane` | str | yes | `"price"` (overlaid on candles) or `"separate"` (sub-pane) |
| `filled_areas` | list[dict] | no | Shaded fills between two series — see below |
| `bands` | list[dict] | no | Horizontal reference lines (constant-value series recommended instead — see note) |
#### `output_columns` format
Each entry describes one output series:
```python
{
"name": "value", # column name returned by the function (or "value" for Series)
"display_name": "My Ind", # optional label shown in TV legend
"description": "...", # optional
"plot": { # optional — omit for default (line, auto-color, width 2)
"style": 0, # LineStudyPlotStyle integer (see table below)
"color": "#2196F3", # CSS hex; omit for auto-assigned color
"linewidth": 2, # 1–4, default 2
"visible": True # default True
}
}
```
**`plot.style` values (LineStudyPlotStyle):**
| Value | Renders as |
|---|---|
| `0` | Line (default) |
| `1` | Histogram bars |
| `3` | Dots / Cross markers |
| `4` | Area (filled under line) |
| `5` | Columns (vertical bars) |
| `6` | Circles |
| `9` | Step line |
#### `filled_areas` format (optional)
Shaded fills between two series. The web client supports up to 4 fills, paired by index to output column pairs `(0,1)`, `(2,3)`, `(4,5)`, `(6,7)`. For a fill to work, the two series it shades must be at consecutive even/odd positions in `output_columns`.
```python
[
{
"id": "fill_upper_lower", # descriptive id (informational only)
"type": "plot_plot", # always "plot_plot" for fills between series
"series1": "upper", # output_column name of the first boundary
"series2": "lower", # output_column name of the second boundary
"color": "#2196F3", # CSS hex fill color (default: auto)
"opacity": 0.1 # 0.0–1.0 (default 0.1)
}
]
```
**Note on horizontal reference lines (`bands`):** TradingView's native band mechanism fixes the level value at registration time and cannot be changed per-instance. Instead, add a constant-value output column to your function and mark it with a dashed style:
```python
# In your indicator function:
result["ob"] = 70.0 # constant overbought level
result["os"] = 30.0 # constant oversold level
```
```python
# In output_columns metadata:
{"name": "ob", "display_name": "OB", "plot": {"style": 0, "color": "#ef5350", "linewidth": 1}},
{"name": "os", "display_name": "OS", "plot": {"style": 0, "color": "#26a69a", "linewidth": 1}},
```
#### Complete examples
**Single oscillator line (volume-weighted RSI):**
```python
python_write(
category="indicator",
name="vw_rsi",
description="RSI weighted by relative volume.",
code="""
import pandas as pd
import pandas_ta as ta
def vw_rsi(close, volume, length=14):
rsi = ta.rsi(close, length=length)
vol_weight = volume / volume.rolling(length).mean()
return (rsi * vol_weight).rolling(3).mean()
""",
metadata={
"parameters": {
"length": {"type": "int", "default": 14, "min": 2, "max": 200, "description": "RSI period"}
},
"input_series": ["close", "volume"],
"output_columns": [
{"name": "value", "display_name": "VW-RSI", "plot": {"style": 0}}
],
"pane": "separate"
}
)
```
**Bollinger Bands with fill (upper + mid + lower, shaded between upper and lower):**
```python
python_write(
category="indicator",
name="my_bbands",
description="Custom Bollinger Bands.",
code="""
import pandas as pd
import pandas_ta as ta
def my_bbands(close, length=20, std=2.0):
bb = ta.bbands(close, length=length, std=std)
return pd.DataFrame({
"upper": bb.iloc[:, 0],
"mid": bb.iloc[:, 1],
"lower": bb.iloc[:, 2],
})
""",
metadata={
"parameters": {
"length": {"type": "int", "default": 20, "min": 5, "max": 500},
"std": {"type": "float", "default": 2.0, "min": 0.5, "max": 5.0}
},
"input_series": ["close"],
"output_columns": [
{"name": "upper", "display_name": "Upper", "plot": {"style": 0, "color": "#2196F3"}},
{"name": "lower", "display_name": "Lower", "plot": {"style": 0, "color": "#2196F3"}},
{"name": "mid", "display_name": "Mid", "plot": {"style": 0, "color": "#FF9800"}}
],
"pane": "price",
"filled_areas": [
{"id": "fill", "type": "plot_plot", "series1": "upper", "series2": "lower",
"color": "#2196F3", "opacity": 0.08}
]
}
)
```
Note: `upper` and `lower` are at positions 0 and 1 in `output_columns`, which maps to fill slot `fill_0` (the only fill slot pairing positions 0 and 1).
**MACD-style (line + signal + histogram):**
```python
"output_columns": [
{"name": "macd", "display_name": "MACD", "plot": {"style": 0, "color": "#2196F3"}},
{"name": "signal", "display_name": "Signal", "plot": {"style": 0, "color": "#FF9800"}},
{"name": "hist", "display_name": "Hist", "plot": {"style": 1, "color": "#4CAF50"}}
],
"pane": "separate"
```
### Adding a Custom Indicator to the Workspace
After writing and validating, patch the workspace with **both** the standard fields and `custom_metadata` (the web client uses this to build the TradingView custom study):
```
workspace_patch("indicators", [
{
"op": "add",
"path": "/indicators/ind_1712345678123",
"value": {
"id": "ind_1712345678123",
"pandas_ta_name": "custom_vw_rsi",
"instance_name": "custom_vw_rsi_1712345678123",
"parameters": { "length": 14 },
"visible": true,
"pane": "indicator_pane_1",
"created_at": 1712345678,
"custom_metadata": {
"display_name": "Volume-Weighted RSI",
"parameters": {
"length": {"type": "int", "default": 14, "min": 2, "max": 200, "description": "RSI period"}
},
"input_series": ["close", "volume"],
"output_columns": [
{"name": "value", "display_name": "VW-RSI", "plot": {"style": 0}}
],
"pane": "separate"
}
}
}
])
```
The `custom_metadata` block must match what was stored in the indicator's `metadata.json`.
### Validating with evaluate_indicator
Use `evaluate_indicator` to test any indicator (standard or custom) before adding it to the workspace. This confirms it computes correctly on real data:
```
evaluate_indicator(
symbol="BTC/USDT.BINANCE",
from_time="30 days ago",
to_time="now",
period_seconds=3600,
pandas_ta_name="custom_vw_rsi",
parameters={"length": 14}
)
```
Returns a structured array of `{timestamp, value}` (or multiple value columns for multi-output indicators like MACD, BBands). Use the results to confirm the indicator is computing as expected before patching the workspace.
---
## Workflow
1. **Read first**: Always call `workspace_read("indicators")` before any modification so you know what's already on the chart.
2. **Check before creating custom indicators**: Before writing a new custom indicator with `python_write`, call `python_list(category="indicator")` to see what already exists. If an indicator with the same name (or a matching sanitized name) is already present, reuse or update it rather than creating a duplicate. Two indicator directories with different capitalizations (e.g. `TrendFlex` and `trendflex`) map to the same `pandas_ta_name` (`custom_trendflex`) and will conflict.
3. **List descriptively**: When asked what indicators are showing, include the brief description and interpretation from Section A for each — not just the name and parameters.
4. **Validate custom indicators**: Use `evaluate_indicator` after writing a custom indicator script to confirm it runs without errors before adding to workspace.
5. **Patch, don't overwrite**: Always use `workspace_patch` — never call `workspace_write` on the indicators store, as that would replace all indicators including ones the user added manually via the UI.
6. **Confirm changes**: After patching, briefly confirm what was added/changed/removed and what the indicator does (one sentence from Section A).
7. **Pane assignment**: When adding indicators, assign the correct pane type. When adding multiple momentum indicators, stack them in separate panes (`indicator_pane_1`, `indicator_pane_2`, etc.) unless the user asks otherwise.

View File

@@ -20,7 +20,7 @@ export interface ResearchResult {
* Research Subagent * Research Subagent
* *
* Specialized agent for creating and running Python research scripts. * Specialized agent for creating and running Python research scripts.
* Uses category_* MCP tools to: * Uses python_* MCP tools to:
* - Create/edit research scripts with DataAPI and ChartingAPI * - Create/edit research scripts with DataAPI and ChartingAPI
* - Execute scripts and capture matplotlib charts * - Execute scripts and capture matplotlib charts
* - Iterate on errors with autonomous coding loop * - Iterate on errors with autonomous coding loop

View File

@@ -14,22 +14,22 @@ Create Python scripts that:
You have direct access to these MCP tools: You have direct access to these MCP tools:
- **category_write**: Create a new research script - **python_write**: Create a new script (research, strategy, or indicator category)
- Required: category="research", name, description, code - Required: category, name, description, code
- Optional: metadata (with conda_packages list if needed) - Optional: metadata (category-specific fields — see below)
- Automatically executes the script after writing - For research: automatically executes the script after writing
- Returns validation results and execution output (text + images) - Returns validation results and execution output (text + images)
- **category_edit**: Update an existing research script - **python_edit**: Update an existing script
- Required: category="research", name - Required: category, name
- Optional: code, description, metadata - Optional: code, description, metadata
- Automatically re-executes if code is updated - For research: automatically re-executes if code is updated
- Returns validation results and execution output - Returns validation results and execution output
- **category_read**: Read an existing research script - **python_read**: Read an existing research script
- Returns: code, metadata - Returns: code, metadata
- **category_list**: List all research scripts - **python_list**: List all research scripts
- Returns: array of {name, description, metadata} - Returns: array of {name, description, metadata}
- **execute_research**: Manually run a research script - **execute_research**: Manually run a research script
@@ -186,15 +186,59 @@ Key defaults to keep in mind:
For multi-output indicator column extraction patterns and complete charting examples, fetch `pandas-ta-reference.md` from your knowledge base. For multi-output indicator column extraction patterns and complete charting examples, fetch `pandas-ta-reference.md` from your knowledge base.
## Strategy Metadata Format
When writing or editing a strategy (`category="strategy"`), always include a `metadata` object with:
- **`data_feeds`** — list of feed descriptors the strategy requires:
```json
[
{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "Primary BTC/USDT hourly feed"},
{"symbol": "ETH/USDT.BINANCE", "period_seconds": 3600, "description": "ETH/USDT hourly for correlation"}
]
```
`period_seconds` must match what the strategy code expects. Use the same values when calling `backtest_strategy`.
- **`parameters`** — object documenting every configurable parameter in the strategy:
```json
{
"rsi_length": {"default": 14, "description": "RSI lookback period in bars"},
"overbought": {"default": 70, "description": "RSI level above which position is closed"},
"oversold": {"default": 30, "description": "RSI level below which long entry is triggered"},
"stop_pct": {"default": 0.02, "description": "Stop-loss as a fraction of entry price (e.g. 0.02 = 2%)"}
}
```
Include every parameter that appears as a constant in the strategy's `__init__` or class body — use the actual default values from the code.
Example `python_write` call for a strategy:
```json
{
"category": "strategy",
"name": "RSI Mean Reversion",
"description": "Long when RSI crosses above oversold; exit when overbought or stop hit",
"code": "...",
"metadata": {
"data_feeds": [
{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "BTC/USDT hourly OHLCV + order flow"}
],
"parameters": {
"rsi_length": {"default": 14, "description": "RSI lookback period"},
"overbought": {"default": 70, "description": "Exit long above this RSI level"},
"oversold": {"default": 30, "description": "Enter long below this RSI level"}
}
}
}
```
## Coding Loop Pattern ## Coding Loop Pattern
When a user requests analysis: When a user requests analysis:
1. **Understand the request**: What data is needed? What analysis? What visualization? 1. **Understand the request**: What data is needed? What analysis? What visualization?
2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `category_write` or `category_edit`. Check first with `category_read` — if the script already exists, use `category_edit` to update it rather than creating a new one with `category_write`. 2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `python_write` or `python_edit`. Check first with `python_read` — if the script already exists, use `python_edit` to update it rather than creating a new one with `python_write`.
3. **Write the script**: Use `category_write` (new) or `category_edit` (existing) 3. **Write the script**: Use `python_write` (new) or `python_edit` (existing)
- Write clean, well-commented Python code - Write clean, well-commented Python code
- Include proper error handling - Include proper error handling
- Use appropriate ticker symbols, time ranges, and periods - Use appropriate ticker symbols, time ranges, and periods
@@ -208,7 +252,7 @@ When a user requests analysis:
5. **Iterate if needed**: If there are errors: 5. **Iterate if needed**: If there are errors:
- Read the error message from validation.output or execution text - Read the error message from validation.output or execution text
- Use `category_edit` to fix the script - Use `python_edit` to fix the script
- The script will auto-execute again - The script will auto-execute again
6. **Return results**: Once successful, summarize what was done 6. **Return results**: Once successful, summarize what was done
@@ -246,7 +290,7 @@ When a user requests analysis:
User: "Show me BTC price action for the last 7 days with volume" User: "Show me BTC price action for the last 7 days with volume"
You: You:
1. Call `category_write` with: 1. Call `python_write` with:
- name: "BTC 7-Day Price Action" - name: "BTC 7-Day Price Action"
- description: "BTC/USDT price and volume analysis for the last 7 days" - description: "BTC/USDT price and volume analysis for the last 7 days"
- code: (Python script that fetches data and creates chart) - code: (Python script that fetches data and creates chart)

View File

@@ -0,0 +1,30 @@
# Web Explore Subagent Configuration
name: web-explore
description: Searches the web and academic papers, fetches content, and returns a textual summary
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# No memory files needed
memoryFiles: []
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- web_search
- page_fetch
- academic_search
- content_summarization
# Tools available to this subagent (all platform tools, no MCP needed)
tools:
platform:
- web_search
- fetch_page
- arxiv_search
mcp: []

View File

@@ -0,0 +1,92 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage } from '@langchain/core/messages';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import type { FastifyBaseLogger } from 'fastify';
/**
* Web Explore Subagent
*
* Accepts a research instruction, searches the web (DuckDuckGo) or arXiv
* for academic queries, fetches relevant page/PDF content, and returns a
* markdown summary with cited sources.
*
* No MCP client needed — operates entirely through platform tools.
*/
export class WebExploreSubagent extends BaseSubagent {
  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger,
    tools?: any[]
  ) {
    // No MCP client for this subagent — pass undefined in its slot.
    super(config, model, logger, undefined, tools);
  }

  /**
   * Runs the ReAct search loop for one instruction and returns the final
   * AI message text (or a fallback string when no AI message was produced).
   */
  async execute(context: SubagentContext, instruction: string): Promise<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        instruction: instruction.substring(0, 200),
        toolCount: this.tools.length,
        toolNames: this.tools.map(t => t.name),
      },
      'Web explore subagent starting'
    );

    // buildMessages puts the system prompt first and the human instruction last.
    const messages = this.buildMessages(context, instruction);
    const systemPrompt = messages[0] as SystemMessage;
    const request = messages[messages.length - 1];

    const agent = createReactAgent({
      llm: this.model,
      tools: this.tools,
      prompt: systemPrompt,
    });

    // Cap the tool-use loop so a runaway search cannot spin forever.
    const outcome = await agent.invoke(
      { messages: [request] },
      { recursionLimit: 15 }
    );

    const history: any[] = outcome.messages ?? [];
    this.logger.info({ messageCount: history.length }, 'Web explore subagent graph completed');

    // Scan backwards for the most recent AI message in the transcript.
    let lastAI: any = undefined;
    for (let i = history.length - 1; i >= 0; i--) {
      const msg = history[i];
      if (msg.constructor?.name === 'AIMessage' || msg._getType?.() === 'ai') {
        lastAI = msg;
        break;
      }
    }

    let finalText: string;
    if (!lastAI) {
      finalText = 'No results found.';
    } else if (typeof lastAI.content === 'string') {
      finalText = lastAI.content;
    } else {
      // Structured (multi-part) content is serialized rather than dropped.
      finalText = JSON.stringify(lastAI.content);
    }

    this.logger.info({ textLength: finalText.length }, 'Web explore subagent finished');
    return finalText;
  }
}
/**
* Factory function to create and initialize WebExploreSubagent
*/
export async function createWebExploreSubagent(
  model: BaseChatModel,
  logger: FastifyBaseLogger,
  basePath: string,
  tools?: any[]
): Promise<WebExploreSubagent> {
  // Lazy-load I/O + parsing deps so module import stays cheap.
  const { readFile } = await import('fs/promises');
  const { join } = await import('path');
  const yaml = await import('js-yaml');

  // Read and parse the YAML config that ships alongside the subagent.
  const rawConfig = await readFile(join(basePath, 'config.yaml'), 'utf-8');
  const parsedConfig = yaml.load(rawConfig) as SubagentConfig;

  const instance = new WebExploreSubagent(parsedConfig, model, logger, tools);
  await instance.initialize(basePath);
  return instance;
}

View File

@@ -0,0 +1,33 @@
# Web Explore Agent
You are a research assistant that searches the web and academic databases to answer questions or gather information according to the given instructions.
## Tools
You have three tools:
- **`web_search`** — Search the web broadly (Tavily). Returns titles, URLs, and content summaries. Best for general information, news, documentation, proprietary/niche topics, trading indicators, software papers, and anything not likely to be on arXiv.
- **`arxiv_search`** — Search arXiv for academic preprints. Returns titles, authors, abstracts, and PDF links. Use this **only** for peer-reviewed or academic research (e.g. machine learning, statistics, finance theory). Most trading indicators, technical analysis tools, and proprietary methods are NOT on arXiv.
- **`fetch_page`** — Fetch the full content of a URL (web page or PDF). PDFs are automatically converted to text. Use this after searching to read the complete content of a promising result.
## Strategy
1. **Choose the right search tool first:**
- Default to `web_search` for most queries — it covers the broadest range of sources including trading indicators, technical analysis, software documentation, and niche topics
- Use `arxiv_search` only when the instruction is explicitly academic in nature (e.g. "find papers on", "peer-reviewed research on", "academic study of")
- If `arxiv_search` returns nothing clearly relevant after 1–2 queries → switch to `web_search` immediately
2. **Search, then fetch:** After getting results, call `fetch_page` on the 2–3 most promising URLs to get full content.
3. **Don't loop on the same query:** If a search returns results but nothing useful, change your approach — try different keywords or a different tool. Never repeat the same search query.
4. **Synthesize:** Write a clear, well-structured markdown summary that directly addresses the instruction. Cite sources with inline links.
## Output format
Return a markdown response with:
- A direct answer or summary addressing the instruction
- Key findings or takeaways
- Sources cited inline (e.g. `[Title](url)`)
Keep the response focused and concise — avoid padding or restating the question.

View File

@@ -9,11 +9,6 @@ export {
type WorkflowEdgeCondition, type WorkflowEdgeCondition,
} from './base-workflow.js'; } from './base-workflow.js';
export {
StrategyValidationWorkflow,
createStrategyValidationWorkflow,
} from './strategy-validation/graph.js';
export { export {
TradingRequestWorkflow, TradingRequestWorkflow,
createTradingRequestWorkflow, createTradingRequestWorkflow,

View File

@@ -1,19 +0,0 @@
# Strategy Validation Workflow Configuration
name: strategy-validation
description: Validates trading strategies with code review, backtest, and risk assessment
# Workflow settings
timeout: 300000 # 5 minutes
maxRetries: 3
requiresApproval: true
approvalNodes:
- human_approval
# Validation loop settings
maxValidationRetries: 3 # Max times to retry fixing errors
minBacktestScore: 0.5 # Minimum Sharpe ratio to pass
# Model override (optional)
model: claude-sonnet-4-6
temperature: 0.3

View File

@@ -1,138 +0,0 @@
import { StateGraph } from '@langchain/langgraph';
import { BaseWorkflow, type WorkflowConfig } from '../base-workflow.js';
import { StrategyValidationState, type StrategyValidationStateType } from './state.js';
import {
createCodeReviewNode,
createFixCodeNode,
createBacktestNode,
createRiskAssessmentNode,
createHumanApprovalNode,
createRecommendationNode,
} from './nodes.js';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
/**
* Strategy Validation Workflow
*
* Multi-step workflow with validation loop:
* 1. Code Review (using CodeReviewerSubagent)
* 2. If issues found → Fix Code → Loop back to Code Review
* 3. Backtest (using user's MCP server)
* 4. If backtest fails → Fix Code → Loop back to Code Review
* 5. Risk Assessment
* 6. Human Approval (pause for user input)
* 7. Final Recommendation
*
* Features:
* - Validation loop with max retries
* - Human-in-the-loop approval gate
* - Multi-file memory from CodeReviewerSubagent
* - Comprehensive state tracking
*/
export class StrategyValidationWorkflow extends BaseWorkflow<StrategyValidationStateType> {
constructor(
config: WorkflowConfig,
private model: BaseChatModel,
private codeReviewer: CodeReviewerSubagent,
private mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
logger: FastifyBaseLogger
) {
super(config, logger);
}
buildGraph(): any {
const graph = new StateGraph(StrategyValidationState);
// Create nodes
const codeReviewNode = createCodeReviewNode(this.codeReviewer, this.logger);
const fixCodeNode = createFixCodeNode(this.model, this.logger);
const backtestNode = createBacktestNode(this.mcpBacktestFn, this.logger);
const riskAssessmentNode = createRiskAssessmentNode(this.model, this.logger);
const humanApprovalNode = createHumanApprovalNode(this.logger);
const recommendationNode = createRecommendationNode(this.model, this.logger);
// Add nodes to graph
graph
.addNode('code_review', codeReviewNode)
.addNode('fix_code', fixCodeNode)
.addNode('backtest', backtestNode)
.addNode('risk_assessment', riskAssessmentNode)
.addNode('human_approval', humanApprovalNode)
.addNode('recommendation', recommendationNode);
// Define edges
(graph as any).addEdge('__start__', 'code_review');
// Conditional: After code review, fix if needed or proceed to backtest
(graph as any).addConditionalEdges('code_review', (state: any) => {
if (state.needsFixing && state.validationRetryCount < 3) {
return 'fix_code';
}
if (state.needsFixing && state.validationRetryCount >= 3) {
return 'recommendation'; // Give up, generate rejection
}
return 'backtest';
});
// After fixing code, loop back to code review
(graph as any).addEdge('fix_code', 'code_review');
// Conditional: After backtest, fix if failed or proceed to risk assessment
(graph as any).addConditionalEdges('backtest', (state: any) => {
if (!state.backtestPassed && state.validationRetryCount < 3) {
return 'fix_code';
}
if (!state.backtestPassed && state.validationRetryCount >= 3) {
return 'recommendation'; // Give up
}
return 'risk_assessment';
});
// After risk assessment, go to human approval
(graph as any).addEdge('risk_assessment', 'human_approval');
// Conditional: After human approval, proceed to recommendation or reject
(graph as any).addConditionalEdges('human_approval', (state: any) => {
return state.humanApproved ? 'recommendation' : '__end__';
});
// Final recommendation is terminal
(graph as any).addEdge('recommendation', '__end__');
return graph;
}
}
/**
* Factory function to create and compile workflow
*/
export async function createStrategyValidationWorkflow(
  model: BaseChatModel,
  codeReviewer: CodeReviewerSubagent,
  mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
  logger: FastifyBaseLogger,
  configPath: string
): Promise<StrategyValidationWorkflow> {
  // Lazy imports keep module load light.
  const { readFile } = await import('fs/promises');
  const yaml = await import('js-yaml');

  // Parse the workflow's YAML configuration.
  const config = yaml.load(await readFile(configPath, 'utf-8')) as WorkflowConfig;

  const workflow = new StrategyValidationWorkflow(
    config,
    model,
    codeReviewer,
    mcpBacktestFn,
    logger
  );

  // Compile the graph before handing the workflow to callers.
  workflow.compile();
  return workflow;
}

View File

@@ -1,233 +0,0 @@
import type { StrategyValidationStateType } from './state.js';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
/**
* Node: Code Review
* Reviews strategy code using CodeReviewerSubagent
*/
export function createCodeReviewNode(
  codeReviewer: CodeReviewerSubagent,
  logger: FastifyBaseLogger
) {
  // Returns a graph node that reviews the current strategy code and flags
  // whether a fix pass is required.
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Code review');

    const review = await codeReviewer.execute(
      { userContext: state.userContext },
      state.strategyCode
    );

    // Simple issue detection (in production, parse structured output).
    const lowered = review.toLowerCase();
    const hasIssues = lowered.includes('critical') || lowered.includes('reject');

    return {
      codeReview: review,
      codeIssues: hasIssues ? ['Issues detected in code review'] : [],
      needsFixing: hasIssues,
    };
  };
}
/**
* Node: Fix Code Issues
* Uses LLM to fix issues identified in code review
*/
export function createFixCodeNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  // Returns a graph node that asks the LLM to repair the strategy based on
  // the latest review feedback, bumping the retry counter each pass.
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Fixing code issues');

    const system = new SystemMessage(`You are a trading strategy developer.
Fix the issues identified in the code review while maintaining the strategy's logic.
Return only the corrected code without explanation.`);

    const human = new HumanMessage(`Original code:
\`\`\`typescript
${state.strategyCode}
\`\`\`
Code review feedback:
${state.codeReview}
Provide the corrected code:`);

    const response = await model.invoke([system, human]);

    // Strip any markdown fences the model wrapped around the code.
    const raw = response.content as string;
    const fixedCode = raw
      .replace(/```typescript\n?/g, '')
      .replace(/```\n?/g, '')
      .trim();

    return {
      strategyCode: fixedCode,
      validationRetryCount: state.validationRetryCount + 1,
    };
  };
}
/**
 * Node: Backtest Strategy
 * Runs backtest using user's MCP server
 *
 * Any thrown error is captured into `backtestResults` rather than propagated,
 * so the graph can route to the fix step instead of aborting.
 */
export function createBacktestNode(
  mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Running backtest');
    try {
      const { strategyCode, ticker, timeframe } = state;
      const results = await mcpBacktestFn(strategyCode, ticker, timeframe);

      // Pass/fail check (simplified): require Sharpe ratio above 0.5.
      const sharpeRatio = (results.sharpeRatio as number) || 0;
      const backtestPassed = sharpeRatio > 0.5;

      return {
        backtestResults: results,
        backtestPassed,
        needsFixing: !backtestPassed,
      };
    } catch (error) {
      logger.error({ error }, 'Backtest failed');
      return {
        backtestResults: { error: (error as Error).message },
        backtestPassed: false,
        needsFixing: true,
      };
    }
  };
}
/**
 * Node: Risk Assessment
 * Analyzes backtest results for risk
 *
 * Produces the free-text assessment plus a parsed low/medium/high level.
 */
export function createRiskAssessmentNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Risk assessment');

    const systemPrompt = `You are a risk management expert.
Analyze the strategy and backtest results to assess risk level.
Provide: risk level (low/medium/high) and detailed assessment.`;
    const userPrompt = `Strategy code:
\`\`\`typescript
${state.strategyCode}
\`\`\`
Backtest results:
${JSON.stringify(state.backtestResults, null, 2)}
Provide risk assessment in format:
RISK_LEVEL: [low/medium/high]
ASSESSMENT: [detailed explanation]`;

    const response = await model.invoke([
      new SystemMessage(systemPrompt),
      new HumanMessage(userPrompt),
    ]);
    const assessment = response.content as string;

    // Parse risk level (simplified): default to medium; explicit markers
    // override it, with 'high' taking precedence when both appear.
    let riskLevel: 'low' | 'medium' | 'high' = 'medium';
    const markers: ReadonlyArray<['low' | 'high', string]> = [
      ['low', 'RISK_LEVEL: low'],
      ['high', 'RISK_LEVEL: high'],
    ];
    for (const [level, marker] of markers) {
      if (assessment.includes(marker)) riskLevel = level;
    }

    return {
      riskAssessment: assessment,
      riskLevel,
    };
  };
}
/**
 * Node: Human Approval
 * Pauses workflow for human review
 */
export function createHumanApprovalNode(logger: FastifyBaseLogger) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Awaiting human approval');
    // In real implementation, this would:
    // 1. Send approval request to user's channel
    // 2. Store workflow state with interrupt
    // 3. Wait for user response
    // 4. Resume with approval decision

    // For now, auto-approve if risk is low/medium and backtest passed
    const riskAcceptable = state.riskLevel === 'low' || state.riskLevel === 'medium';
    const autoApprove = state.backtestPassed && riskAcceptable;
    return {
      humanApproved: autoApprove,
      approvalComment: autoApprove ? 'Auto-approved: passed validation' : 'Needs manual review',
    };
  };
}
/**
 * Node: Final Recommendation
 * Generates final recommendation based on all steps
 */
export function createRecommendationNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Generating recommendation');

    const systemPrompt = `You are the final decision maker for strategy deployment.
Based on all validation steps, provide a clear recommendation: approve, reject, or revise.`;
    const userPrompt = `Strategy validation summary:
Code Review: ${state.codeIssues.length === 0 ? 'Passed' : 'Issues found'}
Backtest: ${state.backtestPassed ? 'Passed' : 'Failed'}
Risk Level: ${state.riskLevel}
Human Approved: ${state.humanApproved}
Backtest Results:
${JSON.stringify(state.backtestResults, null, 2)}
Risk Assessment:
${state.riskAssessment}
Provide final recommendation (approve/reject/revise) and reasoning:`;

    const response = await model.invoke([
      new SystemMessage(systemPrompt),
      new HumanMessage(userPrompt),
    ]);
    const recommendation = response.content as string;

    // Parse recommendation (simplified): default to 'revise'; a mention of
    // "approve" upgrades, and "reject" takes final precedence over both.
    const lowered = recommendation.toLowerCase();
    let decision: 'approve' | 'reject' | 'revise' = 'revise';
    if (lowered.includes('approve')) decision = 'approve';
    if (lowered.includes('reject')) decision = 'reject';

    return {
      recommendation: decision,
      recommendationReason: recommendation,
      output: recommendation,
    };
  };
}

View File

@@ -1,78 +0,0 @@
import { Annotation } from '@langchain/langgraph';
import { BaseWorkflowState } from '../base-workflow.js';
/**
 * Strategy validation workflow state
 *
 * Extends base workflow state with strategy-specific fields
 */

/**
 * Channel reducer config shared by every step-result field: the latest
 * non-nullish update wins, otherwise the previous value is kept.
 */
const lastValue = <T>(defaultValue: () => T) => ({
  value: (left: T, right: T) => right ?? left,
  default: defaultValue,
});

export const StrategyValidationState = Annotation.Root({
  ...BaseWorkflowState.spec,

  // Input
  strategyCode: Annotation<string>(),
  ticker: Annotation<string>(),
  timeframe: Annotation<string>(),

  // Code review step
  codeReview: Annotation<string | null>(lastValue<string | null>(() => null)),
  codeIssues: Annotation<string[]>(lastValue<string[]>(() => [])),

  // Backtest step
  backtestResults: Annotation<Record<string, unknown> | null>(
    lastValue<Record<string, unknown> | null>(() => null)
  ),
  backtestPassed: Annotation<boolean>(lastValue<boolean>(() => false)),

  // Risk assessment step
  riskAssessment: Annotation<string | null>(lastValue<string | null>(() => null)),
  riskLevel: Annotation<'low' | 'medium' | 'high' | null>(
    lastValue<'low' | 'medium' | 'high' | null>(() => null)
  ),

  // Human approval step
  humanApproved: Annotation<boolean>(lastValue<boolean>(() => false)),
  approvalComment: Annotation<string | null>(lastValue<string | null>(() => null)),

  // Validation loop control
  validationRetryCount: Annotation<number>(lastValue<number>(() => 0)),
  needsFixing: Annotation<boolean>(lastValue<boolean>(() => false)),

  // Final output
  recommendation: Annotation<'approve' | 'reject' | 'revise' | null>(
    lastValue<'approve' | 'reject' | 'revise' | null>(() => null)
  ),
  recommendationReason: Annotation<string | null>(lastValue<string | null>(() => null)),
});

export type StrategyValidationStateType = typeof StrategyValidationState.State;

View File

@@ -1,5 +1,5 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatAnthropic } from '@langchain/anthropic'; import { ChatOpenAI } from '@langchain/openai';
import type { FastifyBaseLogger } from 'fastify'; import type { FastifyBaseLogger } from 'fastify';
import { type ModelMiddleware, NoopMiddleware, AnthropicCachingMiddleware } from './middleware.js'; import { type ModelMiddleware, NoopMiddleware, AnthropicCachingMiddleware } from './middleware.js';
@@ -10,7 +10,7 @@ export { NoopMiddleware, AnthropicCachingMiddleware };
* Supported LLM providers * Supported LLM providers
*/ */
export enum LLMProvider { export enum LLMProvider {
ANTHROPIC = 'anthropic', DEEP_INFRA = 'deepinfra',
} }
/** /**
@@ -47,11 +47,13 @@ export interface LicenseModelsConfig {
* Provider configuration with API keys * Provider configuration with API keys
*/ */
export interface ProviderConfig { export interface ProviderConfig {
anthropicApiKey?: string; deepinfraApiKey?: string;
defaultModel?: ModelConfig; defaultModel?: ModelConfig;
licenseModels?: LicenseModelsConfig; licenseModels?: LicenseModelsConfig;
} }
const DEEP_INFRA_BASE_URL = 'https://api.deepinfra.com/v1/openai';
/** /**
* LLM Provider factory * LLM Provider factory
* Creates model instances with unified interface across providers * Creates model instances with unified interface across providers
@@ -75,8 +77,8 @@ export class LLMProviderFactory {
); );
switch (modelConfig.provider) { switch (modelConfig.provider) {
case LLMProvider.ANTHROPIC: case LLMProvider.DEEP_INFRA:
return this.createAnthropicModel(modelConfig); return this.createDeepInfraModel(modelConfig);
default: default:
throw new Error(`Unsupported provider: ${modelConfig.provider}`); throw new Error(`Unsupported provider: ${modelConfig.provider}`);
@@ -84,22 +86,24 @@ export class LLMProviderFactory {
} }
/** /**
* Create Anthropic Claude model * Create Deep Infra model via OpenAI-compatible API
*/ */
private createAnthropicModel(config: ModelConfig): { model: ChatAnthropic; middleware: AnthropicCachingMiddleware } { private createDeepInfraModel(config: ModelConfig): { model: ChatOpenAI; middleware: NoopMiddleware } {
if (!this.config.anthropicApiKey) { if (!this.config.deepinfraApiKey) {
throw new Error('Anthropic API key not configured'); throw new Error('Deep Infra API key not configured');
} }
const model = new ChatAnthropic({ const model = new ChatOpenAI({
model: config.model, model: config.model,
temperature: config.temperature ?? 0.7, temperature: config.temperature ?? 0.7,
maxTokens: config.maxTokens ?? 4096, maxTokens: config.maxTokens ?? 4096,
anthropicApiKey: this.config.anthropicApiKey, apiKey: this.config.deepinfraApiKey,
clientOptions: { defaultHeaders: { 'anthropic-beta': 'prompt-caching-2024-07-31' } }, configuration: {
baseURL: DEEP_INFRA_BASE_URL,
},
}); });
return { model, middleware: new AnthropicCachingMiddleware() }; return { model, middleware: new NoopMiddleware() };
} }
/** /**
@@ -110,13 +114,13 @@ export class LLMProviderFactory {
return this.config.defaultModel; return this.config.defaultModel;
} }
if (!this.config.anthropicApiKey) { if (!this.config.deepinfraApiKey) {
throw new Error('Anthropic API key not configured'); throw new Error('Deep Infra API key not configured');
} }
return { return {
provider: LLMProvider.ANTHROPIC, provider: LLMProvider.DEEP_INFRA,
model: 'claude-sonnet-4-6', model: 'zai-org/GLM-5',
}; };
} }
@@ -132,16 +136,12 @@ export class LLMProviderFactory {
* Predefined model configurations * Predefined model configurations
*/ */
export const MODELS = { export const MODELS = {
CLAUDE_SONNET: { GLM_5: {
provider: LLMProvider.ANTHROPIC, provider: LLMProvider.DEEP_INFRA,
model: 'claude-sonnet-4-6', model: 'zai-org/GLM-5',
}, },
CLAUDE_HAIKU: { QWEN_235B: {
provider: LLMProvider.ANTHROPIC, provider: LLMProvider.DEEP_INFRA,
model: 'claude-haiku-4-5-20251001', model: 'Qwen/Qwen3-235B-A22B-Instruct-2507',
},
CLAUDE_OPUS: {
provider: LLMProvider.ANTHROPIC,
model: 'claude-opus-4-6',
}, },
} as const satisfies Record<string, ModelConfig>; } as const satisfies Record<string, ModelConfig>;

View File

@@ -113,17 +113,17 @@ export class ModelRouter {
// Fallback to hardcoded defaults // Fallback to hardcoded defaults
if (license.licenseType === 'enterprise') { if (license.licenseType === 'enterprise') {
return isComplex return isComplex
? { provider: LLMProvider.ANTHROPIC, model: 'claude-opus-4-6' } ? { provider: LLMProvider.DEEP_INFRA, model: 'Qwen/Qwen3-235B-A22B-Instruct-2507' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' }; : { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
} }
if (license.licenseType === 'pro') { if (license.licenseType === 'pro') {
return isComplex return isComplex
? { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' } ? { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' }; : { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
} }
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' }; return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
} }
/** /**
@@ -141,13 +141,13 @@ export class ModelRouter {
// Fallback to hardcoded defaults // Fallback to hardcoded defaults
switch (license.licenseType) { switch (license.licenseType) {
case 'enterprise': case 'enterprise':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' }; return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
case 'pro': case 'pro':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' }; return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
case 'free': case 'free':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' }; return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
default: default:
return this.defaultModel; return this.defaultModel;
@@ -166,8 +166,8 @@ export class ModelRouter {
} }
} }
// Fallback: use Haiku for cost efficiency // Fallback: use GLM-5
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' }; return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
} }
/** /**
@@ -195,12 +195,12 @@ export class ModelRouter {
// Fallback to hardcoded defaults // Fallback to hardcoded defaults
if (license.licenseType === 'free') { if (license.licenseType === 'free') {
const allowedModels = ['claude-haiku-4-5-20251001']; const allowedModels = ['zai-org/GLM-5'];
return allowedModels.includes(model.model); return allowedModels.includes(model.model);
} }
if (license.licenseType === 'pro') { if (license.licenseType === 'pro') {
const blockedModels = ['claude-opus-4-6']; const blockedModels = ['Qwen/Qwen3-235B-A22B-Instruct-2507'];
return !blockedModels.includes(model.model); return !blockedModels.includes(model.model);
} }

View File

@@ -90,31 +90,28 @@ function loadConfig() {
// LLM provider API keys and model configuration // LLM provider API keys and model configuration
providerConfig: { providerConfig: {
anthropicApiKey: secretsData.llm_providers?.anthropic_api_key || process.env.ANTHROPIC_API_KEY, deepinfraApiKey: secretsData.llm_providers?.deepinfra_api_key || process.env.DEEPINFRA_API_KEY,
openaiApiKey: secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
googleApiKey: secretsData.llm_providers?.google_api_key || process.env.GOOGLE_API_KEY,
openrouterApiKey: secretsData.llm_providers?.openrouter_api_key || process.env.OPENROUTER_API_KEY,
defaultModel: { defaultModel: {
provider: configData.defaults?.model_provider || 'anthropic', provider: configData.defaults?.model_provider || 'deepinfra',
model: configData.defaults?.model || 'claude-sonnet-4-6', model: configData.defaults?.model || 'zai-org/GLM-5',
}, },
licenseModels: { licenseModels: {
free: { free: {
default: configData.license_models?.free?.default || 'claude-haiku-4-5-20251001', default: configData.license_models?.free?.default || 'zai-org/GLM-5',
cost_optimized: configData.license_models?.free?.cost_optimized || 'claude-haiku-4-5-20251001', cost_optimized: configData.license_models?.free?.cost_optimized || 'zai-org/GLM-5',
complex: configData.license_models?.free?.complex || 'claude-haiku-4-5-20251001', complex: configData.license_models?.free?.complex || 'zai-org/GLM-5',
allowed_models: configData.license_models?.free?.allowed_models || ['claude-haiku-4-5-20251001'], allowed_models: configData.license_models?.free?.allowed_models || ['zai-org/GLM-5'],
}, },
pro: { pro: {
default: configData.license_models?.pro?.default || 'claude-sonnet-4-6', default: configData.license_models?.pro?.default || 'zai-org/GLM-5',
cost_optimized: configData.license_models?.pro?.cost_optimized || 'claude-haiku-4-5-20251001', cost_optimized: configData.license_models?.pro?.cost_optimized || 'zai-org/GLM-5',
complex: configData.license_models?.pro?.complex || 'claude-sonnet-4-6', complex: configData.license_models?.pro?.complex || 'zai-org/GLM-5',
blocked_models: configData.license_models?.pro?.blocked_models || ['claude-opus-4-6'], blocked_models: configData.license_models?.pro?.blocked_models || ['Qwen/Qwen3-235B-A22B-Instruct-2507'],
}, },
enterprise: { enterprise: {
default: configData.license_models?.enterprise?.default || 'claude-sonnet-4-6', default: configData.license_models?.enterprise?.default || 'zai-org/GLM-5',
cost_optimized: configData.license_models?.enterprise?.cost_optimized || 'claude-haiku-4-5-20251001', cost_optimized: configData.license_models?.enterprise?.cost_optimized || 'zai-org/GLM-5',
complex: configData.license_models?.enterprise?.complex || 'claude-opus-4-6', complex: configData.license_models?.enterprise?.complex || 'Qwen/Qwen3-235B-A22B-Instruct-2507',
}, },
}, },
}, },
@@ -181,6 +178,9 @@ function loadConfig() {
storageClass: configData.kubernetes?.storage_class || process.env.SANDBOX_STORAGE_CLASS || '', storageClass: configData.kubernetes?.storage_class || process.env.SANDBOX_STORAGE_CLASS || '',
imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always', imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always',
}, },
// Search API keys
tavilyApiKey: secretsData.search?.tavily_api_key || process.env.TAVILY_API_KEY,
}; };
} }
@@ -200,10 +200,9 @@ const app = Fastify({
}, },
}); });
// Validate at least one LLM provider is configured // Validate LLM provider is configured
const hasAnyProvider = Object.values(config.providerConfig).some(key => !!key); if (!config.providerConfig.deepinfraApiKey) {
if (!hasAnyProvider) { app.log.error('DEEPINFRA_API_KEY is required');
app.log.error('At least one LLM provider API key is required (ANTHROPIC_API_KEY, OPENAI_API_KEY, GOOGLE_API_KEY, or OPENROUTER_API_KEY)');
process.exit(1); process.exit(1);
} }
@@ -407,6 +406,8 @@ app.log.debug('Initializing auth routes...');
const authRoutes = new AuthRoutes({ const authRoutes = new AuthRoutes({
authService, authService,
betterAuth, betterAuth,
containerManager,
userService,
}); });
// Register routes // Register routes
@@ -581,6 +582,7 @@ try {
ohlcService: () => ohlcService, ohlcService: () => ohlcService,
symbolIndexService: () => symbolIndexService, symbolIndexService: () => symbolIndexService,
workspaceManager: undefined, // Will be set per-session workspaceManager: undefined, // Will be set per-session
tavilyApiKey: config.tavilyApiKey,
}); });
// Register agent tool configurations // Register agent tool configurations
@@ -588,20 +590,27 @@ try {
toolRegistry.registerAgentTools({ toolRegistry.registerAgentTools({
agentName: 'main', agentName: 'main',
platformTools: ['symbol_lookup', 'get_chart_data'], platformTools: ['symbol_lookup', 'get_chart_data'],
mcpTools: ['category_list'], // category_list lets the main agent see existing research scripts mcpTools: ['python_list', 'backtest_strategy', 'list_active_strategies'],
}); });
// Research subagent: only MCP tools for script creation/execution // Research subagent: only MCP tools for script creation/execution
toolRegistry.registerAgentTools({ toolRegistry.registerAgentTools({
agentName: 'research', agentName: 'research',
platformTools: [], // No platform tools (works at script level) platformTools: [], // No platform tools (works at script level)
mcpTools: ['category_*', 'execute_research'], mcpTools: ['python_*', 'execute_research'],
}); });
// Code reviewer subagent: no tools by default // Indicator subagent: workspace patch + category tools + evaluate_indicator
toolRegistry.registerAgentTools({ toolRegistry.registerAgentTools({
agentName: 'code-reviewer', agentName: 'indicator',
platformTools: [], platformTools: [],
mcpTools: ['workspace_read', 'workspace_patch', 'python_*', 'evaluate_indicator'],
});
// Web explore subagent: platform search/fetch tools only (no MCP needed)
toolRegistry.registerAgentTools({
agentName: 'web-explore',
platformTools: ['web_search', 'fetch_page', 'arxiv_search'],
mcpTools: [], mcpTools: [],
}); });

View File

@@ -1,10 +1,14 @@
import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify'; import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify';
import type { AuthService } from '../auth/auth-service.js'; import type { AuthService } from '../auth/auth-service.js';
import type { BetterAuthInstance } from '../auth/better-auth-config.js'; import type { BetterAuthInstance } from '../auth/better-auth-config.js';
import type { ContainerManager } from '../k8s/container-manager.js';
import type { UserService } from '../db/user-service.js';
export interface AuthRoutesConfig { export interface AuthRoutesConfig {
authService: AuthService; authService: AuthService;
betterAuth: BetterAuthInstance; betterAuth: BetterAuthInstance;
containerManager: ContainerManager;
userService: UserService;
} }
/** /**
@@ -74,6 +78,14 @@ export class AuthRoutes {
// Ensure user has a license // Ensure user has a license
await this.config.authService.ensureUserLicense(result.userId, email); await this.config.authService.ensureUserLicense(result.userId, email);
// Warm up the sandbox container so it's likely ready by first login
this.config.userService.getUserLicense(result.userId).then((license) => {
if (license) {
this.config.containerManager.ensureContainerRunning(result.userId, license.license, false)
.catch((err) => app.log.warn({ err, userId: result.userId }, 'Container warmup on registration failed'));
}
}).catch((err) => app.log.warn({ err, userId: result.userId }, 'Failed to fetch license for container warmup'));
// Auto sign in after registration // Auto sign in after registration
const signInResult = await this.config.authService.signIn(email, password); const signInResult = await this.config.authService.signIn(email, password);

View File

@@ -112,28 +112,31 @@ export class OHLCService {
return this.formatHistoryResult(data, start_time, end_time, period_seconds, countback); return this.formatHistoryResult(data, start_time, end_time, period_seconds, countback);
} }
// Step 3: Request missing data via relay // Step 3: Request each missing range from the relay individually so we
// only fetch what's actually absent, not the whole requested window.
this.logger.info({ ticker, period_seconds, missingRanges: missingRanges.length, dataCount: data.length }, 'Requesting missing OHLC data from relay'); this.logger.info({ ticker, period_seconds, missingRanges: missingRanges.length, dataCount: data.length }, 'Requesting missing OHLC data from relay');
try { try {
const notification = await this.relayClient.requestHistoricalOHLC( for (const [rangeStart, rangeEnd] of missingRanges) {
ticker, const notification = await this.relayClient.requestHistoricalOHLC(
period_seconds, ticker,
start_time, period_seconds,
end_time rangeStart,
// countback is NOT passed as a limit — the ingestor must fetch the full range. rangeEnd
// Countback is applied below after we have the complete dataset. // countback is NOT passed as a limit — the ingestor must fetch the full range.
); // Countback is applied below after we have the complete dataset.
);
this.logger.info({ this.logger.info({
ticker, ticker,
period_seconds, period_seconds,
row_count: notification.row_count, rangeStart: rangeStart.toString(),
status: notification.status, rangeEnd: rangeEnd.toString(),
}, 'Historical data request completed'); row_count: notification.row_count,
status: notification.status,
}, 'Relay range request completed');
}
// Step 4: Query Iceberg again for complete dataset // Step 4: Query Iceberg again for complete dataset
this.logger.info({ ticker, period_seconds, notification_status: notification.status, row_count: notification.row_count }, 'Relay notification received, re-querying Iceberg');
data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time); data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
this.logger.info({ ticker, period_seconds, dataCount: data.length }, 'Final Iceberg query complete, returning result'); this.logger.info({ ticker, period_seconds, dataCount: data.length }, 'Final Iceberg query complete, returning result');

View File

@@ -27,7 +27,8 @@ export function createMCPToolWrapper(
toolInfo: MCPToolInfo, toolInfo: MCPToolInfo,
mcpClient: MCPClientConnector, mcpClient: MCPClientConnector,
logger: FastifyBaseLogger, logger: FastifyBaseLogger,
onImage?: (image: { data: string; mimeType: string }) => void onImage?: (image: { data: string; mimeType: string }) => void,
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
): DynamicStructuredTool { ): DynamicStructuredTool {
// Convert MCP input schema to Zod schema // Convert MCP input schema to Zod schema
const zodSchema = mcpInputSchemaToZod(toolInfo.inputSchema); const zodSchema = mcpInputSchemaToZod(toolInfo.inputSchema);
@@ -42,6 +43,28 @@ export function createMCPToolWrapper(
logger.info({ tool: toolInfo.name }, 'MCP tool call completed'); logger.info({ tool: toolInfo.name }, 'MCP tool call completed');
// Fire workspace mutation callback when workspace_patch or workspace_write succeeds.
// The sandbox returns {"success": true, "data": <newState>} as a text content item.
if (
onWorkspaceMutation &&
(toolInfo.name === 'workspace_patch' || toolInfo.name === 'workspace_write')
) {
const content = (result as any)?.content;
if (Array.isArray(content)) {
for (const item of content) {
if (item.type === 'text' && item.text) {
try {
const parsed = JSON.parse(item.text);
if (parsed?.success && parsed?.data !== undefined) {
onWorkspaceMutation((input as any).store_name as string, parsed.data);
}
} catch { /* ignore parse errors */ }
break; // only need first text item
}
}
}
}
// Handle different MCP result formats // Handle different MCP result formats
if (typeof result === 'string') { if (typeof result === 'string') {
return result; return result;
@@ -180,7 +203,10 @@ export function createMCPToolWrappers(
toolInfos: MCPToolInfo[], toolInfos: MCPToolInfo[],
mcpClient: MCPClientConnector, mcpClient: MCPClientConnector,
logger: FastifyBaseLogger, logger: FastifyBaseLogger,
onImage?: (image: { data: string; mimeType: string }) => void onImage?: (image: { data: string; mimeType: string }) => void,
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
): DynamicStructuredTool[] { ): DynamicStructuredTool[] {
return toolInfos.map(toolInfo => createMCPToolWrapper(toolInfo, mcpClient, logger, onImage)); return toolInfos.map(toolInfo =>
createMCPToolWrapper(toolInfo, mcpClient, logger, onImage, onWorkspaceMutation)
);
} }

View File

@@ -0,0 +1,65 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
/**
* ArXiv Search Tool
*
* Searches arXiv for academic papers using the LangChain ArxivRetriever.
* Free, no API key required.
*/
export interface ArxivSearchToolConfig {
logger: FastifyBaseLogger;
}
export function createArxivSearchTool(config: ArxivSearchToolConfig): DynamicStructuredTool {
  const { logger } = config;

  return new DynamicStructuredTool({
    name: 'arxiv_search',
    description: 'Search arXiv for academic papers. Returns titles, authors, abstracts, and PDF links. Use this for scientific or technical research queries instead of web_search.',
    schema: z.object({
      query: z.string().describe('The research query'),
      max_results: z.number().optional().default(5).describe('Maximum number of papers to return (default: 5)'),
    }),
    func: async ({ query, max_results }) => {
      logger.debug({ query, max_results }, 'Executing arxiv_search tool');
      try {
        // Retriever is loaded lazily so the dependency is only paid on first use.
        const { ArxivRetriever } = await import('@langchain/community/retrievers/arxiv');
        const retriever = new ArxivRetriever({
          getFullDocuments: false,
          maxSearchResults: max_results,
        });
        const docs = await retriever.invoke(query);

        // Shape one retriever document into a plain result entry.
        const toEntry = (doc: (typeof docs)[number]) => {
          const meta = doc.metadata as Record<string, any>;
          const abstractUrl = typeof meta.url === 'string' ? meta.url : undefined;
          return {
            title: meta.title,
            authors: Array.isArray(meta.authors) ? meta.authors : [],
            abstract: doc.pageContent,
            published: meta.published,
            url: meta.url,
            // Derive PDF URL from abstract URL: /abs/ID -> /pdf/ID
            pdf_url: abstractUrl?.replace('/abs/', '/pdf/'),
          };
        };

        const results = docs.map(toEntry);
        logger.info({ query, resultCount: results.length }, 'arXiv search completed');
        return JSON.stringify({ query, results });
      } catch (error) {
        logger.error({ error, query }, 'arxiv_search tool failed');
        return JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
      }
    },
  });
}

View File

@@ -0,0 +1,80 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
const MAX_CONTENT_LENGTH = 50_000;
/**
* Fetch Page Tool
*
* Fetches a URL and returns its content as text/markdown.
* - PDFs are converted to text using pdf-parse
* - HTML pages are scraped with cheerio
* - Output is truncated to 50k characters
*/
export interface FetchPageToolConfig {
logger: FastifyBaseLogger;
}
/**
 * Build the `fetch_page` tool.
 *
 * Fetches a URL (30s timeout), extracts text — via pdf-parse for PDFs,
 * cheerio for HTML — and returns JSON `{ url, content, truncated }`, with
 * content capped at MAX_CONTENT_LENGTH characters. Errors are returned as
 * JSON rather than thrown so the agent can react to them.
 */
export function createFetchPageTool(config: FetchPageToolConfig): DynamicStructuredTool {
  const { logger } = config;
  return new DynamicStructuredTool({
    name: 'fetch_page',
    description: 'Fetch a web page or PDF and return its text content. PDFs are automatically converted to markdown. Use this after web_search or arxiv_search to read the full content of a result.',
    schema: z.object({
      url: z.string().url().describe('The URL to fetch'),
    }),
    func: async ({ url }) => {
      logger.debug({ url }, 'Executing fetch_page tool');
      try {
        const response = await fetch(url, {
          headers: { 'User-Agent': 'Mozilla/5.0 (compatible; research-agent/1.0)' },
          signal: AbortSignal.timeout(30_000),
        });
        if (!response.ok) {
          return JSON.stringify({ error: `HTTP ${response.status}: ${response.statusText}`, url });
        }
        const contentType = response.headers.get('content-type') ?? '';
        const isPdf = contentType.includes('pdf') || url.toLowerCase().endsWith('.pdf');
        let content: string;
        if (isPdf) {
          const { PDFParse } = await import('pdf-parse');
          // response.arrayBuffer() already yields a standalone ArrayBuffer, so
          // pass it straight through. The previous Buffer.from(...) followed by
          // buffer.buffer.slice(...) round-trip copied the same bytes twice
          // for no benefit.
          const parser = new PDFParse({ data: await response.arrayBuffer() });
          const result = await parser.getText();
          content = result.text;
          logger.debug({ url, chars: content.length, pages: result.pages.length }, 'PDF text extracted');
        } else {
          const html = await response.text();
          const { load } = await import('cheerio');
          const $ = load(html);
          // Remove non-content elements
          $('script, style, nav, footer, header, aside, [role="navigation"]').remove();
          // Prefer article/main content, falling back to the whole body
          const main = $('article, main, [role="main"]').first();
          content = (main.length ? main : $('body')).text().replace(/\s{3,}/g, '\n\n').trim();
          logger.debug({ url, chars: content.length }, 'HTML page scraped');
        }
        const truncated = content.length > MAX_CONTENT_LENGTH;
        const output = truncated ? content.slice(0, MAX_CONTENT_LENGTH) + '\n\n[content truncated]' : content;
        return JSON.stringify({ url, content: output, truncated });
      } catch (error) {
        logger.error({ error, url }, 'fetch_page tool failed');
        return JSON.stringify({ error: error instanceof Error ? error.message : String(error), url });
      }
    },
  });
}

View File

@@ -0,0 +1,53 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { IndicatorSubagent } from '../../harness/subagents/indicator/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
export interface IndicatorAgentToolConfig {
indicatorSubagent: IndicatorSubagent;
context: SubagentContext;
logger: FastifyBaseLogger;
}
/**
* Creates a LangChain tool that delegates to the indicator subagent.
* Mirrors the pattern of research-agent.tool.ts.
*/
/**
 * Creates a LangChain tool that delegates to the indicator subagent.
 * Mirrors the pattern of research-agent.tool.ts.
 */
export function createIndicatorAgentTool(config: IndicatorAgentToolConfig): DynamicStructuredTool {
  const { indicatorSubagent: subagent, context: subagentContext, logger } = config;

  const run = async ({ instruction }: { instruction: string }): Promise<string> => {
    logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to indicator subagent');
    try {
      return await subagent.execute(subagentContext, instruction);
    } catch (error) {
      logger.error({ error, errorMessage: (error as Error)?.message }, 'Indicator subagent failed');
      throw error;
    }
  };

  return new DynamicStructuredTool({
    name: 'indicator',
    description: `Delegate to the indicator subagent for all indicator-related tasks on the chart.
Use this tool for:
- Reading which indicators are currently on the chart and explaining what they show
- Adding indicators to the chart ("show RSI", "add Bollinger Bands with std=1.5")
- Modifying indicator parameters ("change MACD fast to 8", "set RSI length to 21")
- Removing indicators ("remove all moving averages", "clear the volume indicators")
- Toggling indicator visibility
- Creating custom indicators using Python scripts
- Recommending indicators for a given strategy or analysis goal
ALWAYS use this tool for any request about the chart's indicators.
NEVER modify the indicators workspace store directly.`,
    schema: z.object({
      instruction: z.string().describe(
        'The indicator task to perform. Be specific about which indicators, parameters, ' +
        'and what changes are needed. Include relevant context like the current symbol ' +
        'if the user mentioned it.'
      ),
    }),
    func: run,
  });
}

View File

@@ -0,0 +1,49 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { WebExploreSubagent } from '../../harness/subagents/web-explore/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
/** Dependencies required to construct the web-explore delegation tool. */
export interface WebExploreAgentToolConfig {
  /** Subagent that performs the search/summarize work (see execute below). */
  webExploreSubagent: WebExploreSubagent;
  /** Execution context forwarded unchanged to the subagent on every call. */
  context: SubagentContext;
  /** Logger used for delegation/info and failure/error records. */
  logger: FastifyBaseLogger;
}
/**
* Creates a LangChain tool that delegates to the web-explore subagent.
* The subagent decides whether to use web search or arXiv based on the instruction.
*/
/**
 * Creates a LangChain tool that delegates to the web-explore subagent.
 * The subagent decides whether to use web search or arXiv based on the instruction.
 */
export function createWebExploreAgentTool(config: WebExploreAgentToolConfig): DynamicStructuredTool {
  const toolDescription = `Search the web or academic databases and return a summarized answer.
Use this tool when the user asks about:
- Current events, news, or real-time information
- Documentation, tutorials, or how-to guides
- Academic papers, research findings, or scientific topics
- Any topic that benefits from external sources
The subagent will search the web (or arXiv for academic queries), fetch relevant content, and return a markdown summary with cited sources.`;

  const toolSchema = z.object({
    instruction: z.string().describe(
      'What to search for and summarize. Be specific — include the topic, what aspects matter, ' +
      'and any context that helps narrow the search (e.g. "recent papers on momentum factor in equities" ' +
      'or "how to configure rate limiting in Fastify").'
    ),
  });

  // Named handler keeps the tool construction below declarative.
  const delegate = async ({ instruction }: { instruction: string }): Promise<string> => {
    config.logger.info({ instruction: instruction.slice(0, 100) }, 'Delegating to web-explore subagent');
    try {
      return await config.webExploreSubagent.execute(config.context, instruction);
    } catch (error) {
      config.logger.error({ error, errorMessage: (error as Error)?.message }, 'Web explore subagent failed');
      throw error;
    }
  };

  return new DynamicStructuredTool({
    name: 'web_explore',
    description: toolDescription,
    schema: toolSchema,
    func: delegate,
  });
}

View File

@@ -0,0 +1,65 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
/**
* Web Search Tool
*
* Calls the Tavily REST API directly. The config interface is intentionally
* minimal so the underlying provider can be swapped without touching callers.
*/
/** Dependencies required to construct the web-search tool. */
export interface WebSearchToolConfig {
  /** Tavily API key, sent as `api_key` in the search request body. */
  apiKey: string;
  /** Logger used for request/debug, result/info, and failure/error records. */
  logger: FastifyBaseLogger;
}
/**
 * Create the `web_search` platform tool.
 *
 * POSTs to the Tavily `/search` endpoint and returns a JSON string of
 * `{ query, results: [{ title, url, snippet }] }`. Failures are returned as
 * `{ error, query }` (not thrown) so the calling agent can observe them.
 */
export function createWebSearchTool(config: WebSearchToolConfig): DynamicStructuredTool {
  const { apiKey, logger } = config;
  return new DynamicStructuredTool({
    name: 'web_search',
    description: 'Search the web. Returns titles, URLs, and content summaries. Use this for general web searches. For academic/scientific papers, prefer arxiv_search instead.',
    schema: z.object({
      query: z.string().describe('The search query'),
      max_results: z.number().optional().default(8).describe('Maximum number of results to return (default: 8)'),
    }),
    func: async ({ query, max_results }) => {
      logger.debug({ query, max_results }, 'Executing web_search tool');
      try {
        const response = await fetch('https://api.tavily.com/search', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({
            api_key: apiKey,
            query,
            max_results,
            search_depth: 'basic',
          }),
          // Bound the request so a hung provider can't stall the agent loop.
          signal: AbortSignal.timeout(30_000),
        });
        if (!response.ok) {
          const text = await response.text();
          throw new Error(`Tavily API error ${response.status}: ${text}`);
        }
        const data = await response.json() as { results?: Array<{ title: string; url: string; content: string }> };
        const items = (data.results ?? []).map(r => ({
          title: r.title,
          url: r.url,
          snippet: r.content,
        }));
        logger.info({ query, resultCount: items.length }, 'Web search completed');
        return JSON.stringify({ query, results: items });
      } catch (error) {
        logger.error({ error, query, errorMessage: error instanceof Error ? error.message : String(error) }, 'web_search tool failed');
        // Echo the query in the error payload for parity with the success
        // path (and with fetch_page, whose error payload echoes the url).
        return JSON.stringify({ error: error instanceof Error ? error.message : String(error), query });
      }
    },
  });
}

View File

@@ -6,6 +6,9 @@ import type { SymbolIndexService } from '../services/symbol-index-service.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js'; import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import { createSymbolLookupTool } from './platform/symbol-lookup.tool.js'; import { createSymbolLookupTool } from './platform/symbol-lookup.tool.js';
import { createGetChartDataTool } from './platform/get-chart-data.tool.js'; import { createGetChartDataTool } from './platform/get-chart-data.tool.js';
import { createWebSearchTool } from './platform/web-search.tool.js';
import { createFetchPageTool } from './platform/fetch-page.tool.js';
import { createArxivSearchTool } from './platform/arxiv-search.tool.js';
import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.js'; import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.js';
/** /**
@@ -13,13 +16,13 @@ import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.
* Specifies which tools are available to which agent * Specifies which tools are available to which agent
*/ */
export interface AgentToolConfig { export interface AgentToolConfig {
/** Agent name (e.g., 'main', 'research', 'code-reviewer') */ /** Agent name (e.g., 'main', 'research', 'web-explore') */
agentName: string; agentName: string;
/** Platform tool names to include */ /** Platform tool names to include */
platformTools: string[]; platformTools: string[];
/** MCP tool patterns/names to include (supports wildcards like 'category_*') */ /** MCP tool patterns/names to include (supports wildcards like 'python_*') */
mcpTools: string[]; mcpTools: string[];
} }
@@ -31,6 +34,7 @@ export interface PlatformServices {
ohlcService?: OHLCService | (() => OHLCService | undefined); ohlcService?: OHLCService | (() => OHLCService | undefined);
symbolIndexService?: SymbolIndexService | (() => SymbolIndexService | undefined); symbolIndexService?: SymbolIndexService | (() => SymbolIndexService | undefined);
workspaceManager?: WorkspaceManager | (() => WorkspaceManager | undefined); workspaceManager?: WorkspaceManager | (() => WorkspaceManager | undefined);
tavilyApiKey?: string;
} }
/** /**
@@ -81,7 +85,8 @@ export class ToolRegistry {
mcpClient?: MCPClientConnector, mcpClient?: MCPClientConnector,
availableMCPTools?: MCPToolInfo[], availableMCPTools?: MCPToolInfo[],
workspaceManager?: WorkspaceManager, workspaceManager?: WorkspaceManager,
onImage?: (image: { data: string; mimeType: string }) => void onImage?: (image: { data: string; mimeType: string }) => void,
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
): Promise<DynamicStructuredTool[]> { ): Promise<DynamicStructuredTool[]> {
const config = this.agentToolConfigs.get(agentName); const config = this.agentToolConfigs.get(agentName);
@@ -105,7 +110,7 @@ export class ToolRegistry {
// Add MCP tools (if MCP client and tools are available) // Add MCP tools (if MCP client and tools are available)
if (mcpClient && availableMCPTools && availableMCPTools.length > 0) { if (mcpClient && availableMCPTools && availableMCPTools.length > 0) {
const filteredMCPTools = this.filterMCPTools(availableMCPTools, config.mcpTools); const filteredMCPTools = this.filterMCPTools(availableMCPTools, config.mcpTools);
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage); const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage, onWorkspaceMutation);
tools.push(...mcpToolInstances); tools.push(...mcpToolInstances);
this.logger.debug( this.logger.debug(
@@ -180,6 +185,25 @@ export class ToolRegistry {
break; break;
} }
case 'web_search': {
if (this.platformServices.tavilyApiKey) {
tool = createWebSearchTool({ apiKey: this.platformServices.tavilyApiKey, logger: this.logger });
} else {
this.logger.warn('TAVILY_API_KEY not configured — web_search tool unavailable');
}
break;
}
case 'fetch_page': {
tool = createFetchPageTool({ logger: this.logger });
break;
}
case 'arxiv_search': {
tool = createArxivSearchTool({ logger: this.logger });
break;
}
default: default:
this.logger.warn({ tool: toolName }, 'Unknown platform tool'); this.logger.warn({ tool: toolName }, 'Unknown platform tool');
return null; return null;
@@ -202,7 +226,7 @@ export class ToolRegistry {
/** /**
* Filter MCP tools based on patterns/names * Filter MCP tools based on patterns/names
* Supports wildcards like 'category_*' or exact names like 'execute_research' * Supports wildcards like 'python_*' or exact names like 'execute_research'
*/ */
private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] { private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] {
if (patterns.length === 0) { if (patterns.length === 0) {
@@ -221,7 +245,7 @@ export class ToolRegistry {
/** /**
* Check if a tool name matches a pattern * Check if a tool name matches a pattern
* Supports wildcards: 'category_*' matches 'category_write', 'category_read', etc. * Supports wildcards: 'python_*' matches 'python_write', 'python_read', etc.
*/ */
private matchesPattern(toolName: string, pattern: string): boolean { private matchesPattern(toolName: string, pattern: string): boolean {
if (pattern === toolName) { if (pattern === toolName) {

View File

@@ -11,11 +11,11 @@
* TradingView bar format (used by web frontend) * TradingView bar format (used by web frontend)
*/ */
export interface TradingViewBar { export interface TradingViewBar {
time: number; // Unix timestamp in SECONDS time: number; // Unix timestamp in SECONDS
open: number | null; // null for gap bars (no trades that period) open: number; // always non-null — ingestor forward-fills interior gaps
high: number | null; high: number;
low: number | null; low: number;
close: number | null; close: number;
volume?: number | null; volume?: number | null;
// Optional extra columns from ohlc.proto // Optional extra columns from ohlc.proto
buy_vol?: number; buy_vol?: number;
@@ -31,13 +31,13 @@ export interface TradingViewBar {
* Backend OHLC format (from Iceberg) * Backend OHLC format (from Iceberg)
*/ */
export interface BackendOHLC { export interface BackendOHLC {
timestamp: bigint; // Unix timestamp in NANOSECONDS — kept as bigint to preserve precision timestamp: bigint; // Unix timestamp in NANOSECONDS — kept as bigint to preserve precision
ticker: string; // Nautilus format: "BTC/USDT.BINANCE" ticker: string; // Nautilus format: "BTC/USDT.BINANCE"
period_seconds: number; period_seconds: number;
open: number | null; // null for gap bars (no trades that period) open: number; // always non-null — ingestor forward-fills interior gaps
high: number | null; high: number;
low: number | null; low: number;
close: number | null; close: number;
volume: number | null; volume: number | null;
} }

View File

@@ -96,7 +96,7 @@ export const LICENSE_TIER_TEMPLATES: Record<LicenseTier, License> = {
memoryRequest: '512Mi', memoryLimit: '2Gi', memoryRequest: '512Mi', memoryLimit: '2Gi',
cpuRequest: '250m', cpuLimit: '2000m', cpuRequest: '250m', cpuLimit: '2000m',
storage: '10Gi', tmpSizeLimit: '256Mi', storage: '10Gi', tmpSizeLimit: '256Mi',
enableIdleShutdown: true, idleTimeoutMinutes: 60, enableIdleShutdown: false, idleTimeoutMinutes: 0,
}, },
}, },
enterprise: { enterprise: {

View File

@@ -55,6 +55,20 @@ export class ContainerSync {
this.logger = logger.child({ component: 'ContainerSync' }); this.logger = logger.child({ component: 'ContainerSync' });
} }
/**
* Parse a raw MCP callTool response into the tool's return value.
* MCP tool results are wrapped as: { content: [{ type: 'text', text: '<json>' }] }
*/
private parseMcpResult(raw: unknown): unknown {
const r = raw as any;
const text = r?.content?.[0]?.text ?? r?.[0]?.text;
if (typeof text === 'string') {
return JSON.parse(text);
}
// Already unwrapped (shouldn't happen in practice)
return raw;
}
/** /**
* Load a workspace store from the container. * Load a workspace store from the container.
* Returns the stored state or indicates the store doesn't exist. * Returns the stored state or indicates the store doesn't exist.
@@ -68,7 +82,7 @@ export class ContainerSync {
try { try {
this.logger.debug({ store: storeName }, 'Loading store from container'); this.logger.debug({ store: storeName }, 'Loading store from container');
const result = (await this.mcpClient.callTool('workspace_read', { const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_read', {
store_name: storeName, store_name: storeName,
})) as { exists: boolean; data?: unknown; error?: string }; })) as { exists: boolean; data?: unknown; error?: string };
@@ -104,7 +118,7 @@ export class ContainerSync {
try { try {
this.logger.debug({ store: storeName }, 'Saving store to container'); this.logger.debug({ store: storeName }, 'Saving store to container');
const result = (await this.mcpClient.callTool('workspace_write', { const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_write', {
store_name: storeName, store_name: storeName,
data: state, data: state,
})) as { success: boolean; error?: string }; })) as { success: boolean; error?: string };
@@ -136,7 +150,7 @@ export class ContainerSync {
try { try {
this.logger.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container'); this.logger.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container');
const result = (await this.mcpClient.callTool('workspace_patch', { const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_patch', {
store_name: storeName, store_name: storeName,
patch, patch,
})) as { success: boolean; data?: unknown; error?: string }; })) as { success: boolean; data?: unknown; error?: string };

View File

@@ -59,12 +59,12 @@ class SyncEntry {
/** /**
* Set state directly (used for loading from container). * Set state directly (used for loading from container).
* Resets sequence to 0. * Sets sequence to 1 so clients at seq 0 (empty state) receive a full snapshot.
*/ */
setState(newState: unknown): void { setState(newState: unknown): void {
this.state = deepClone(newState); this.state = deepClone(newState);
this.lastSnapshot = deepClone(newState); this.lastSnapshot = deepClone(newState);
this.seq = 0; this.seq = 1;
this.history = []; this.history = [];
} }

View File

@@ -272,12 +272,84 @@ export interface Shape {
*/ */
export type ShapesStore = Record<string, Shape>; export type ShapesStore = Record<string, Shape>;
/**
* Parameter schema entry for a custom indicator.
*/
export interface CustomIndicatorParam {
type: 'int' | 'float' | 'bool' | 'string';
default: any;
description?: string;
min?: number;
max?: number;
}
/**
* Per-series plot configuration for a custom indicator output column.
* style maps to LineStudyPlotStyle: 0=Line, 1=Histogram, 3=Dots/Cross,
* 4=Area, 5=Columns, 6=Circles, 9=StepLine.
*/
export interface PlotConfig {
style: number;
color?: string;
linewidth?: number;
visible?: boolean;
}
/**
* Shaded region between two plots ("plot_plot") or two bands ("hline_hline").
*/
export interface FilledAreaConfig {
id: string;
type: 'plot_plot' | 'hline_hline';
series1: string;
series2: string;
color?: string;
opacity?: number;
}
/**
* Horizontal reference line (e.g. RSI overbought/oversold level).
* linestyle: 0=solid, 1=dotted, 2=dashed.
*/
export interface BandConfig {
id: string;
value: number;
color?: string;
linewidth?: number;
linestyle?: number;
visible?: boolean;
}
/**
* Output column descriptor for a custom indicator.
*/
export interface CustomIndicatorColumn {
name: string;
display_name?: string;
description?: string;
plot?: PlotConfig;
}
/**
* Metadata needed to auto-construct a TradingView custom study.
* Populated by the indicator subagent when adding a custom_ indicator.
*/
export interface CustomIndicatorMetadata {
display_name: string;
parameters: Record<string, CustomIndicatorParam>;
input_series: string[];
output_columns: CustomIndicatorColumn[];
pane: 'price' | 'separate';
filled_areas?: FilledAreaConfig[];
bands?: BandConfig[];
}
/** /**
* Indicator instance on TradingView chart. * Indicator instance on TradingView chart.
*/ */
export interface IndicatorInstance { export interface IndicatorInstance {
id: string; id: string;
talib_name: string; pandas_ta_name: string;
instance_name: string; instance_name: string;
parameters: Record<string, any>; parameters: Record<string, any>;
tv_study_id?: string; tv_study_id?: string;
@@ -289,6 +361,8 @@ export interface IndicatorInstance {
created_at?: number; created_at?: number;
modified_at?: number; modified_at?: number;
original_id?: string; original_id?: string;
/** Populated for custom_ indicators; drives TV custom study auto-construction. */
custom_metadata?: CustomIndicatorMetadata;
} }
/** /**

View File

@@ -45,6 +45,7 @@ export class CCXTFetcher {
const exchange = this.getExchange(exchangeName); const exchange = this.getExchange(exchangeName);
// Load market info from CCXT // Load market info from CCXT
this.logger.info({ exchangeName, symbol }, 'Loading markets for metadata');
await exchange.loadMarkets(); await exchange.loadMarkets();
const market = exchange.market(symbol); const market = exchange.market(symbol);
@@ -108,8 +109,9 @@ export class CCXTFetcher {
// Map period seconds to CCXT timeframe // Map period seconds to CCXT timeframe
const timeframe = this.secondsToTimeframe(periodSeconds); const timeframe = this.secondsToTimeframe(periodSeconds);
const marketsLoaded = exchange.markets != null && Object.keys(exchange.markets).length > 0;
this.logger.info( this.logger.info(
{ ticker, timeframe, startMs, endMs, limit }, { ticker, timeframe, startMs, endMs, limit, marketsLoaded },
'Fetching historical OHLC' 'Fetching historical OHLC'
); );
@@ -120,44 +122,76 @@ export class CCXTFetcher {
// The caller's limit/countback is irrelevant to how much we need to fetch from the exchange. // The caller's limit/countback is irrelevant to how much we need to fetch from the exchange.
const PAGE_SIZE = 1000; const PAGE_SIZE = 1000;
const FETCH_RETRIES = 3;
const FETCH_RETRY_DELAY_MS = 5000;
while (since < endMs) { while (since < endMs) {
try { let candles;
const candles = await exchange.fetchOHLCV( let lastError;
symbol, for (let attempt = 1; attempt <= FETCH_RETRIES; attempt++) {
timeframe, try {
since, candles = await exchange.fetchOHLCV(symbol, timeframe, since, PAGE_SIZE);
PAGE_SIZE lastError = null;
);
if (candles.length === 0) {
break; break;
} catch (error) {
lastError = error;
const isRetryable = error.constructor?.name === 'NetworkError' ||
error.constructor?.name === 'RequestTimeout' ||
error.constructor?.name === 'ExchangeNotAvailable';
this.logger.warn(
{
errorType: error.constructor?.name,
error: error.message,
errorUrl: error.url,
ticker,
since,
attempt,
retryable: isRetryable
},
'OHLC fetch attempt failed'
);
if (!isRetryable || attempt === FETCH_RETRIES) break;
await exchange.sleep(FETCH_RETRY_DELAY_MS * attempt);
} }
}
// Filter candles within the requested time range if (lastError) {
const filteredCandles = candles.filter(c => {
const timestamp = c[0];
return timestamp >= startMs && timestamp < endMs; // endMs is exclusive
});
fetchedCandles.push(...filteredCandles);
// Advance to next batch start
const lastTimestamp = candles[candles.length - 1][0];
since = lastTimestamp + (periodSeconds * 1000);
if (since >= endMs) {
break;
}
// Apply rate limiting
await exchange.sleep(exchange.rateLimit);
} catch (error) {
this.logger.error( this.logger.error(
{ error: error.message, ticker, since }, {
errorType: lastError.constructor?.name,
error: lastError.message,
errorUrl: lastError.url,
ticker,
since,
marketsLoaded: exchange.markets != null && Object.keys(exchange.markets).length > 0,
stack: lastError.stack
},
'Error fetching OHLC' 'Error fetching OHLC'
); );
throw error; throw lastError;
} }
if (candles.length === 0) {
break;
}
// Filter candles within the requested time range
const filteredCandles = candles.filter(c => {
const timestamp = c[0];
return timestamp >= startMs && timestamp < endMs; // endMs is exclusive
});
fetchedCandles.push(...filteredCandles);
// Advance to next batch start
const lastTimestamp = candles[candles.length - 1][0];
since = lastTimestamp + (periodSeconds * 1000);
if (since >= endMs) {
break;
}
// Apply rate limiting
await exchange.sleep(exchange.rateLimit);
} }
// Get metadata for proper denomination // Get metadata for proper denomination
@@ -173,32 +207,44 @@ export class CCXTFetcher {
const periodMs = periodSeconds * 1000; const periodMs = periodSeconds * 1000;
// Only create null gap bars for interior gaps — periods where real data exists // Forward-fill interior gaps — periods between the first and last real bar
// on BOTH sides (i.e., between the first and last real bar). Do NOT append // where the exchange returned no candle. Edge gaps (before firstRealTs or
// null bars before the first real bar or after the last real bar: those edge // after lastRealTs) are left absent; they'll be caught by gap detection and
// positions may be in-progress candles or simply outside the exchange's history, // trigger a targeted backfill request.
// and we have no positive signal that a gap exists there.
const realTimestamps = [...fetchedByTs.keys()].sort((a, b) => a - b); const realTimestamps = [...fetchedByTs.keys()].sort((a, b) => a - b);
const firstRealTs = realTimestamps[0]; const firstRealTs = realTimestamps[0];
const lastRealTs = realTimestamps[realTimestamps.length - 1]; const lastRealTs = realTimestamps[realTimestamps.length - 1];
const allCandles = []; const allCandles = [];
let gapCount = 0; let gapCount = 0;
let prevClose = null;
for (let ts = firstRealTs; ts <= lastRealTs; ts += periodMs) { for (let ts = firstRealTs; ts <= lastRealTs; ts += periodMs) {
if (fetchedByTs.has(ts)) { if (fetchedByTs.has(ts)) {
allCandles.push(this.convertToOHLC(fetchedByTs.get(ts), ticker, periodSeconds, metadata)); const bar = this.convertToOHLC(fetchedByTs.get(ts), ticker, periodSeconds, metadata);
} else { prevClose = bar.close;
// Interior gap — confirmed by real bars on both sides allCandles.push(bar);
} else if (prevClose !== null) {
// Interior gap — forward-fill with previous close, zero volume
gapCount++; gapCount++;
allCandles.push(this.createGapBar(ts, ticker, periodSeconds, metadata)); allCandles.push({
ticker,
timestamp: (ts * 1_000_000).toString(),
open: prevClose,
high: prevClose,
low: prevClose,
close: prevClose,
volume: '0',
open_time: (ts * 1_000_000).toString(),
close_time: ((ts + periodSeconds * 1000) * 1_000_000).toString()
});
} }
} }
if (gapCount > 0) { if (gapCount > 0) {
this.logger.info( this.logger.info(
{ ticker, gapCount, total: allCandles.length }, { ticker, gapCount, total: allCandles.length },
'Filled interior gap bars for missing periods in source data' 'Forward-filled interior gap bars with previous close price'
); );
} }
@@ -264,24 +310,6 @@ export class CCXTFetcher {
}; };
} }
/**
* Create a gap bar for a period with no trade data.
* All OHLC/volume fields are null — the timestamp slot is reserved but unpopulated.
*/
createGapBar(timestampMs, ticker, periodSeconds, metadata) {
return {
ticker,
timestamp: (timestampMs * 1_000_000).toString(), // Convert ms to nanoseconds
open: null,
high: null,
low: null,
close: null,
volume: null,
open_time: (timestampMs * 1_000_000).toString(),
close_time: ((timestampMs + periodSeconds * 1000) * 1_000_000).toString()
};
}
/** /**
* Convert CCXT trade to our Tick format * Convert CCXT trade to our Tick format
* Uses precision fields from market metadata for proper integer representation * Uses precision fields from market metadata for proper integer representation

View File

@@ -285,7 +285,14 @@ class IngestorWorker {
} catch (error) { } catch (error) {
this.logger.error( this.logger.error(
{ error: error.message, request_id, ticker }, {
errorType: error.constructor?.name,
error: error.message,
errorUrl: error.url,
request_id,
ticker,
stack: error.stack
},
'Failed to process historical request' 'Failed to process historical request'
); );

View File

@@ -181,16 +181,14 @@ export class KafkaProducer {
errorMessage: metadata.error_message errorMessage: metadata.error_message
}, },
rows: ohlcData.map(candle => { rows: ohlcData.map(candle => {
// null open/high/low/close signals a gap bar (no trades that period).
// Omit fields from the protobuf message when null so hasOpen() etc. return false.
const row = { const row = {
timestamp: candle.timestamp, timestamp: candle.timestamp,
ticker: candle.ticker, ticker: candle.ticker,
open: candle.open,
high: candle.high,
low: candle.low,
close: candle.close,
}; };
if (candle.open != null) row.open = candle.open;
if (candle.high != null) row.high = candle.high;
if (candle.low != null) row.low = candle.low;
if (candle.close != null) row.close = candle.close;
if (candle.volume != null) row.volume = candle.volume; if (candle.volume != null) row.volume = candle.volume;
return row; return row;
}) })

View File

@@ -8,12 +8,12 @@ message OHLC {
// Timestamp in nanoseconds since epoch // Timestamp in nanoseconds since epoch
uint64 timestamp = 1; uint64 timestamp = 1;
// Prices are stored as doubles (Nautilus-aligned, no denominator needed). // Prices are stored as integers (Nautilus-aligned with precision denominator).
// Optional to support null bars for periods with no trades. // Always non-null — ingestor forward-fills interior gaps with the previous close.
optional int64 open = 2; int64 open = 2;
optional int64 high = 3; int64 high = 3;
optional int64 low = 4; int64 low = 4;
optional int64 close = 5; int64 close = 5;
optional int64 volume = 6; optional int64 volume = 6;
optional int64 buy_vol = 7; optional int64 buy_vol = 7;
optional int64 sell_vol = 8; optional int64 sell_vol = 8;

View File

@@ -40,6 +40,7 @@ WORKDIR /app
# Install runtime dependencies only # Install runtime dependencies only
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
libzmq5 \ libzmq5 \
git \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Create non-root user # Create non-root user
@@ -76,6 +77,7 @@ USER dexorder
# Environment variables (can be overridden in k8s) # Environment variables (can be overridden in k8s)
ENV PYTHONUNBUFFERED=1 \ ENV PYTHONUNBUFFERED=1 \
MPLCONFIGDIR=/tmp \ MPLCONFIGDIR=/tmp \
NUMBA_CACHE_DIR=/tmp/numba_cache \
LOG_LEVEL=INFO \ LOG_LEVEL=INFO \
CONFIG_PATH=/app/config/config.yaml \ CONFIG_PATH=/app/config/config.yaml \
SECRETS_PATH=/app/config/secrets.yaml \ SECRETS_PATH=/app/config/secrets.yaml \

View File

@@ -0,0 +1,34 @@
"""
dexorder.nautilus — Nautilus Trader integration for strategy backtesting.
Quants import PandasStrategy to write strategies:
from dexorder.nautilus import PandasStrategy
import pandas as pd
import pandas_ta as ta
class MyStrategy(PandasStrategy):
def evaluate(self, dfs):
df = dfs.get("BTC/USDT.BINANCE:3600")
if df is None or len(df) < 14:
return
rsi = ta.rsi(df["close"], length=14)
if rsi.iloc[-1] < 30:
self.buy(0.01)
elif rsi.iloc[-1] > 70:
self.sell(0.01)
SecretsVault provides the interface for user-owned exchange API keys
(stub until the user-local vault is implemented):
from dexorder.nautilus import SecretsVault
"""
from dexorder.nautilus.pandas_strategy import PandasStrategy, PandasStrategyConfig
from dexorder.secrets_vault import SecretsVault
__all__ = [
"PandasStrategy",
"PandasStrategyConfig",
"SecretsVault",
]

View File

@@ -0,0 +1,358 @@
"""
Backtest runner — sets up Nautilus BacktestEngine and runs a PandasStrategy.
Entry points
------------
run_backtest() — called from execute_strategy MCP tool (via thread executor)
_load_strategy_class() — exec() the user's implementation.py, find PandasStrategy subclass
_setup_custom_indicators() — register user indicators with pandas-ta via ta.import_dir()
_compute_metrics() — extract P&L metrics from completed BacktestEngine
"""
from __future__ import annotations
import inspect
import logging
from pathlib import Path
from typing import Any
import pandas as pd
log = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Custom indicator setup
# ---------------------------------------------------------------------------
def _setup_custom_indicators(data_dir: Path) -> None:
    """Register the user's custom indicators with pandas-ta.

    Thin wrapper: the registration logic itself lives in
    ``dexorder.tools.python_tools.setup_custom_indicators``.
    """
    import dexorder.tools.python_tools as _python_tools

    _python_tools.setup_custom_indicators(data_dir)
# ---------------------------------------------------------------------------
# Strategy class loading
# ---------------------------------------------------------------------------
def _load_strategy_class(impl_path: Path) -> type:
    """
    Execute implementation.py and return the unique PandasStrategy subclass.

    The exec namespace is seeded with:
        PandasStrategy — base class (so the subclass check works)
        pd             — pandas
        ta             — pandas_ta (if available)

    Raises:
        ValueError: if zero or multiple PandasStrategy subclasses are defined
        SyntaxError / Exception: if the file fails to parse or execute
    """
    import builtins

    from dexorder.nautilus.pandas_strategy import PandasStrategy

    namespace: dict[str, Any] = {
        # Seed with the builtins module explicitly. The bare `__builtins__`
        # global is a CPython implementation detail (a module in __main__,
        # a dict elsewhere), so relying on it is fragile.
        "__builtins__": builtins,
        "PandasStrategy": PandasStrategy,
        "pd": pd,
    }
    try:
        import pandas_ta as ta
        namespace["ta"] = ta
    except ImportError:
        pass  # pandas_ta is optional; strategies that don't use `ta` still run

    code = impl_path.read_text()
    exec(compile(code, str(impl_path), "exec"), namespace)  # noqa: S102

    # Collect subclasses, deduplicating by identity: an alias such as
    # `Alias = MyStrategy` puts the same class object in namespace.values()
    # twice and would otherwise falsely trigger the "multiple subclasses" error.
    seen_ids: set[int] = set()
    subclasses: list[type] = []
    for obj in namespace.values():
        if (
            inspect.isclass(obj)
            and issubclass(obj, PandasStrategy)
            and obj is not PandasStrategy
            and id(obj) not in seen_ids
        ):
            seen_ids.add(id(obj))
            subclasses.append(obj)

    if len(subclasses) == 0:
        raise ValueError(
            f"No PandasStrategy subclass found in {impl_path}. "
            "The strategy file must define exactly one class that inherits from PandasStrategy."
        )
    if len(subclasses) > 1:
        names = [c.__name__ for c in subclasses]
        raise ValueError(
            f"Multiple PandasStrategy subclasses found in {impl_path}: {names}. "
            "Define exactly one concrete strategy class per file."
        )
    return subclasses[0]
# ---------------------------------------------------------------------------
# Metrics extraction
# ---------------------------------------------------------------------------
def _compute_metrics(
    engine,
    venue_strs: list[str],
    initial_capital: float,
    all_bars: list,
) -> dict[str, Any]:
    """
    Extract performance metrics from a completed BacktestEngine.

    Args:
        engine: Nautilus BacktestEngine after run() has completed.
        venue_strs: Venue identifier strings used in the run (e.g. ["BINANCE"]).
        initial_capital: Starting balance in quote currency.
        all_bars: All Bar objects fed to the engine, sorted ascending by ts_event.

    Returns dict with:
        total_return    float — fractional (0.15 = +15%)
        sharpe_ratio    float — annualized; 0.0 if no trades or constant equity
        max_drawdown    float — max peak-to-trough as fraction (0.10 = 10% drawdown)
        win_rate        float — fraction of trades with positive realized PnL
        trade_count     int
        equity_curve    list[{timestamp: int_unix_s, equity: float}]
    """
    # Reconstruct equity curve from fills
    equity_points: list[dict] = []
    if all_bars:
        # Anchor the curve at the first bar with the starting balance.
        equity_points.append({
            "timestamp": all_bars[0].ts_event // 1_000_000_000,
            "equity": initial_capital,
        })
    running_equity = initial_capital
    trade_count = 0
    winning_trades = 0
    try:
        fills_df = engine.trader.generate_order_fills_report()
    except Exception as exc:
        # Best effort: missing/failed report just means an empty equity curve,
        # not a failed backtest.
        log.debug("generate_order_fills_report() failed: %s", exc)
        fills_df = None
    if fills_df is not None and len(fills_df) > 0:
        # Sort by event time
        if "ts_event" in fills_df.columns:
            fills_df = fills_df.sort_values("ts_event")
        for _, fill in fills_df.iterrows():
            rpnl = fill.get("realized_pnl") if hasattr(fill, "get") else None
            if rpnl is None:
                continue
            # Nautilus Money objects: str form is "15.32 USDT"
            rpnl_float: float | None = None
            try:
                if hasattr(rpnl, "as_decimal"):
                    rpnl_float = float(rpnl.as_decimal())
                elif rpnl is not None:
                    rpnl_str = str(rpnl).strip()
                    if rpnl_str and rpnl_str.lower() not in ("none", "nan"):
                        # Take the numeric part before the currency suffix.
                        rpnl_float = float(rpnl_str.split()[0])
            except (ValueError, TypeError, IndexError):
                pass
            if rpnl_float is not None and rpnl_float != 0.0:
                # Only fills with non-zero realized PnL count as "trades" here:
                # opening fills (zero realized PnL) are intentionally skipped.
                ts_s: int | None = None
                raw_ts = fill.get("ts_event") if hasattr(fill, "get") else None
                if raw_ts is not None:
                    try:
                        ts_s = int(raw_ts) // 1_000_000_000
                    except (TypeError, ValueError):
                        pass
                running_equity += rpnl_float
                trade_count += 1
                if rpnl_float > 0:
                    winning_trades += 1
                if ts_s is not None:
                    equity_points.append({"timestamp": ts_s, "equity": running_equity})
    if all_bars:
        # Close the curve at the final bar timestamp.
        equity_points.append({
            "timestamp": all_bars[-1].ts_event // 1_000_000_000,
            "equity": running_equity,
        })
    # Try to get actual final balance from the account (more accurate than fill reconstruction)
    try:
        from nautilus_trader.model.identifiers import Venue
        for venue_str in venue_strs:
            account = engine.cache.account_for_venue(Venue(venue_str))
            if account is None:
                continue
            # Sum all balances (quote currency is what we started with)
            for bal in account.balances().values():
                total = getattr(bal, "total", None)
                if total is not None:
                    final_val = float(str(total).split()[0]) if not hasattr(total, "as_decimal") else float(total.as_decimal())
                    # Use the account balance as the definitive final equity
                    running_equity = final_val
                    if equity_points:
                        equity_points[-1]["equity"] = running_equity
                # NOTE(review): this break exits only the balances loop; the
                # outer venue loop continues, so a later venue's balance can
                # overwrite running_equity — confirm intent for multi-venue runs.
                break
    except Exception as exc:
        log.debug("Account balance extraction failed: %s", exc)
    # Core metrics
    total_return = (running_equity - initial_capital) / initial_capital if initial_capital else 0.0
    win_rate = winning_trades / trade_count if trade_count > 0 else 0.0
    # Sharpe ratio (annualized) from equity curve returns
    # NOTE(review): equity points are trade-based, not bar-based, but the
    # annualization factor uses average bar duration — an approximation.
    sharpe = 0.0
    if len(equity_points) > 2 and all_bars and len(all_bars) > 1:
        equity_series = pd.Series([p["equity"] for p in equity_points])
        returns = equity_series.pct_change().dropna()
        if len(returns) > 1 and returns.std() > 0:
            bar_duration_ns = (all_bars[-1].ts_event - all_bars[0].ts_event) / max(len(all_bars) - 1, 1)
            if bar_duration_ns > 0:
                bars_per_year = (365 * 24 * 3600 * 1e9) / bar_duration_ns
                sharpe = float((returns.mean() / returns.std()) * (bars_per_year ** 0.5))
    # Max drawdown
    max_drawdown = 0.0
    if len(equity_points) > 1:
        equity_arr = pd.Series([p["equity"] for p in equity_points])
        rolling_max = equity_arr.cummax()
        # replace(0, nan) avoids division by zero; NaN drawdowns are ignored by min().
        drawdowns = (equity_arr - rolling_max) / rolling_max.replace(0, float("nan"))
        max_drawdown = float(abs(drawdowns.min())) if len(drawdowns) > 0 else 0.0
    return {
        "total_return": round(total_return, 6),
        "sharpe_ratio": round(sharpe, 4),
        "max_drawdown": round(max_drawdown, 6),
        "win_rate": round(win_rate, 4),
        "trade_count": trade_count,
        "equity_curve": equity_points,
    }
# ---------------------------------------------------------------------------
# Main entry point
# ---------------------------------------------------------------------------
def run_backtest(
    strategy_class: type,
    feeds: list[tuple[str, int]],
    ohlc_dfs: dict[str, pd.DataFrame],
    initial_capital: float = 10_000.0,
    paper: bool = True,
) -> dict[str, Any]:
    """
    Configure and run a BacktestEngine synchronously.

    Designed to be called from asyncio via loop.run_in_executor() since
    BacktestEngine does not support async.

    Args:
        strategy_class: Concrete PandasStrategy subclass to instantiate
        feeds: List of (ticker, period_seconds) pairs, e.g. [("BTC/USDT.BINANCE", 3600)]
        ohlc_dfs: Dict of feed_key → full OHLC+ DataFrame (with buy_vol, sell_vol, etc.)
        initial_capital: Starting account balance in quote currency
        paper: Always True for historical backtest (flag reserved for forward
            testing; not consumed anywhere in this function)

    Returns:
        Dict of performance metrics (see _compute_metrics)
    """
    from nautilus_trader.backtest.engine import BacktestEngine, BacktestEngineConfig
    from nautilus_trader.backtest.models import FillModel
    from nautilus_trader.config import LoggingConfig
    from nautilus_trader.model.enums import OmsType, AccountType
    from nautilus_trader.model.identifiers import Venue
    from nautilus_trader.model.objects import Money
    from dexorder.nautilus.pandas_strategy import PandasStrategyConfig, make_feed_key
    from dexorder.nautilus.data_adapter import (
        make_instrument_from_metadata,
        make_bar_type,
        df_to_bars,
        extras_lookup,
    )
    # --- Engine config ---
    engine_config = BacktestEngineConfig(
        trader_id="DEXORDER-BACKTEST-001",
        logging=LoggingConfig(log_level="ERROR"),
    )
    engine = BacktestEngine(config=engine_config)
    # --- Per-venue setup (unique venues from feeds) ---
    venues_seen: set[str] = set()
    all_bars: list = []
    feed_keys: list[str] = []
    # NOTE: instruments / price_precisions / size_precisions are populated but
    # not read again below — kept for future use by callers/debugging.
    instruments: dict[str, Any] = {}
    price_precisions: dict[str, int] = {}
    size_precisions: dict[str, int] = {}
    for ticker, period_seconds in feeds:
        feed_key = make_feed_key(ticker, period_seconds)
        feed_keys.append(feed_key)
        from dexorder.symbol_metadata_client import parse_ticker
        exchange_id, _ = parse_ticker(ticker)
        if exchange_id not in venues_seen:
            # Each venue is added exactly once, funded with the full
            # initial_capital in the quote currency of the FIRST feed seen
            # for that venue.
            venues_seen.add(exchange_id)
            # Determine quote currency from ticker (e.g. USDT from BTC/USDT)
            _, market_id = parse_ticker(ticker)
            quote_str = market_id.split("/")[1] if "/" in market_id else "USDT"
            from nautilus_trader.model.currencies import Currency
            quote_currency = Currency.from_str(quote_str)
            engine.add_venue(
                venue=Venue(exchange_id),
                oms_type=OmsType.NETTING,
                account_type=AccountType.CASH,
                base_currency=None,
                starting_balances=[Money(initial_capital, quote_currency)],
                fill_model=FillModel(),
            )
        # Instrument and bars
        instrument, pp, sp = make_instrument_from_metadata(ticker)
        instruments[feed_key] = instrument
        price_precisions[feed_key] = pp
        size_precisions[feed_key] = sp
        engine.add_instrument(instrument)
        df = ohlc_dfs.get(feed_key)
        if df is not None and not df.empty:
            bar_type = make_bar_type(ticker, period_seconds)
            bars = df_to_bars(df, bar_type, pp, sp)
            engine.add_data(bars)
            all_bars.extend(bars)
        else:
            log.warning("No OHLC data for feed %s — strategy will receive no bars", feed_key)
    if not all_bars:
        # Nothing to run against — return a zeroed metrics payload.
        return {
            "total_return": 0.0, "sharpe_ratio": 0.0, "max_drawdown": 0.0,
            "win_rate": 0.0, "trade_count": 0, "equity_curve": [],
        }
    # Sort combined bars by timestamp for metrics computation
    all_bars.sort(key=lambda b: b.ts_event)
    # --- Instantiate and configure strategy ---
    strategy_config = PandasStrategyConfig(
        strategy_id=f"{strategy_class.__name__}-001",
        feed_keys=tuple(feed_keys),
        initial_capital=initial_capital,
    )
    strategy = strategy_class(config=strategy_config)
    # Inject OHLC+ extras before run
    for feed_key, df in ohlc_dfs.items():
        if df is not None and not df.empty:
            strategy._inject_extras(feed_key, extras_lookup(df))
    engine.add_strategy(strategy)
    # --- Run ---
    engine.run()
    # --- Extract metrics ---
    metrics = _compute_metrics(engine, list(venues_seen), initial_capital, all_bars)
    # Dispose releases engine resources; metrics were extracted first.
    engine.dispose()
    return metrics

View File

@@ -0,0 +1,235 @@
"""
Data adapter — converts our OHLC DataFrames to Nautilus objects.
Functions
---------
make_instrument — CurrencyPair from ticker string
make_bar_type — BarType from ticker + period_seconds
df_to_bars — OHLC DataFrame → list[Bar]
extras_lookup — extract OHLC+ extras dict from DataFrame
make_instrument_from_metadata — instrument with best-effort precision
"""
from __future__ import annotations
import logging
from typing import Optional
import pandas as pd
from nautilus_trader.model.currencies import Currency
from nautilus_trader.model.data import Bar, BarType, BarSpecification
from nautilus_trader.model.enums import BarAggregation, PriceType, AggregationSource
from nautilus_trader.model.identifiers import InstrumentId, Symbol, Venue
from nautilus_trader.model.instruments import CurrencyPair
from nautilus_trader.model.objects import Price, Quantity
from dexorder.symbol_metadata_client import parse_ticker
from dexorder.nautilus.pandas_strategy import (
bar_type_from_feed_key,
_PERIOD_TO_AGGREGATION,
)
log = logging.getLogger(__name__)
# Columns in our OHLC+ DataFrames that are extras (not part of Nautilus Bar).
# Keep in sync with extras_lookup() and the per-bar row dict in PandasStrategy.
_EXTRA_COLS = ("buy_vol", "sell_vol", "open_interest")
def make_bar_type(ticker: str, period_seconds: int) -> BarType:
    """
    Construct a Nautilus BarType from our ticker and period_seconds.

    Period mapping (first matching row of _PERIOD_TO_AGGREGATION wins):
        period_seconds < 60     → SECOND, step = period_seconds
        period_seconds < 3600   → MINUTE, step = period_seconds // 60
        period_seconds < 86400  → HOUR,   step = period_seconds // 3600
        else                    → DAY,    step = period_seconds // 86400

    Price type = MID (standard for crypto OHLC).
    Source = EXTERNAL (we supply pre-aggregated data, not Nautilus aggregation).
    """
    exchange, market = parse_ticker(ticker)
    iid = InstrumentId(Symbol(market), Venue(exchange))
    # Default to DAY; the scan below overrides for sub-day periods.
    aggregation, divisor = BarAggregation.DAY, 86400
    for limit, agg, div in _PERIOD_TO_AGGREGATION:
        if period_seconds < limit:
            aggregation, divisor = agg, div
            break
    step = max(1, period_seconds // divisor)
    spec = BarSpecification(step=step, aggregation=aggregation, price_type=PriceType.MID)
    return BarType(
        instrument_id=iid,
        bar_spec=spec,
        aggregation_source=AggregationSource.EXTERNAL,
    )
def make_instrument(
    ticker: str,
    price_precision: int = 8,
    size_precision: int = 8,
    tick_size: Optional[float] = None,
    lot_size: Optional[float] = None,
    maker_fee: float = 0.001,
    taker_fee: float = 0.001,
    margin_init: float = 0.0,
    margin_maint: float = 0.0,
) -> CurrencyPair:
    """
    Build a minimal CurrencyPair instrument from a Nautilus-format ticker.

    Args:
        ticker: e.g. "BTC/USDT.BINANCE"
        price_precision: decimal places for price (default 8)
        size_precision: decimal places for quantity (default 8)
        tick_size: minimum price increment (defaults to 10^-price_precision)
        lot_size: minimum order size (defaults to 10^-size_precision)
        maker_fee, taker_fee: fee rates as fractions (0.001 = 0.1%)
        margin_init, margin_maint: margin ratios (0.0 = spot/no margin)
    """
    exchange, market = parse_ticker(ticker)
    base, quote = market.split("/")
    # Fall back to the smallest representable increment at each precision.
    effective_tick = tick_size if tick_size is not None else 10.0 ** (-price_precision)
    effective_lot = lot_size if lot_size is not None else 10.0 ** (-size_precision)
    lot_qty = Quantity(effective_lot, size_precision)
    # Static instrument — timestamps are not relevant for backtesting.
    return CurrencyPair(
        instrument_id=InstrumentId(Symbol(market), Venue(exchange)),
        raw_symbol=Symbol(market),
        base_currency=Currency.from_str(base),
        quote_currency=Currency.from_str(quote),
        price_precision=price_precision,
        size_precision=size_precision,
        price_increment=Price(effective_tick, price_precision),
        size_increment=lot_qty,
        lot_size=lot_qty,
        max_quantity=None,
        min_quantity=lot_qty,
        max_notional=None,
        min_notional=None,
        max_price=None,
        min_price=None,
        margin_init=margin_init,
        margin_maint=margin_maint,
        maker_fee=maker_fee,
        taker_fee=taker_fee,
        ts_event=0,
        ts_init=0,
    )
def make_instrument_from_metadata(ticker: str) -> tuple[CurrencyPair, int, int]:
    """
    Create a CurrencyPair using SymbolMetadata when available.

    Returns:
        (instrument, price_precision, size_precision)

    Falls back to an instrument with 8/8 precision defaults if metadata is
    unavailable for any reason (no catalog configured, lookup failure, etc.).
    """
    try:
        from dexorder.symbol_metadata_client import SymbolMetadataClient
        from dexorder.api import get_api
        # DataAPIImpl stores the catalog URI as an attribute on the OHLCClient
        api = get_api()
        ohlc_client = getattr(api.data, '_ohlc_client', None) or getattr(api.data, 'ohlc_client', None)
        iceberg_client = getattr(ohlc_client, 'iceberg', None) if ohlc_client else None
        catalog_uri = getattr(iceberg_client, 'catalog_uri', None) if iceberg_client else None
        if catalog_uri:
            meta_client = SymbolMetadataClient(catalog_uri=catalog_uri)
            meta = meta_client.get_metadata(ticker)
            # Use explicit None checks: `or` would clobber legitimate falsy
            # metadata values such as price_precision == 0 or a 0.0 fee rate.
            pp = meta.price_precision if meta.price_precision is not None else 8
            sp = meta.size_precision if meta.size_precision is not None else 8
            instrument = make_instrument(
                ticker,
                price_precision=pp,
                size_precision=sp,
                tick_size=meta.tick_size,
                lot_size=meta.lot_size,
                maker_fee=meta.maker_fee if meta.maker_fee is not None else 0.001,
                taker_fee=meta.taker_fee if meta.taker_fee is not None else 0.001,
                margin_init=meta.margin_init if meta.margin_init is not None else 0.0,
                margin_maint=meta.margin_maint if meta.margin_maint is not None else 0.0,
            )
            return instrument, pp, sp
    except Exception:
        log.debug("make_instrument_from_metadata: metadata unavailable for %s, using defaults", ticker)
    instrument = make_instrument(ticker)
    return instrument, 8, 8
def df_to_bars(
    df: pd.DataFrame,
    bar_type: BarType,
    price_precision: int = 8,
    size_precision: int = 8,
) -> list[Bar]:
    """
    Convert an OHLC DataFrame to a list of Nautilus Bar objects.

    Args:
        df: DataFrame with columns [timestamp (ns), open, high, low, close].
            volume column is optional; missing or NaN volume becomes 0.0.
        bar_type: BarType to tag each bar with.
        price_precision: decimal precision for Price construction.
        size_precision: decimal precision for Quantity construction.

    Returns:
        list[Bar] sorted ascending by timestamp.
    """
    has_volume = "volume" in df.columns
    bars: list[Bar] = []
    for row in df.itertuples(index=False):
        ts_ns = int(row.timestamp)
        # Treat missing or NaN volume as zero so NaN never reaches Quantity().
        volume = 0.0
        if has_volume and pd.notna(row.volume):
            volume = float(row.volume)
        bar = Bar(
            bar_type=bar_type,
            open=Price(float(row.open), price_precision),
            high=Price(float(row.high), price_precision),
            low=Price(float(row.low), price_precision),
            close=Price(float(row.close), price_precision),
            volume=Quantity(volume, size_precision),
            ts_event=ts_ns,
            ts_init=ts_ns,
        )
        bars.append(bar)
    # Honor the documented contract: ascending by timestamp even if the
    # input DataFrame was not pre-sorted.
    bars.sort(key=lambda b: b.ts_event)
    return bars
def extras_lookup(df: pd.DataFrame) -> dict[int, dict]:
    """
    Build a {ts_event_ns → {buy_vol, sell_vol, open_interest}} mapping.

    Values are None for columns absent from the DataFrame or holding NaN.
    Used by PandasStrategy._inject_extras() to enrich each bar with OHLC+
    fields that the Nautilus Bar type does not carry natively.
    """
    # Determine once which extra columns this DataFrame actually carries.
    available = [c for c in _EXTRA_COLS if c in df.columns]
    lookup: dict[int, dict] = {}
    for row in df.itertuples(index=False):
        entry: dict = dict.fromkeys(_EXTRA_COLS)
        for col in available:
            raw = getattr(row, col)
            if raw is not None and not (isinstance(raw, float) and pd.isna(raw)):
                entry[col] = float(raw)
        lookup[int(row.timestamp)] = entry
    return lookup

View File

@@ -0,0 +1,315 @@
"""
PandasStrategy — Nautilus Strategy base class with a DataFrame-oriented API.
Quants subclass PandasStrategy and implement evaluate(dfs) — the same function
they'd write in a research notebook. No Nautilus objects appear in quant code.
Features:
- Multiple data feeds: subscribe to N (ticker, period_seconds) pairs
- evaluate(dfs) receives a dict[feed_key, DataFrame] where feed_key is "TICKER:period_seconds"
- Every feed's DataFrame includes OHLC + volume, buy_vol, sell_vol, open_interest
- Timer hook (on_timer) reserved as extension point — TBD
Feed key format: "BTC/USDT.BINANCE:3600"
"""
from __future__ import annotations
import logging
from abc import abstractmethod
from typing import TYPE_CHECKING
import pandas as pd
from nautilus_trader.config import StrategyConfig
from nautilus_trader.model.data import Bar, BarType, BarSpecification
from nautilus_trader.model.enums import BarAggregation, PriceType, AggregationSource, OrderSide, TimeInForce
from nautilus_trader.model.identifiers import InstrumentId, Symbol, Venue
from nautilus_trader.model.objects import Quantity
from nautilus_trader.trading.strategy import Strategy
from dexorder.symbol_metadata_client import parse_ticker
log = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Feed key helpers
# ---------------------------------------------------------------------------
# Translation table from a period in seconds to a Nautilus BarSpecification:
# scan in order, take the first row whose exclusive threshold exceeds the
# period; callers fall through to DAY aggregation for periods >= 86400 s.
_PERIOD_TO_AGGREGATION: list[tuple[int, BarAggregation, int]] = [
    # (threshold_exclusive, aggregation, divisor)
    # period_seconds < 60 → SECOND, step = period_seconds
    (60, BarAggregation.SECOND, 1),
    # 60 <= period_seconds < 3600 → MINUTE
    (3600, BarAggregation.MINUTE, 60),
    # 3600 <= period_seconds < 86400 → HOUR
    (86400, BarAggregation.HOUR, 3600),
]
# Inverse mapping: seconds represented by one step of each aggregation level.
_AGG_TO_SECONDS: dict[BarAggregation, int] = {
    BarAggregation.SECOND: 1,
    BarAggregation.MINUTE: 60,
    BarAggregation.HOUR: 3600,
    BarAggregation.DAY: 86400,
}
def make_feed_key(ticker: str, period_seconds: int) -> str:
    """Return the canonical feed key for a (ticker, period) pair.

    Example: make_feed_key("BTC/USDT.BINANCE", 3600) → "BTC/USDT.BINANCE:3600"
    """
    return ":".join((ticker, str(period_seconds)))
def parse_feed_key(feed_key: str) -> tuple[str, int]:
    """Split 'BTC/USDT.BINANCE:3600' into ('BTC/USDT.BINANCE', 3600).

    Splits on the LAST colon so any colons inside the ticker stay intact.
    """
    parts = feed_key.rsplit(":", 1)
    ticker, period_str = parts
    return ticker, int(period_str)
def bar_type_from_feed_key(feed_key: str) -> BarType:
    """Build a Nautilus BarType from a feed key string such as 'BTC/USDT.BINANCE:3600'."""
    ticker, period_seconds = parse_feed_key(feed_key)
    exchange, market = parse_ticker(ticker)
    iid = InstrumentId(Symbol(market), Venue(exchange))
    # Default to DAY; the table scan overrides for sub-day periods.
    aggregation, divisor = BarAggregation.DAY, 86400
    for limit, agg, div in _PERIOD_TO_AGGREGATION:
        if period_seconds < limit:
            aggregation, divisor = agg, div
            break
    spec = BarSpecification(
        step=period_seconds // divisor,
        aggregation=aggregation,
        price_type=PriceType.MID,
    )
    return BarType(instrument_id=iid, bar_spec=spec,
                   aggregation_source=AggregationSource.EXTERNAL)
def feed_key_from_bar_type(bar_type: BarType) -> str:
    """Reconstruct the canonical feed key ('TICKER:period_seconds') from a BarType."""
    iid = bar_type.instrument_id
    # One step of the aggregation level corresponds to this many seconds.
    seconds_per_step = _AGG_TO_SECONDS.get(bar_type.spec.aggregation, 1)
    period_seconds = bar_type.spec.step * seconds_per_step
    return f"{iid.symbol}.{iid.venue}:{period_seconds}"
# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
class PandasStrategyConfig(StrategyConfig, frozen=True):
    """
    Configuration for PandasStrategy.

    feed_keys: tuple of feed key strings, e.g. ("BTC/USDT.BINANCE:3600",).
        Set by the backtest/activate runner — not by the quant's code.
    initial_capital: informational; the actual account balance is set in
        BacktestEngine (see run_backtest's add_venue starting_balances).
    """
    # Feeds this strategy subscribes to in on_start (canonical feed-key strings).
    feed_keys: tuple[str, ...] = ()
    # Informational only — see class docstring.
    initial_capital: float = 10_000.0
# ---------------------------------------------------------------------------
# Base class
# ---------------------------------------------------------------------------
class PandasStrategy(Strategy):
    """
    Base class for quant strategies.

    Quants implement evaluate(dfs) — the same function they'd write in a research
    notebook. All bar accumulation, OHLC+ field injection, and DataFrame management
    is handled internally.

    Example
    -------
    ::

        from dexorder.nautilus import PandasStrategy
        import pandas as pd
        import pandas_ta as ta

        class MyStrategy(PandasStrategy):
            def evaluate(self, dfs):
                df = dfs.get("BTC/USDT.BINANCE:3600")
                if df is None or len(df) < 14:
                    return
                rsi = ta.rsi(df["close"], length=14)
                if rsi.iloc[-1] < 30:
                    self.buy(0.01)
                elif rsi.iloc[-1] > 70:
                    self.sell(0.01)
    """
    def __init__(self, config: PandasStrategyConfig) -> None:
        super().__init__(config)
        # Per-feed row accumulator (list of plain dicts, appended per bar)
        self._rows: dict[str, list[dict]] = {}
        # Per-feed DataFrame (rebuilt after each bar; passed to evaluate())
        self._dfs: dict[str, pd.DataFrame] = {}
        # Per-feed extras lookup: {ts_event_ns: {buy_vol, sell_vol, open_interest}}
        self._extras: dict[str, dict[int, dict]] = {}
        # Resolved BarType objects (populated in on_start)
        self._bar_types: dict[str, BarType] = {}
    # ------------------------------------------------------------------
    # Nautilus lifecycle
    # ------------------------------------------------------------------
    def on_start(self) -> None:
        # Subscribe to every configured feed; bar delivery drives evaluate().
        for feed_key in self.config.feed_keys:
            bar_type = bar_type_from_feed_key(feed_key)
            self._bar_types[feed_key] = bar_type
            self.subscribe_bars(bar_type)
    def on_bar(self, bar: Bar) -> None:
        # Map the incoming bar back to its canonical feed key.
        feed_key = feed_key_from_bar_type(bar.bar_type)
        ts_ns = bar.ts_event
        # Merge OHLC+ extras (buy_vol, sell_vol, open_interest) by timestamp
        extras = self._extras.get(feed_key, {}).get(ts_ns, {})
        row = {
            "timestamp": ts_ns,
            "open": float(bar.open),
            "high": float(bar.high),
            "low": float(bar.low),
            "close": float(bar.close),
            "volume": float(bar.volume),
            "buy_vol": extras.get("buy_vol"),
            "sell_vol": extras.get("sell_vol"),
            "open_interest": extras.get("open_interest"),
        }
        if feed_key not in self._rows:
            self._rows[feed_key] = []
        self._rows[feed_key].append(row)
        # NOTE(review): rebuilding the full DataFrame every bar costs
        # O(rows-so-far) per bar (quadratic over a run) — acceptable for
        # backtests, revisit for long-lived live trading.
        self._dfs[feed_key] = pd.DataFrame(self._rows[feed_key])
        self.evaluate(self._dfs)
    def on_stop(self) -> None:
        pass  # no teardown needed; subclasses may override
    # ------------------------------------------------------------------
    # Quant API — override in subclass
    # ------------------------------------------------------------------
    # NOTE(review): @abstractmethod only blocks instantiation when the class's
    # metaclass derives from ABCMeta — confirm that holds for Strategy.
    @abstractmethod
    def evaluate(self, dfs: dict[str, pd.DataFrame]) -> None:
        """
        Implement your strategy logic here.

        Called after every new bar on any subscribed feed.

        Args:
            dfs: Dict mapping feed_key → DataFrame.
                Feed key format: "TICKER:period_seconds", e.g. "BTC/USDT.BINANCE:3600".
                DataFrame columns: timestamp (ns), open, high, low, close, volume,
                buy_vol, sell_vol, open_interest.
                All rows up to and including the latest bar are included.
                A feed's DataFrame is absent (key missing) until its first bar arrives.

        Trading methods available:
            self.buy(quantity, feed_key=None)   — market buy
            self.sell(quantity, feed_key=None)  — market sell
            self.flatten(feed_key=None)         — close all positions for feed
        """
    def on_timer(self, timer_name: str) -> None:
        """
        Called on timer ticks (TBD — timer wiring not yet implemented).

        Override to handle time-based evaluation independent of bar arrival.
        Default implementation calls evaluate() with current DataFrames.
        """
        self.evaluate(self._dfs)
    # ------------------------------------------------------------------
    # Order helpers
    # ------------------------------------------------------------------
    def _resolve_feed_key(self, feed_key: str | None) -> str | None:
        """Return feed_key if given, else the first configured feed key (None if none)."""
        if feed_key is not None:
            return feed_key
        keys = self.config.feed_keys
        return keys[0] if keys else None
    def _instrument_id_for_feed(self, feed_key: str) -> InstrumentId | None:
        # Derive the InstrumentId from the feed key's ticker portion;
        # returns None (after logging) if the ticker cannot be parsed.
        ticker, _ = parse_feed_key(feed_key)
        try:
            exchange_id, market_id = parse_ticker(ticker)
            return InstrumentId(Symbol(market_id), Venue(exchange_id))
        except ValueError:
            log.error("Cannot parse ticker from feed key: %s", feed_key)
            return None
    def buy(self, quantity: float, feed_key: str | None = None) -> None:
        """Submit a GTC market buy order. Defaults to the first configured feed."""
        fk = self._resolve_feed_key(feed_key)
        if not fk:
            # NOTE(review): errors go to the module logger, not self.log — confirm intended.
            log.error("buy(): no feed key available")
            return
        instrument_id = self._instrument_id_for_feed(fk)
        if instrument_id is None:
            return
        instrument = self.cache.instrument(instrument_id)
        if instrument is None:
            log.error("buy(): instrument not found for %s", instrument_id)
            return
        order = self.order_factory.market(
            instrument_id=instrument_id,
            order_side=OrderSide.BUY,
            # Quantity constructed at the instrument's size precision.
            quantity=Quantity(quantity, instrument.size_precision),
            time_in_force=TimeInForce.GTC,
        )
        self.submit_order(order)
    def sell(self, quantity: float, feed_key: str | None = None) -> None:
        """Submit a GTC market sell order. Defaults to the first configured feed."""
        fk = self._resolve_feed_key(feed_key)
        if not fk:
            log.error("sell(): no feed key available")
            return
        instrument_id = self._instrument_id_for_feed(fk)
        if instrument_id is None:
            return
        instrument = self.cache.instrument(instrument_id)
        if instrument is None:
            log.error("sell(): instrument not found for %s", instrument_id)
            return
        order = self.order_factory.market(
            instrument_id=instrument_id,
            order_side=OrderSide.SELL,
            quantity=Quantity(quantity, instrument.size_precision),
            time_in_force=TimeInForce.GTC,
        )
        self.submit_order(order)
    def flatten(self, feed_key: str | None = None) -> None:
        """Close all open positions for the specified feed (defaults to first feed)."""
        fk = self._resolve_feed_key(feed_key)
        if not fk:
            return
        instrument_id = self._instrument_id_for_feed(fk)
        if instrument_id is None:
            return
        positions = self.cache.positions_open(instrument_id=instrument_id)
        for pos in positions:
            self.close_position(pos)
    # ------------------------------------------------------------------
    # Runner API — called by backtest_runner, not by quant code
    # ------------------------------------------------------------------
    def _inject_extras(self, feed_key: str, extras: dict[int, dict]) -> None:
        """
        Pre-load OHLC+ extras for a feed before the backtest runs.

        Args:
            feed_key: e.g. "BTC/USDT.BINANCE:3600"
            extras: {ts_event_ns: {"buy_vol": float|None, "sell_vol": float|None,
                                   "open_interest": float|None}}
        """
        self._extras[feed_key] = extras

View File

@@ -141,39 +141,6 @@ class OHLCClient:
# Step 5: Query Iceberg again for complete dataset # Step 5: Query Iceberg again for complete dataset
df = self.iceberg.query_ohlc(ticker, period_seconds, start_time, end_time) df = self.iceberg.query_ohlc(ticker, period_seconds, start_time, end_time)
return self._forward_fill_gaps(df, period_seconds)
def _forward_fill_gaps(self, df: pd.DataFrame, period_seconds: int) -> pd.DataFrame:
"""
Forward-fill interior missing bars by carrying the last known close into
open, high, low, and close of any gap bar.
Only interior gaps (rows already present with null OHLC from the ingestor,
or timestamp slots missing between real bars) are filled. Edge gaps (before
the first real bar or after the last real bar) are left as-is.
"""
if df.empty:
return df
df = df.sort_index()
# Identify rows that are gap bars (null close)
is_gap = df['close'].isna()
if not is_gap.any():
return df
# Forward-fill close across gap rows, then copy into open/high/low
df['close'] = df['close'].ffill()
price_cols = ['open', 'high', 'low']
for col in price_cols:
if col in df.columns:
df[col] = df[col].where(~is_gap, df['close'])
# Zero out volume for filled gap rows
if 'volume' in df.columns:
df['volume'] = df['volume'].where(~is_gap, 0.0)
return df return df
async def __aenter__(self): async def __aenter__(self):

View File

@@ -0,0 +1,71 @@
"""
User Secrets Vault
Stores user-owned API keys for live exchange execution. Secured with the user's
password — Dexorder cannot read these secrets. This is entirely separate from
secrets.yaml, which holds Dexorder infrastructure credentials (Iceberg, MinIO, etc.).
Currently a stub — raises NotImplementedError on all calls. Will be backed by
a user-local encrypted store in a future iteration.
"""
class SecretsVault:
    """
    Interface for the user secrets vault.

    The vault is secured with the user's own password; the Dexorder platform
    cannot decrypt or access its contents. This distinguishes it from the
    system-level secrets.yaml, which stores infrastructure credentials managed
    by Dexorder operators.

    Typical keys stored here:
        "BINANCE_API_KEY", "BINANCE_API_SECRET"
        "COINBASE_API_KEY", "COINBASE_API_SECRET"
        etc.
    """

    # Shared message for the unimplemented get/set operations below.
    _NOT_IMPLEMENTED_MSG = (
        "User secrets vault is not yet implemented. "
        "Live execution API key management is a future feature."
    )

    def get_secret(self, key: str) -> str:
        """
        Retrieve a user secret by key.

        Args:
            key: Identifier for the secret, e.g. "BINANCE_API_KEY"

        Returns:
            The secret value as a string.

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED_MSG)

    def set_secret(self, key: str, value: str) -> None:
        """
        Store a secret in the vault.

        Args:
            key: Identifier for the secret
            value: Secret value to store (stored encrypted with user's password)

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED_MSG)

    def delete_secret(self, key: str) -> None:
        """
        Remove a secret from the vault.

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(
            "User secrets vault is not yet implemented."
        )

View File

@@ -0,0 +1,173 @@
"""
activate_strategy / deactivate_strategy — start and stop live or paper trading.
paper=True (default): forward paper trading — strategy runs on live data with
simulated fills. No API keys required.
paper=False: live trading — real order execution via user's exchange API keys,
retrieved from the user secrets vault. Currently raises
NotImplementedError until the vault is implemented.
Full live-data feed streaming for forward testing is TBD (requires a live bar
source). This module establishes the interface and stubs the runtime loop.
"""
import json
import logging
from typing import Any
log = logging.getLogger(__name__)
# Registry of active strategies: {strategy_name → runtime state dict}
# In a future implementation this will hold live strategy runners.
# NOTE: module-level mutable state — scoped to this process only.
_active_strategies: dict[str, dict] = {}
async def activate_strategy(
    strategy_name: str,
    feeds: list[dict],
    allocation: float,
    paper: bool = True,
) -> list:
    """
    Activate a strategy for live or paper forward trading.

    Args:
        strategy_name: Display name as saved via python_write("strategy", ...)
        feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        allocation: Capital allocated in quote currency (e.g. 5000.0 USDT)
        paper: True = paper/simulated fills (default); False = live execution

    Returns:
        list[TextContent] with JSON:
            {"status": "activated", "strategy_name": str, "paper": bool, "allocation": float}
        On error:
            {"error": str}
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        # Log and wrap every failure as a JSON error payload — tool calls
        # should never raise back to the caller for bad input.
        log.error("activate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    if strategy_name in _active_strategies:
        return _err(
            f"Strategy '{strategy_name}' is already active. "
            "Call deactivate_strategy first."
        )
    if not paper:
        # Live execution requires the user secrets vault for API keys.
        # The vault is not yet implemented.
        try:
            from dexorder.secrets_vault import SecretsVault
            _vault = SecretsVault()
            _vault.get_secret("__probe__")  # will raise NotImplementedError
        except NotImplementedError:
            return _err(
                "Live trading (paper=False) requires the user secrets vault, "
                "which is not yet implemented. Use paper=True for paper forward testing."
            )
    # Validate feeds — report malformed entries as JSON errors rather than
    # letting TypeError/ValueError propagate out of the tool call.
    if not feeds:
        return _err("feeds list is empty")
    parsed_feeds: list[tuple[str, int]] = []
    for f in feeds:
        if not isinstance(f, dict):
            return _err(f"Feed entry is not an object: {f!r}")
        sym = f.get("symbol", "")
        ps = f.get("period_seconds", 3600)
        if not sym:
            return _err(f"Feed entry missing 'symbol': {f}")
        try:
            ps_int = int(ps)
        except (TypeError, ValueError):
            return _err(f"Feed entry has non-integer 'period_seconds': {f}")
        if ps_int <= 0:
            return _err(f"Feed entry has non-positive 'period_seconds': {f}")
        parsed_feeds.append((sym, ps_int))
    # TODO: Full implementation — start a live/paper trading loop:
    #   1. Load strategy class from category files
    #   2. Set up custom indicators via _setup_custom_indicators()
    #   3. Subscribe to live bar stream for each feed
    #   4. Initialize paper account (Nautilus SimulatedExchange) or live account
    #   5. Run strategy event loop (on_bar → evaluate → submit orders)
    # This requires a live data feed adapter (TBD).
    log.info(
        "activate_strategy: registering '%s' (paper=%s, allocation=%.2f) — "
        "live feed loop is TBD",
        strategy_name, paper, allocation,
    )
    _active_strategies[strategy_name] = {
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "allocation": allocation,
        "paper": paper,
        "status": "registered",
        "pnl": 0.0,
    }
    payload = {
        "status": "activated",
        "strategy_name": strategy_name,
        "paper": paper,
        "allocation": allocation,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "note": (
            "Strategy registered. Live data feed streaming is not yet implemented — "
            "forward trading will begin when the live feed adapter is available."
        ),
    }
    return [TextContent(type="text", text=json.dumps(payload))]
async def deactivate_strategy(strategy_name: str) -> list:
    """Stop a previously activated strategy and report its final P&L.

    Args:
        strategy_name: Display name of the active strategy.

    Returns:
        list[TextContent] carrying a JSON document:
            {"status": "deactivated", "strategy_name": str, "final_pnl": float}
        or, on failure, {"error": str}.
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        log.error("deactivate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    # Atomically remove the registration; None means it was never active.
    record = _active_strategies.pop(strategy_name, None)
    if record is None:
        return _err(f"Strategy '{strategy_name}' is not active")
    # TODO: Stop the live feed loop and collect final P&L from the running engine.
    final_pnl = record.get("pnl", 0.0)
    log.info("deactivate_strategy: stopped '%s', final_pnl=%.4f", strategy_name, final_pnl)
    return [TextContent(
        type="text",
        text=json.dumps({
            "status": "deactivated",
            "strategy_name": strategy_name,
            "final_pnl": final_pnl,
        }),
    )]
async def list_active_strategies() -> list:
    """Enumerate every strategy currently registered as active.

    Returns:
        list[TextContent] carrying JSON:
            {"active_strategies": [{strategy_name, paper, allocation, feeds, pnl}, ...]}
    """
    from mcp.types import TextContent
    body = json.dumps({"active_strategies": list(_active_strategies.values())})
    return [TextContent(type="text", text=body)]

View File

@@ -0,0 +1,163 @@
"""
backtest_strategy — run a PandasStrategy against historical OHLC data.
Called directly from the MCP server's async handle_tool_call.
Returns a JSON payload with backtest metrics and equity curve, following the
same pattern as evaluate_indicator.py.
"""
import json
import logging
from pathlib import Path
from typing import Any
log = logging.getLogger(__name__)
# All OHLC+ columns to request from the DataAPI
_OHLC_EXTRA_COLUMNS = ["volume", "buy_vol", "sell_vol", "open_interest"]
async def backtest_strategy(
    strategy_name: str,
    feeds: list[dict],
    from_time: Any,
    to_time: Any,
    initial_capital: float = 10_000.0,
    paper: bool = True,
) -> list:
    """
    Load a saved strategy, fetch OHLC+ data for each feed, and run a backtest.

    Args:
        strategy_name: Display name as saved via python_write("strategy", ...)
        feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        from_time: Backtest start (Unix timestamp or date string)
        to_time: Backtest end (Unix timestamp or date string)
        initial_capital: Starting balance in quote currency (default 10,000)
        paper: Always True for historical backtest (flag reserved for forward testing)

    Returns:
        list[TextContent] with JSON payload:
            {
                "strategy_name": str,
                "feeds": [...],
                "initial_capital": float,
                "paper": bool,
                "total_candles": int,
                "total_return": float,    # fractional (0.15 = +15%)
                "sharpe_ratio": float,
                "max_drawdown": float,    # fractional (0.10 = 10% drawdown)
                "win_rate": float,
                "trade_count": int,
                "equity_curve": [{"timestamp": int, "equity": float}, ...]
            }
        On error:
            {"error": str}
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        # Every failure path funnels through here so it is both logged and
        # returned to the caller as a JSON error payload.
        log.error("backtest_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    # --- 1. Validate feeds input ---
    if not feeds:
        return _err("feeds list is empty — provide at least one {symbol, period_seconds} entry")
    parsed_feeds: list[tuple[str, int]] = []
    for f in feeds:
        sym = f.get("symbol", "")
        ps = f.get("period_seconds", 3600)
        if not sym:
            return _err(f"Feed entry missing 'symbol': {f}")
        parsed_feeds.append((sym, int(ps)))

    # --- 2. Resolve strategy implementation file ---
    try:
        from dexorder.tools.python_tools import get_category_manager, sanitize_name
        category_manager = get_category_manager()
        safe_name = sanitize_name(strategy_name)
        impl_path = category_manager.src_dir / "strategy" / safe_name / "implementation.py"
        if not impl_path.exists():
            return _err(f"Strategy '{strategy_name}' not found (looked at {impl_path})")
    except Exception as exc:
        return _err(f"Failed to locate strategy: {exc}")

    # --- 3. Register custom indicators with pandas-ta ---
    # Non-fatal: a strategy that uses no custom indicators should still run.
    try:
        from dexorder.nautilus.backtest_runner import _setup_custom_indicators
        _setup_custom_indicators(category_manager.src_dir)
    except Exception as exc:
        log.warning("backtest_strategy: custom indicator setup failed: %s", exc)

    # --- 4. Load strategy class ---
    try:
        from dexorder.nautilus.backtest_runner import _load_strategy_class
        strategy_class = _load_strategy_class(impl_path)
    except Exception as exc:
        log.exception("backtest_strategy: strategy load failed")
        return _err(f"Strategy load failed: {exc}")

    # --- 5. Fetch OHLC+ data for each feed ---
    try:
        from dexorder.api import get_api
        api = get_api()
    except Exception as exc:
        return _err(f"API not available: {exc}")

    # Hoisted out of the loop below: the import is loop-invariant.
    from dexorder.nautilus.pandas_strategy import make_feed_key

    ohlc_dfs: dict[str, Any] = {}
    total_candles = 0
    for ticker, period_seconds in parsed_feeds:
        feed_key = make_feed_key(ticker, period_seconds)
        try:
            df = await api.data.historical_ohlc(
                ticker=ticker,
                period_seconds=period_seconds,
                start_time=from_time,
                end_time=to_time,
                extra_columns=_OHLC_EXTRA_COLUMNS,
            )
        except Exception as exc:
            log.exception("backtest_strategy: OHLC fetch failed for %s", feed_key)
            return _err(f"OHLC fetch failed for {feed_key}: {exc}")
        if df.empty:
            return _err(f"No OHLC data for {feed_key} in the requested range")
        ohlc_dfs[feed_key] = df
        total_candles += len(df)

    # --- 6. Run backtest in thread executor (BacktestEngine is synchronous) ---
    try:
        import asyncio
        from dexorder.nautilus.backtest_runner import run_backtest
        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated in this context since Python 3.10.
        loop = asyncio.get_running_loop()
        metrics = await loop.run_in_executor(
            None,
            lambda: run_backtest(
                strategy_class=strategy_class,
                feeds=parsed_feeds,
                ohlc_dfs=ohlc_dfs,
                initial_capital=initial_capital,
                paper=paper,
            ),
        )
    except Exception as exc:
        log.exception("backtest_strategy: backtest run failed")
        return _err(f"Backtest failed: {exc}")

    # --- 7. Return results ---
    payload = {
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "initial_capital": initial_capital,
        "paper": paper,
        "total_candles": total_candles,
        **metrics,
    }
    return [TextContent(type="text", text=json.dumps(payload))]

View File

@@ -0,0 +1,243 @@
"""
evaluate_indicator — runs a pandas-ta (or custom) indicator against real OHLC data.
Called directly from the MCP server's async handle_tool_call, so it can await
the DataAPI without subprocess overhead.
Returns a JSON object with a `values` array of {timestamp, ...} records, where
timestamp is a Unix second integer and value fields hold floats (or null for NaN).
"""
import json
import logging
from pathlib import Path
from typing import Any
import pandas as pd
log = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Input routing — which series each pandas-ta function expects
# ---------------------------------------------------------------------------
# Maps pandas_ta_name → tuple of column names from the OHLCV dataframe
# Columns available: open, high, low, close, volume
# "volume" is fetched via extra_columns=["volume"]
_INPUTS: dict[str, tuple[str, ...]] = {
    # Close only
    "sma": ("close",),
    "ema": ("close",),
    "wma": ("close",),
    "dema": ("close",),
    "tema": ("close",),
    "trima": ("close",),
    "kama": ("close",),
    "t3": ("close",),
    "hma": ("close",),
    "alma": ("close",),
    "midpoint": ("close",),
    "rsi": ("close",),
    "macd": ("close",),
    "mom": ("close",),
    "roc": ("close",),
    "trix": ("close",),
    "cmo": ("close",),
    "ao": ("high", "low"),  # ao uses midprice (high, low)
    "apo": ("close",),
    "coppock": ("close",),
    "dpo": ("close",),
    "fisher": ("high", "low"),
    "rvgi": ("open", "high", "low", "close"),
    "kst": ("close",),
    "stdev": ("close",),
    "linreg": ("close",),
    "slope": ("close",),
    "vwma": ("close", "volume"),
    "obv": ("close", "volume"),
    "pvt": ("close", "volume"),
    "efi": ("close", "volume"),
    # High + Low
    "hl2": ("high", "low"),
    "midprice": ("high", "low"),
    # High + Low + Close
    "hlc3": ("high", "low", "close"),
    "atr": ("high", "low", "close"),
    "kc": ("high", "low", "close"),
    "donchian": ("high", "low", "close"),
    "stoch": ("high", "low", "close"),
    "stochrsi": ("high", "low", "close"),
    "cci": ("high", "low", "close"),
    "willr": ("high", "low", "close"),
    "adx": ("high", "low", "close"),
    "aroon": ("high", "low", "close"),
    "uo": ("high", "low", "close"),
    "psar": ("high", "low", "close"),
    "vortex": ("high", "low", "close"),
    "chop": ("high", "low", "close"),
    "supertrend": ("high", "low", "close"),
    "ichimoku": ("high", "low", "close"),
    # Open + High + Low + Close
    "ohlc4": ("open", "high", "low", "close"),
    "bop": ("open", "high", "low", "close"),
    # High + Low + Close + Volume
    "mfi": ("high", "low", "close", "volume"),
    "ad": ("high", "low", "close", "volume"),
    "adosc": ("high", "low", "close", "volume"),
    "cmf": ("high", "low", "close", "volume"),
    "eom": ("high", "low", "close", "volume"),
    "kvo": ("high", "low", "close", "volume"),
    # VWAP needs datetime index — handled specially
    "vwap": ("high", "low", "close", "volume"),
}
# Indicator names whose input tuple includes "volume".
# NOTE(review): not referenced anywhere in this module's visible code —
# evaluate_indicator recomputes the volume check inline. Confirm an external
# consumer exists before removing.
_NEEDS_VOLUME = {name for name, cols in _INPUTS.items() if "volume" in cols}
async def evaluate_indicator(
    symbol: str,
    from_time: Any,
    to_time: Any,
    period_seconds: int,
    pandas_ta_name: str,
    parameters: dict,
) -> list:
    """
    Fetch OHLC data and evaluate a pandas-ta (or custom) indicator.

    Args:
        symbol: Ticker to fetch OHLC data for
        from_time: Range start (Unix timestamp or date string)
        to_time: Range end (Unix timestamp or date string)
        period_seconds: Candle period in seconds
        pandas_ta_name: pandas-ta function name, or "custom_<name>" for a user indicator
        parameters: Keyword arguments forwarded to the indicator function

    Returns a list containing a single MCP TextContent with JSON:
        {
            "symbol": ...,
            "period_seconds": ...,
            "pandas_ta_name": ...,
            "parameters": {...},
            "candle_count": N,
            "columns": ["timestamp", "value"] or ["timestamp", "col1", "col2", ...],
            "values": [{"timestamp": <unix_s>, "value": <float|null>}, ...]
        }
    On error: {"error": str}
    """
    from mcp.types import TextContent
    try:
        import pandas_ta as ta
    except ImportError:
        return [TextContent(type="text", text=json.dumps({"error": "pandas_ta not installed"}))]
    name_lower = pandas_ta_name.lower()
    # For custom indicators, register them with pandas-ta first, then resolve
    # input columns from their stored metadata.
    if name_lower.startswith("custom_"):
        import os
        from dexorder.tools.python_tools import setup_custom_indicators, get_category_manager
        setup_custom_indicators(Path(os.environ.get("DATA_DIR", "data")))
        fn = getattr(ta, name_lower, None)
        if fn is None:
            return [TextContent(type="text", text=json.dumps({
                "error": (
                    f"Custom indicator '{pandas_ta_name}' not found after registering "
                    "custom indicators. Make sure the indicator was created with "
                    "python_write(category='indicator', name='...') and that its "
                    "implementation.py defines a function matching the sanitized name."
                )
            }))]
        # Get input_series from the indicator's metadata
        indicator_name = pandas_ta_name[len("custom_"):]
        mgr = get_category_manager()
        read_result = mgr.read("indicator", indicator_name)
        if read_result.get("exists") and read_result.get("metadata"):
            raw_series = read_result["metadata"].get("input_series") or ["close"]
            input_cols = tuple(raw_series)
        else:
            # Metadata missing or unreadable — fall back to close-only input.
            input_cols = ("close",)
    else:
        # Look up the pandas-ta function for built-in indicators
        fn = getattr(ta, name_lower, None)
        if fn is None:
            return [TextContent(type="text", text=json.dumps({
                "error": f"Unknown indicator '{pandas_ta_name}'. Check pandas_ta_name against the supported list."
            }))]
        # Determine required columns
        input_cols = _INPUTS.get(name_lower, ("close",))
    needs_volume = "volume" in input_cols
    # Fetch OHLC
    try:
        from dexorder.api import get_api
        api = get_api()
        df = await api.data.historical_ohlc(
            ticker=symbol,
            period_seconds=period_seconds,
            start_time=from_time,
            end_time=to_time,
            extra_columns=["volume"] if needs_volume else [],
        )
    except Exception as exc:
        log.exception("evaluate_indicator: OHLC fetch failed")
        return [TextContent(type="text", text=json.dumps({"error": f"OHLC fetch failed: {exc}"}))]
    if df.empty:
        return [TextContent(type="text", text=json.dumps({
            "error": f"No OHLC data for {symbol} in the requested range"
        }))]
    # VWAP already requires a DatetimeIndex — the OHLC df index is already a
    # DatetimeIndex, so no extra work needed here.
    # Build positional args
    args = []
    for col in input_cols:
        if col not in df.columns:
            return [TextContent(type="text", text=json.dumps({
                "error": f"Column '{col}' not in fetched dataframe (columns: {list(df.columns)})"
            }))]
        args.append(df[col])
    # Compute
    try:
        result = fn(*args, **parameters)
    except Exception as exc:
        log.exception("evaluate_indicator: computation failed")
        return [TextContent(type="text", text=json.dumps({
            "error": f"Indicator computation failed: {exc}"
        }))]
    # Convert DatetimeIndex → Unix seconds
    timestamps = (df.index.astype("int64") // 1_000_000_000).tolist()
    # Align the indicator output to the OHLC index before serialization.
    # Some indicators return fewer or shifted rows; without this, the
    # DataFrame path below could raise IndexError and the Series path would
    # silently truncate via zip(). Rows absent from the result serialize as null.
    if isinstance(result, (pd.Series, pd.DataFrame)):
        result = result.reindex(df.index)
    # Serialize output
    if isinstance(result, pd.DataFrame):
        columns = ["timestamp"] + list(result.columns)
        values = []
        # itertuples avoids the per-cell chained result.iloc[i][col] lookup.
        for ts, row_vals in zip(timestamps, result.itertuples(index=False, name=None)):
            row: dict[str, Any] = {"timestamp": int(ts)}
            for col, v in zip(result.columns, row_vals):
                row[col] = None if (isinstance(v, float) and pd.isna(v)) else float(v)
            values.append(row)
    elif isinstance(result, pd.Series):
        columns = ["timestamp", "value"]
        values = [
            {"timestamp": int(ts), "value": None if pd.isna(v) else float(v)}
            for ts, v in zip(timestamps, result.tolist())
        ]
    else:
        return [TextContent(type="text", text=json.dumps({
            "error": f"Unexpected indicator output type: {type(result).__name__}"
        }))]
    payload = {
        "symbol": symbol,
        "period_seconds": period_seconds,
        "pandas_ta_name": pandas_ta_name,
        "parameters": parameters,
        "candle_count": len(df),
        "columns": columns,
        "values": values,
    }
    return [TextContent(type="text", text=json.dumps(payload))]

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env python3
"""
Indicator harness — tests a custom indicator against synthetic OHLC data.
Runs in a subprocess so the indicator code is isolated from the MCP server process.
Usage: python indicator_harness.py <impl_path> <metadata_path>
Outputs JSON to stdout:
{
"success": bool,
"output": str, # human-readable summary of the indicator output
"error": str # error message / traceback if failed (null on success)
}
"""
import importlib.util
import json
import os
import sys
import traceback
import types
from pathlib import Path
# Ensure dexorder package is importable (same as research_harness.py)
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
# ---------------------------------------------------------------------------
# Synthetic OHLCV data — 200 deterministic bars, no network required
# ---------------------------------------------------------------------------
def make_synthetic_ohlcv(n: int = 200):
    """Build a deterministic synthetic OHLCV frame (seeded RNG, no network).

    The RNG draw order is fixed (returns, then wick noise, then volume), so
    every call with the same ``n`` yields identical data.
    """
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(42)
    # BTC-style price random walk for the closes.
    pct_moves = rng.normal(0, 0.015, n)
    close_px = 40_000.0 * np.cumprod(1.0 + pct_moves)
    # Each bar opens at the prior close; the first bar opens at its own close.
    open_px = np.concatenate(([close_px[0]], close_px[:-1]))
    # Wicks extend past the candle body by a small non-negative fraction.
    wick = np.abs(rng.normal(0, 0.005, n))
    high_px = np.maximum(open_px, close_px) * (1.0 + wick)
    low_px = np.minimum(open_px, close_px) * (1.0 - wick)
    vol = rng.uniform(1e6, 1e8, n)
    return pd.DataFrame({
        "open": open_px,
        "high": high_px,
        "low": low_px,
        "close": close_px,
        "volume": vol,
    })
def summarize(result, n: int) -> str:
    """Render a short human-readable description of an indicator's output."""
    import pandas as pd

    if isinstance(result, pd.Series):
        missing = int(result.isna().sum())
        kept = result.dropna()
        tail_vals = [round(float(x), 4) for x in kept.tail(5).values] if len(kept) else []
        return (
            f"Series({n} bars), NaN: {missing}/{n}, "
            f"last 5 valid values: {tail_vals}"
        )
    if isinstance(result, pd.DataFrame):
        col_names = list(result.columns)
        missing_per_col = {c: int(result[c].isna().sum()) for c in col_names}
        tail_per_col = {}
        for c in col_names:
            kept = result[c].dropna()
            if len(kept):
                tail_per_col[c] = [round(float(x), 4) for x in kept.tail(3).values]
        return (
            f"DataFrame({n} bars × {len(col_names)} cols {col_names}), "
            f"NaN counts: {missing_per_col}, last 3 valid per col: {tail_per_col}"
        )
    return f"Unexpected return type: {type(result).__name__}"
def main():
    """Subprocess entry point: validate a custom indicator against synthetic data.

    Reads implementation + metadata paths from argv, runs the indicator on a
    deterministic 200-bar OHLCV frame, and prints exactly one JSON object to
    stdout: {"success": bool, "output": str} or {"success": false, "error": str}.

    Exit codes: 1 only for usage errors; all handled failures exit 0 because
    the error is already reported in the JSON payload — the parent process
    reads stdout, not the exit status.
    """
    if len(sys.argv) < 3:
        print(json.dumps({"success": False, "error": "Usage: indicator_harness.py <impl_path> <metadata_path>"}))
        sys.exit(1)
    impl_path = sys.argv[1]
    metadata_path = sys.argv[2]
    # --- Load metadata ---
    # Defaults apply when metadata lacks the fields entirely.
    input_series = ["close"]
    parameters: dict = {}
    try:
        with open(metadata_path) as f:
            meta = json.load(f)
        input_series = meta.get("input_series") or ["close"]
        param_schema = meta.get("parameters") or {}
        # Accept both schema form ({"default": ...}) and bare legacy values;
        # schema entries without a "default" are deliberately skipped.
        for pname, pinfo in param_schema.items():
            if isinstance(pinfo, dict) and "default" in pinfo:
                parameters[pname] = pinfo["default"]
            elif not isinstance(pinfo, dict):
                # bare value (legacy)
                parameters[pname] = pinfo
    except Exception as e:
        print(json.dumps({"success": False, "error": f"Failed to read metadata: {e}"}))
        sys.exit(0)
    # --- Generate synthetic data ---
    try:
        import numpy  # noqa: F401 — verify numpy available
        import pandas as pd
    except ImportError as e:
        print(json.dumps({"success": False, "error": f"Missing required package: {e}"}))
        sys.exit(0)
    df = make_synthetic_ohlcv(n=200)
    n = len(df)
    # --- Load implementation ---
    # Import under a throwaway module name so nothing collides with this script.
    try:
        spec = importlib.util.spec_from_file_location("_indicator_impl", impl_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore[union-attr]
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Import failed:\n{tb}"}))
        sys.exit(0)
    # --- Find the indicator function ---
    # Prefer a function whose name matches the sanitized directory name,
    # fall back to the first public function in the module.
    fn_name = os.path.basename(os.path.dirname(impl_path)).lower()
    fn = getattr(module, fn_name, None)
    if fn is None:
        candidates = [
            v for k, v in vars(module).items()
            if isinstance(v, types.FunctionType) and not k.startswith("_")
        ]
        fn = candidates[0] if candidates else None
    if fn is None:
        print(json.dumps({"success": False, "error": "No callable function found in implementation.py"}))
        sys.exit(0)
    # --- Build positional args from input_series ---
    # Each requested column is passed positionally, in metadata order.
    args = []
    for col in input_series:
        if col not in df.columns:
            print(json.dumps({"success": False, "error": f"input_series '{col}' not in synthetic df columns {list(df.columns)}"}))
            sys.exit(0)
        args.append(df[col])
    # --- Execute ---
    try:
        result = fn(*args, **parameters)
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Execution failed:\n{tb}"}))
        sys.exit(0)
    # --- Validate output type ---
    if not isinstance(result, (pd.Series, pd.DataFrame)):
        print(json.dumps({
            "success": False,
            "error": (
                f"Indicator must return pd.Series or pd.DataFrame, "
                f"got {type(result).__name__}. "
                "Wrap the output if using pandas-ta internally."
            ),
        }))
        sys.exit(0)
    print(json.dumps({"success": True, "output": summarize(result, n)}))


if __name__ == "__main__":
    main()

View File

@@ -30,8 +30,9 @@ from typing import Any, Optional
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
# Path to the research harness script (written to disk, not inline) # Path to the harness scripts (written to disk, not inline)
_RESEARCH_HARNESS = Path(__file__).parent / "research_harness.py" _RESEARCH_HARNESS = Path(__file__).parent / "research_harness.py"
_INDICATOR_HARNESS = Path(__file__).parent / "indicator_harness.py"
# Import conda manager for package installation # Import conda manager for package installation
try: try:
@@ -62,12 +63,15 @@ class BaseMetadata:
@dataclass @dataclass
class StrategyMetadata(BaseMetadata): class StrategyMetadata(BaseMetadata):
"""Metadata for trading strategies.""" """Metadata for trading strategies."""
data_feeds: list[str] = None # Required data feeds (e.g., ["BTC/USD", "ETH/USD"]) data_feeds: list[dict] = None # Required data feeds: [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "..."}]
parameters: dict = None # Strategy parameters: {"param_name": {"default": value, "description": "..."}}
conda_packages: list[str] = None # Additional conda packages required conda_packages: list[str] = None # Additional conda packages required
def __post_init__(self): def __post_init__(self):
if self.data_feeds is None: if self.data_feeds is None:
self.data_feeds = [] self.data_feeds = []
if self.parameters is None:
self.parameters = {}
if self.conda_packages is None: if self.conda_packages is None:
self.conda_packages = [] self.conda_packages = []
@@ -75,12 +79,78 @@ class StrategyMetadata(BaseMetadata):
@dataclass @dataclass
class IndicatorMetadata(BaseMetadata): class IndicatorMetadata(BaseMetadata):
"""Metadata for technical indicators.""" """Metadata for technical indicators."""
default_length: int = 14 # Default period/length parameter
conda_packages: list[str] = None # Additional conda packages required conda_packages: list[str] = None # Additional conda packages required
# Fields for TradingView custom study auto-construction:
parameters: dict = None
# Parameter schema: {param_name: {type: "int"|"float"|"bool"|"string",
# default: value, description?: str, min?: num, max?: num}}
# Example: {"length": {"type": "int", "default": 14, "min": 1, "max": 500}}
input_series: list = None
# OHLCV columns the indicator function receives as positional args.
# Valid values: "open", "high", "low", "close", "volume"
# Example: ["close"] or ["high", "low", "close", "volume"]
output_columns: list = None
# Output series produced by the function. Each entry:
# {
# name: str, # column name (or "value" for plain Series)
# display_name?: str, # label shown in TV legend
# description?: str,
# plot?: {
# style: int, # LineStudyPlotStyle: 0=Line, 1=Histogram, 3=Dots/Cross,
# # 4=Area, 5=Columns, 6=Circles, 9=StepLine
# color?: str, # CSS hex e.g. "#2196F3" (auto-assigned if omitted)
# linewidth?: int, # 14 (default 2)
# visible?: bool # default true
# }
# }
# Example (single line): [{"name": "value", "display_name": "My Indicator"}]
# Example (multi-line): [{"name": "upper", "plot": {"style": 0}}, {"name": "lower", "plot": {"style": 0}}]
# Example (histogram): [{"name": "value", "plot": {"style": 1}}]
# Example (MACD-style): [{"name": "macd", "plot": {"style": 0}}, {"name": "signal", "plot": {"style": 0}}, {"name": "hist", "plot": {"style": 1}}]
pane: str = "separate"
# Where to render: "price" (overlaid on candles) or "separate" (sub-pane)
filled_areas: list = None
# Optional shaded regions between two plots or two bands. Each entry:
# {
# id: str, # unique id e.g. "fill_upper_lower"
# type: str, # "plot_plot" (between two series) or "hline_hline" (between two bands)
# series1: str, # output_column name (for plot_plot) or band id (for hline_hline)
# series2: str,
# color?: str, # CSS hex fill color (default semi-transparent blue)
# opacity?: float # 0.01.0 (default 0.1)
# }
# Example (Bollinger fill): [{"id": "fill", "type": "plot_plot", "series1": "upper", "series2": "lower", "color": "#2196F3", "opacity": 0.1}]
bands: list = None
# Optional horizontal reference lines (e.g. RSI overbought/oversold). Each entry:
# {
# id: str, # unique id e.g. "ob"
# value: float, # fixed y-level
# color?: str, # CSS hex (default "#787B86")
# linewidth?: int, # default 1
# linestyle?: int, # 0=solid, 1=dotted, 2=dashed (default 2)
# visible?: bool # default true
# }
# Example (RSI levels): [{"id": "ob", "value": 70}, {"id": "os", "value": 30}]
def __post_init__(self): def __post_init__(self):
if self.conda_packages is None: if self.conda_packages is None:
self.conda_packages = [] self.conda_packages = []
if self.input_series is None:
self.input_series = ["close"]
if self.output_columns is None:
self.output_columns = [{"name": "value"}]
if self.parameters is None:
self.parameters = {}
if self.filled_areas is None:
self.filled_areas = []
if self.bands is None:
self.bands = []
@dataclass @dataclass
@@ -141,21 +211,212 @@ def get_category_path(data_dir: Path, category: Category, name: str) -> Path:
return data_dir / category.value / safe_name return data_dir / category.value / safe_name
# =============================================================================
# Git Manager
# =============================================================================
class GitManager:
"""
Thin wrapper around git subprocess calls for category revision tracking.
All operations are non-fatal: errors are logged as warnings.
"""
def __init__(self, repo_dir: Path):
self.repo_dir = repo_dir
def _run(self, *args, check: bool = True) -> subprocess.CompletedProcess:
return subprocess.run(
["git"] + list(args),
cwd=self.repo_dir,
capture_output=True,
text=True,
check=check,
)
def ensure_init(self):
"""Init git repo if not exists; initial commit if files already present."""
if (self.repo_dir / ".git").exists():
return
self._run("init", "-b", "main")
self._run("config", "user.email", "sandbox@dexorder.ai")
self._run("config", "user.name", "Dexorder Sandbox")
# Commit any pre-existing files (migrated from old layout)
status = self._run("status", "--porcelain")
if status.stdout.strip():
self._run("add", "-A")
self._run("commit", "-m", "init: migrate existing category files")
log.info(f"Git repo initialized at {self.repo_dir}")
def commit(self, message: str) -> Optional[str]:
"""Stage all changes and commit. Returns short hash or None if nothing to commit / on error."""
try:
self._run("add", "-A")
status = self._run("status", "--porcelain")
if not status.stdout.strip():
return None # nothing changed
self._run("commit", "-m", message)
result = self._run("rev-parse", "--short", "HEAD")
return result.stdout.strip()
except Exception as e:
log.warning(f"Git commit failed (non-fatal): {e}")
return None
def log(self, path: Optional[Path] = None, n: int = 20) -> list[dict]:
"""Return recent commits, optionally filtered to a path."""
cmd = ["log", f"-{n}", "--pretty=format:%H|%h|%s|%ai"]
if path:
cmd += ["--", str(path.relative_to(self.repo_dir))]
result = self._run(*cmd, check=False)
entries = []
for line in result.stdout.strip().splitlines():
if line:
parts = line.split("|", 3)
if len(parts) == 4:
entries.append({
"hash": parts[0],
"short_hash": parts[1],
"message": parts[2],
"date": parts[3],
})
return entries
def restore(self, revision: str, path: Optional[Path] = None) -> Optional[str]:
"""Restore path (or entire tree) to revision state. Returns new commit hash."""
try:
rel = str(path.relative_to(self.repo_dir)) if path else "."
self._run("checkout", revision, "--", rel)
return self.commit(f"revert: restore to {revision[:8]}")
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr.strip()) from e
# =============================================================================
# Custom Indicator Setup
# =============================================================================
def setup_custom_indicators(data_dir: Path) -> None:
"""
Register user's custom indicators with pandas-ta.
Loads each indicator's implementation.py directly via importlib and binds
the function as ``ta.custom_{sanitized_name}`` so that evaluate_indicator
can call it as ``getattr(ta, "custom_trendflex", None)``.
The binding is idempotent indicators already registered are skipped.
Note: pandas-ta's ta.import_dir() requires a category-based directory
structure (e.g. tmpdir/momentum/trendflex.py) plus a companion
``{name}_method`` function. Our indicators don't follow that convention,
so we bind directly instead.
"""
try:
import pandas_ta as ta
except ImportError:
log.warning("pandas-ta not available — custom indicators will not be registered")
return
src_dir = data_dir / "src"
indicator_root = src_dir / "indicator"
if not indicator_root.exists():
return
import importlib.util
import types
# Track which sanitized names we've seen to handle duplicate directories
# (e.g. "TrendFlex" and "trendflex" both sanitise to "custom_trendflex").
seen: set[str] = set()
registered = 0
# Sort so that exact-lowercase names (e.g. "trendflex") come before mixed-case
# variants (e.g. "TrendFlex") — when duplicates exist the lowercase one wins.
for ind_dir in sorted(indicator_root.iterdir(), key=lambda p: (p.name != p.name.lower(), p.name.lower())):
if not ind_dir.is_dir():
continue
impl = ind_dir / "implementation.py"
if not impl.exists():
continue
sanitized = ind_dir.name.lower().replace("-", "_").replace(" ", "_")
ta_name = f"custom_{sanitized}"
if ta_name in seen:
log.warning(
"Duplicate custom indicator name '%s' from directory '%s' — skipping",
ta_name, ind_dir.name,
)
continue
seen.add(ta_name)
# Skip if already bound (e.g. called multiple times in a process)
if getattr(ta, ta_name, None) is not None:
continue
try:
spec = importlib.util.spec_from_file_location(ta_name, impl)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore[union-attr]
# Find the callable: prefer the function whose name matches the
# sanitized directory name, fall back to any top-level function.
fn = getattr(module, sanitized, None)
if fn is None:
candidates = [
v for k, v in vars(module).items()
if isinstance(v, types.FunctionType) and not k.startswith("_")
]
fn = candidates[0] if candidates else None
if fn is None:
log.warning("No callable found in %s — skipping", impl)
continue
setattr(ta, ta_name, fn)
registered += 1
log.debug("Registered custom indicator '%s' from %s", ta_name, impl)
except Exception:
log.warning("Could not register indicator '%s':", ind_dir.name, exc_info=True)
if registered > 0:
log.info("Registered %d custom indicator(s) with pandas-ta", registered)
# ============================================================================= # =============================================================================
# Category File Manager # Category File Manager
# ============================================================================= # =============================================================================
class CategoryFileManager: class CategoryFileManager:
""" """
Manages category-based file operations with validation. Manages category-based file operations with validation and git revision tracking.
Category files live under {data_dir}/src/ which is the git repo root.
Workspace and other ephemeral data remain under {data_dir}/ but outside the repo.
""" """
def __init__(self, data_dir: Path): def __init__(self, data_dir: Path):
self.data_dir = data_dir self.data_dir = data_dir
# Ensure category directories exist src = self.src_dir
for category in Category: src.mkdir(parents=True, exist_ok=True)
(data_dir / category.value).mkdir(parents=True, exist_ok=True)
# Migrate: move existing top-level category dirs into src/ (one-time)
for cat in Category:
old = data_dir / cat.value
new = src / cat.value
if old.exists() and not new.exists():
old.rename(new)
log.info(f"Migrated {old}{new}")
else:
new.mkdir(exist_ok=True)
# Init git repo in src/
self.git = GitManager(src)
self.git.ensure_init()
@property
def src_dir(self) -> Path:
"""Root of the versioned category code (git repo root)."""
return self.data_dir / "src"
def write( def write(
self, self,
@@ -191,7 +452,7 @@ class CategoryFileManager:
} }
# Get item directory # Get item directory
item_dir = get_category_path(self.data_dir, cat, name) item_dir = get_category_path(self.src_dir, cat, name)
item_dir.mkdir(parents=True, exist_ok=True) item_dir.mkdir(parents=True, exist_ok=True)
# Write implementation # Write implementation
@@ -228,11 +489,19 @@ class CategoryFileManager:
"validation": validation, "validation": validation,
} }
# Auto-execute research scripts after successful write # Auto-execute after successful write to give the agent immediate runtime feedback
if cat == Category.RESEARCH and validation["success"]: if validation["success"]:
log.info(f"Auto-executing research script: {name}") if cat == Category.RESEARCH:
execution_result = self.execute_research(name) log.info(f"Auto-executing research script: {name}")
result["execution"] = execution_result result["execution"] = self.execute_research(name)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test: {name}")
result["execution"] = self._execute_indicator(item_dir)
# Commit to git
commit_hash = self.git.commit(f"create({category}): {name}")
if commit_hash:
result["revision"] = commit_hash
return result return result
@@ -241,6 +510,7 @@ class CategoryFileManager:
category: str, category: str,
name: str, name: str,
code: Optional[str] = None, code: Optional[str] = None,
patches: Optional[list[dict]] = None,
description: Optional[str] = None, description: Optional[str] = None,
metadata: Optional[dict] = None metadata: Optional[dict] = None
) -> dict[str, Any]: ) -> dict[str, Any]:
@@ -250,7 +520,8 @@ class CategoryFileManager:
Args: Args:
category: Category name category: Category name
name: Display name for the item name: Display name for the item
code: Python implementation code (optional, omit to keep existing) code: Full Python implementation code to replace existing (optional)
patches: List of {old_string, new_string} replacements (optional, preferred for small changes)
description: Updated description (optional, omit to keep existing) description: Updated description (optional, omit to keep existing)
metadata: Additional metadata updates (optional) metadata: Additional metadata updates (optional)
@@ -261,12 +532,15 @@ class CategoryFileManager:
- validation: dict - results from test harness (if code updated) - validation: dict - results from test harness (if code updated)
- error: str (if any) - error: str (if any)
""" """
if code is not None and patches is not None:
return {"success": False, "error": "Provide either 'code' or 'patches', not both"}
try: try:
cat = Category(category) cat = Category(category)
except ValueError: except ValueError:
return {"success": False, "error": f"Invalid category '{category}'"} return {"success": False, "error": f"Invalid category '{category}'"}
item_dir = get_category_path(self.data_dir, cat, name) item_dir = get_category_path(self.src_dir, cat, name)
if not item_dir.exists(): if not item_dir.exists():
return {"success": False, "error": f"Item '{name}' does not exist in category '{category}'"} return {"success": False, "error": f"Item '{name}' does not exist in category '{category}'"}
@@ -282,8 +556,28 @@ class CategoryFileManager:
except Exception as e: except Exception as e:
return {"success": False, "error": f"Failed to read existing metadata: {e}"} return {"success": False, "error": f"Failed to read existing metadata: {e}"}
# Update code if provided # Apply string-replacement patches if provided
if code is not None: if patches is not None:
if not impl_path.exists():
return {"success": False, "error": "Cannot patch: implementation file does not exist"}
try:
current_code = impl_path.read_text()
for i, patch in enumerate(patches):
old = patch.get("old_string", "")
new = patch.get("new_string", "")
if old not in current_code:
return {"success": False, "error": f"Patch {i}: old_string not found in file"}
if current_code.count(old) > 1:
return {"success": False, "error": f"Patch {i}: old_string is not unique — add more surrounding context"}
current_code = current_code.replace(old, new, 1)
impl_path.write_text(current_code)
log.info(f"Applied {len(patches)} patch(es) to {impl_path}")
code = current_code # trigger validation below
except Exception as e:
return {"success": False, "error": f"Failed to apply patches: {e}"}
# Update code if provided (full replace)
if code is not None and patches is None:
try: try:
impl_path.write_text(code) impl_path.write_text(code)
log.info(f"Updated {cat.value} implementation: {impl_path}") log.info(f"Updated {cat.value} implementation: {impl_path}")
@@ -321,11 +615,21 @@ class CategoryFileManager:
result["validation"] = validation result["validation"] = validation
result["success"] = validation["success"] result["success"] = validation["success"]
# Auto-execute research scripts after successful edit (if code was updated) # Auto-execute after successful edit to give the agent immediate runtime feedback
if cat == Category.RESEARCH and code is not None and result["success"]: if code is not None and result["success"]:
log.info(f"Auto-executing research script after edit: {name}") if cat == Category.RESEARCH:
execution_result = self.execute_research(name) log.info(f"Auto-executing research script after edit: {name}")
result["execution"] = execution_result result["execution"] = self.execute_research(name)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test after edit: {name}")
result["execution"] = self._execute_indicator(item_dir)
# Commit to git if code changed
if code is not None and result["success"]:
action = "patch" if patches is not None else "edit"
commit_hash = self.git.commit(f"{action}({category}): {name}")
if commit_hash:
result["revision"] = commit_hash
return result return result
@@ -349,7 +653,7 @@ class CategoryFileManager:
except ValueError: except ValueError:
return {"exists": False, "error": f"Invalid category '{category}'"} return {"exists": False, "error": f"Invalid category '{category}'"}
item_dir = get_category_path(self.data_dir, cat, name) item_dir = get_category_path(self.src_dir, cat, name)
if not item_dir.exists(): if not item_dir.exists():
return {"exists": False} return {"exists": False}
@@ -385,7 +689,7 @@ class CategoryFileManager:
except ValueError: except ValueError:
return {"error": f"Invalid category '{category}'"} return {"error": f"Invalid category '{category}'"}
cat_dir = self.data_dir / cat.value cat_dir = self.src_dir / cat.value
items = [] items = []
for item_dir in cat_dir.iterdir(): for item_dir in cat_dir.iterdir():
@@ -487,33 +791,58 @@ class CategoryFileManager:
def _validate_indicator(self, impl_path: Path) -> dict[str, Any]: def _validate_indicator(self, impl_path: Path) -> dict[str, Any]:
""" """
Validate an indicator implementation. Validate an indicator by running it against synthetic OHLC data.
Runs basic syntax check and imports. Uses indicator_harness.py in a subprocess so the indicator code is
isolated from the MCP server process. Catches import errors, runtime
errors, and wrong return types not just syntax.
""" """
meta_path = impl_path.parent / "metadata.json"
return self._execute_indicator(impl_path.parent, timeout=30)
def _execute_indicator(self, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
"""
Run an indicator against synthetic OHLC data via indicator_harness.py.
Returns:
dict with success, output (human-readable summary), error
"""
impl_path = item_dir / "implementation.py"
meta_path = item_dir / "metadata.json"
if not impl_path.exists():
return {"success": False, "error": "implementation.py not found"}
if not meta_path.exists():
return {"success": False, "error": "metadata.json not found"}
try: try:
result = subprocess.run( result = subprocess.run(
[sys.executable, "-m", "py_compile", str(impl_path)], [sys.executable, str(_INDICATOR_HARNESS), str(impl_path), str(meta_path)],
capture_output=True, capture_output=True,
text=True, text=True,
timeout=10, timeout=timeout,
cwd=str(item_dir),
) )
if result.returncode == 0:
return {
"success": True,
"output": "Indicator syntax valid",
}
else:
return {
"success": False,
"output": result.stderr,
"error": "Syntax error in indicator",
}
except subprocess.TimeoutExpired: except subprocess.TimeoutExpired:
return {"success": False, "error": "Validation timeout"} return {"success": False, "error": f"Indicator test timed out after {timeout}s"}
except Exception as e: except Exception as e:
return {"success": False, "error": f"Validation failed: {e}"} return {"success": False, "error": f"Harness launch failed: {e}"}
if result.returncode != 0:
return {
"success": False,
"error": f"Harness process failed:\n{result.stderr}",
}
try:
data = json.loads(result.stdout)
except json.JSONDecodeError:
return {
"success": False,
"error": f"Harness produced invalid JSON:\n{result.stdout[:500]}",
}
return data
def _run_research_harness(self, impl_path: Path, item_dir: Path, timeout: int = 30) -> dict[str, Any]: def _run_research_harness(self, impl_path: Path, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
""" """
@@ -594,7 +923,7 @@ class CategoryFileManager:
- content: list of TextContent and ImageContent objects (MCP format) - content: list of TextContent and ImageContent objects (MCP format)
- error: str (if any) - error: str (if any)
""" """
item_dir = get_category_path(self.data_dir, Category.RESEARCH, name) item_dir = get_category_path(self.src_dir, Category.RESEARCH, name)
if not item_dir.exists(): if not item_dir.exists():
return {"error": f"Research script '{name}' does not exist"} return {"error": f"Research script '{name}' does not exist"}
@@ -654,6 +983,66 @@ class CategoryFileManager:
return {"content": content} return {"content": content}
def git_log(
self,
category: Optional[str] = None,
name: Optional[str] = None,
limit: int = 20
) -> dict[str, Any]:
"""
List recent git commits, optionally filtered to a category or item.
Returns:
dict with:
- success: bool
- commits: list of {hash, short_hash, message, date}
"""
path = None
if category:
try:
cat = Category(category)
except ValueError:
return {"success": False, "error": f"Invalid category '{category}'"}
if name:
path = get_category_path(self.src_dir, cat, name)
else:
path = self.src_dir / cat.value
entries = self.git.log(path=path, n=limit)
return {"success": True, "commits": entries}
def git_revert(self, revision: str, category: str, name: str) -> dict[str, Any]:
"""
Restore a category item to a previous git revision (creates a new commit).
Returns:
dict with:
- success: bool
- revision: str - new commit hash
- validation: dict
- error: str (if any)
"""
try:
cat = Category(category)
except ValueError:
return {"success": False, "error": f"Invalid category '{category}'"}
item_dir = get_category_path(self.src_dir, cat, name)
if not item_dir.exists():
return {"success": False, "error": f"Item '{name}' not found in '{category}'"}
try:
commit_hash = self.git.restore(revision, path=item_dir)
except RuntimeError as e:
return {"success": False, "error": str(e)}
validation = self._validate(cat, item_dir)
return {
"success": validation["success"],
"revision": commit_hash,
"validation": validation,
}
# ============================================================================= # =============================================================================
# Global Manager Instance # Global Manager Instance
# ============================================================================= # =============================================================================

View File

@@ -3,7 +3,7 @@
Research script harness - runs implementation.py in a subprocess with API Research script harness - runs implementation.py in a subprocess with API
initialization, stdout/stderr capture, and matplotlib figure capture. initialization, stdout/stderr capture, and matplotlib figure capture.
This file is written to disk and invoked by category_tools.py rather than This file is written to disk and invoked by python_tools.py rather than
being passed inline via `python -c`, so the harness code is inspectable and being passed inline via `python -c`, so the harness code is inspectable and
not regenerated on every call. not regenerated on every call.
@@ -77,6 +77,16 @@ try:
except Exception as e: except Exception as e:
print(f"WARNING: API initialization failed: {e}", file=sys.stderr) print(f"WARNING: API initialization failed: {e}", file=sys.stderr)
# ---------------------------------------------------------------------------
# Register custom indicators so research scripts can use df.ta.my_indicator()
# ---------------------------------------------------------------------------
try:
from dexorder.tools.python_tools import setup_custom_indicators
_data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
setup_custom_indicators(_data_dir)
except Exception as e:
print(f"WARNING: Custom indicator registration failed: {e}", file=sys.stderr)
def main(): def main():
if len(sys.argv) < 2: if len(sys.argv) < 2:

View File

@@ -43,6 +43,23 @@ class WorkspaceStore:
# Map of "store_name/json/pointer/path" -> list of callbacks # Map of "store_name/json/pointer/path" -> list of callbacks
self._triggers: dict[str, list[Callable[[Any, Any], None]]] = {} self._triggers: dict[str, list[Callable[[Any, Any], None]]] = {}
def _ensure_intermediate_paths(self, state: dict, patch: list[dict]) -> dict:
"""Create missing intermediate objects for deep patch paths (mirrors gateway logic)."""
import copy
state = copy.deepcopy(state)
for op in patch:
if op.get("op") not in ("add", "replace"):
continue
parts = [p for p in op.get("path", "").split("/") if p]
if len(parts) <= 1:
continue
current = state
for part in parts[:-1]:
if not isinstance(current.get(part), dict):
current[part] = {}
current = current[part]
return state
def _store_path(self, store_name: str) -> Path: def _store_path(self, store_name: str) -> Path:
"""Get the filesystem path for a store.""" """Get the filesystem path for a store."""
# Sanitize store name to prevent directory traversal # Sanitize store name to prevent directory traversal
@@ -136,6 +153,9 @@ class WorkspaceStore:
with open(path, "r") as f: with open(path, "r") as f:
old_state = json.load(f) old_state = json.load(f)
# Create missing intermediate objects for deep paths (mirrors gateway logic)
old_state = self._ensure_intermediate_paths(old_state, patch)
# Apply patch # Apply patch
new_state = jsonpatch.apply_patch(old_state, patch) new_state = jsonpatch.apply_patch(old_state, patch)

View File

@@ -50,3 +50,4 @@ dependencies:
- starlette>=0.27.0 - starlette>=0.27.0
- uvicorn>=0.27.0 - uvicorn>=0.27.0
- sse-starlette>=1.6.0 - sse-starlette>=1.6.0
- nautilus_trader>=1.200.0

View File

@@ -10,6 +10,7 @@ Brings together:
""" """
import asyncio import asyncio
import contextlib
import logging import logging
import os import os
import signal import signal
@@ -20,8 +21,8 @@ from typing import Optional
import uvicorn import uvicorn
import yaml import yaml
from mcp.server import Server from mcp.server import Server
from mcp.server.sse import SseServerTransport
from mcp.server.stdio import stdio_server from mcp.server.stdio import stdio_server
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from mcp.types import Tool, TextContent, ImageContent from mcp.types import Tool, TextContent, ImageContent
from starlette.applications import Starlette from starlette.applications import Starlette
from starlette.requests import Request from starlette.requests import Request
@@ -34,8 +35,11 @@ from dexorder.conda_manager import sync_packages, install_packages
from dexorder.events import EventType, UserEvent, DeliverySpec from dexorder.events import EventType, UserEvent, DeliverySpec
from dexorder.impl.charting_api_impl import ChartingAPIImpl from dexorder.impl.charting_api_impl import ChartingAPIImpl
from dexorder.impl.data_api_impl import DataAPIImpl from dexorder.impl.data_api_impl import DataAPIImpl
from dexorder.tools.category_tools import get_category_manager from dexorder.tools.python_tools import get_category_manager
from dexorder.tools.workspace_tools import get_workspace_store from dexorder.tools.workspace_tools import get_workspace_store
from dexorder.tools.evaluate_indicator import evaluate_indicator
from dexorder.tools.backtest_strategy import backtest_strategy
from dexorder.tools.activate_strategy import activate_strategy, deactivate_strategy, list_active_strategies
# ============================================================================= # =============================================================================
# Global Data Directory # Global Data Directory
@@ -249,7 +253,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
} }
), ),
Tool( Tool(
name="category_write", name="python_write",
description="Write a new strategy, indicator, or research script with validation", description="Write a new strategy, indicator, or research script with validation",
inputSchema={ inputSchema={
"type": "object", "type": "object",
@@ -273,15 +277,27 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}, },
"metadata": { "metadata": {
"type": "object", "type": "object",
"description": "Optional category-specific metadata (e.g., default_length for indicators, data_feeds for strategies)" "description": (
"Optional category-specific metadata. "
"For strategy: include 'data_feeds' (list of {symbol, period_seconds, description}) "
"and 'parameters' (object mapping param_name → {default, description}). "
"Example: {\"data_feeds\": [{\"symbol\": \"BTC/USDT.BINANCE\", \"period_seconds\": 3600, \"description\": \"Primary BTC/USDT hourly feed\"}], "
"\"parameters\": {\"rsi_length\": {\"default\": 14, \"description\": \"RSI lookback period\"}, \"threshold\": {\"default\": 70, \"description\": \"Overbought level\"}}}. "
"For indicator: include 'default_length' (int). "
"For any category: 'conda_packages' (list of package names) if extra dependencies are needed."
)
} }
}, },
"required": ["category", "name", "description", "code"] "required": ["category", "name", "description", "code"]
} }
), ),
Tool( Tool(
name="category_edit", name="python_edit",
description="Edit an existing category script (updates code, description, or metadata)", description=(
"Edit an existing category script. "
"Use 'patches' for targeted string replacements (preferred for small changes), "
"or 'code' to replace the full implementation. Do not supply both."
),
inputSchema={ inputSchema={
"type": "object", "type": "object",
"properties": { "properties": {
@@ -296,7 +312,24 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}, },
"code": { "code": {
"type": "string", "type": "string",
"description": "Updated Python code (optional, omit to keep existing)" "description": "Full replacement Python code. Use only when rewriting the entire implementation; prefer 'patches' for targeted edits."
},
"patches": {
"type": "array",
"description": (
"Targeted code edits as old/new string pairs. Preferred over 'code' for small changes. "
"Each patch: {\"old_string\": \"exact text to find\", \"new_string\": \"replacement text\"}. "
"old_string must be unique in the file (add surrounding context if needed). "
"Patches are applied in order."
),
"items": {
"type": "object",
"properties": {
"old_string": {"type": "string"},
"new_string": {"type": "string"}
},
"required": ["old_string", "new_string"]
}
}, },
"description": { "description": {
"type": "string", "type": "string",
@@ -304,14 +337,20 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}, },
"metadata": { "metadata": {
"type": "object", "type": "object",
"description": "Updated metadata fields (optional)" "description": (
"Updated metadata fields (optional). "
"For strategy: 'data_feeds' (list of {symbol, period_seconds, description}) "
"and/or 'parameters' (object mapping param_name → {default, description}). "
"For indicator: 'default_length' (int). "
"For any category: 'conda_packages' (list of package names)."
)
} }
}, },
"required": ["category", "name"] "required": ["category", "name"]
} }
), ),
Tool( Tool(
name="category_read", name="python_read",
description="Read a category script and its metadata", description="Read a category script and its metadata",
inputSchema={ inputSchema={
"type": "object", "type": "object",
@@ -330,7 +369,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
} }
), ),
Tool( Tool(
name="category_list", name="python_list",
description="List all items in a category with names and descriptions", description="List all items in a category with names and descriptions",
inputSchema={ inputSchema={
"type": "object", "type": "object",
@@ -344,6 +383,53 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"required": ["category"] "required": ["category"]
} }
), ),
Tool(
name="python_log",
description="Show git commit history for category items. Filter by category and/or name to see history for a specific item.",
inputSchema={
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Filter to this category (optional)"
},
"name": {
"type": "string",
"description": "Filter to this item (optional, requires category)"
},
"limit": {
"type": "integer",
"description": "Max commits to return (default 20)",
"default": 20
}
},
"required": []
}
),
Tool(
name="python_revert",
description="Restore a category item to a previous git revision. Creates a new commit — non-destructive.",
inputSchema={
"type": "object",
"properties": {
"revision": {
"type": "string",
"description": "Git commit hash (full or short) to restore to"
},
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category of the item"
},
"name": {
"type": "string",
"description": "Display name of the item to restore"
}
},
"required": ["revision", "category", "name"]
}
),
Tool( Tool(
name="conda_sync", name="conda_sync",
description="Sync conda packages: scan all metadata, remove unused packages (excluding base environment)", description="Sync conda packages: scan all metadata, remove unused packages (excluding base environment)",
@@ -381,13 +467,179 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}, },
"required": ["name"] "required": ["name"]
} }
) ),
Tool(
name="evaluate_indicator",
description=(
"Evaluate a pandas-ta indicator against real OHLC data and return a structured "
"array of timestamped values. Use this to validate that an indicator computes "
"correctly before adding it to the workspace, or to inspect its output values."
),
inputSchema={
"type": "object",
"properties": {
"symbol": {
"type": "string",
"description": "Market symbol in 'MARKET.EXCHANGE' format, e.g. 'BTC/USDT.BINANCE'"
},
"from_time": {
"description": "Start of time range. Unix timestamp (int) or date string e.g. '30 days ago', '2024-01-01'"
},
"to_time": {
"description": "End of time range. Unix timestamp (int) or date string e.g. 'now', '2024-03-01'"
},
"period_seconds": {
"type": "integer",
"description": "Candle period in seconds (e.g. 3600 for 1h, 900 for 15m, 86400 for 1d)",
"default": 3600
},
"pandas_ta_name": {
"type": "string",
"description": "Lowercase pandas-ta function name, e.g. 'rsi', 'macd', 'bbands'"
},
"parameters": {
"type": "object",
"description": "pandas-ta keyword arguments, e.g. {\"length\": 14} or {\"fast\": 12, \"slow\": 26, \"signal\": 9}",
"default": {}
}
},
"required": ["symbol", "from_time", "to_time", "pandas_ta_name"]
}
),
Tool(
name="backtest_strategy",
description=(
"Run a saved trading strategy against historical OHLC data using Nautilus Trader "
"BacktestEngine. Returns performance metrics (total return, Sharpe ratio, "
"max drawdown, win rate, trade count) and a full equity curve. "
"Supports multiple data feeds and includes order-flow fields (buy_vol, sell_vol, "
"open_interest) in the strategy's DataFrame."
),
inputSchema={
"type": "object",
"properties": {
"strategy_name": {
"type": "string",
"description": "Display name of the strategy as saved via python_write"
},
"feeds": {
"type": "array",
"description": "Data feeds to backtest against",
"items": {
"type": "object",
"properties": {
"symbol": {
"type": "string",
"description": "Market symbol in 'MARKET.EXCHANGE' format, e.g. 'BTC/USDT.BINANCE'"
},
"period_seconds": {
"type": "integer",
"description": "Candle period in seconds (e.g. 3600 for 1h)",
"default": 3600
}
},
"required": ["symbol"]
},
"minItems": 1
},
"from_time": {
"description": "Backtest start. Unix timestamp or date string e.g. '2024-01-01', '90 days ago'"
},
"to_time": {
"description": "Backtest end. Unix timestamp or date string e.g. '2025-01-01', 'now'"
},
"initial_capital": {
"type": "number",
"description": "Starting capital in quote currency (e.g. 10000.0 USDT)",
"default": 10000.0
},
"paper": {
"type": "boolean",
"description": "Always true for historical backtest (reserved for forward testing)",
"default": True
}
},
"required": ["strategy_name", "feeds", "from_time", "to_time"]
}
),
Tool(
name="activate_strategy",
description=(
"Activate a strategy for paper or live forward trading with a capital allocation. "
"paper=true (default): simulated fills on live data — no API keys required. "
"paper=false: real execution via user secrets vault (not yet implemented). "
"Note: live data streaming is TBD; this registers the strategy for when it becomes available."
),
inputSchema={
"type": "object",
"properties": {
"strategy_name": {
"type": "string",
"description": "Display name of the strategy as saved via python_write"
},
"feeds": {
"type": "array",
"description": "Data feeds for the strategy",
"items": {
"type": "object",
"properties": {
"symbol": {"type": "string"},
"period_seconds": {"type": "integer", "default": 3600}
},
"required": ["symbol"]
},
"minItems": 1
},
"allocation": {
"type": "number",
"description": "Capital allocated in quote currency (e.g. 5000.0 USDT)"
},
"paper": {
"type": "boolean",
"description": "True = paper/simulated (default); False = live execution",
"default": True
}
},
"required": ["strategy_name", "feeds", "allocation"]
}
),
Tool(
name="deactivate_strategy",
description="Stop an active strategy and return its final P&L summary.",
inputSchema={
"type": "object",
"properties": {
"strategy_name": {
"type": "string",
"description": "Display name of the active strategy to stop"
}
},
"required": ["strategy_name"]
}
),
Tool(
name="list_active_strategies",
description="List all currently active (live or paper) strategies and their status.",
inputSchema={
"type": "object",
"properties": {},
"required": []
}
),
] ]
@server.call_tool() @server.call_tool()
async def handle_tool_call(name: str, arguments: dict): async def handle_tool_call(name: str, arguments: dict):
"""Handle tool calls including workspace and category tools""" """Handle tool calls including workspace and category tools"""
get_lifecycle_manager().record_activity()
try:
return await _handle_tool_call_inner(name, arguments)
except Exception:
logging.exception("Unhandled exception in tool '%s'", name)
raise
async def _handle_tool_call_inner(name: str, arguments: dict):
if name == "workspace_read": if name == "workspace_read":
return workspace_store.read(arguments.get("store_name", "")) return workspace_store.read(arguments.get("store_name", ""))
elif name == "workspace_write": elif name == "workspace_write":
@@ -400,7 +652,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
arguments.get("store_name", ""), arguments.get("store_name", ""),
arguments.get("patch", []) arguments.get("patch", [])
) )
elif name == "category_write": elif name == "python_write":
result = category_manager.write( result = category_manager.write(
category=arguments.get("category", ""), category=arguments.get("category", ""),
name=arguments.get("name", ""), name=arguments.get("name", ""),
@@ -410,6 +662,8 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
) )
content = [] content = []
meta_parts = [f"success: {result['success']}", f"path: {result['path']}"] meta_parts = [f"success: {result['success']}", f"path: {result['path']}"]
if result.get("revision"):
meta_parts.append(f"revision: {result['revision']}")
if result.get("validation") and not result["validation"].get("success"): if result.get("validation") and not result["validation"].get("success"):
meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}") meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}")
content.append(TextContent(type="text", text="\n".join(meta_parts))) content.append(TextContent(type="text", text="\n".join(meta_parts)))
@@ -417,20 +671,23 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
exec_content = result["execution"].get("content", []) exec_content = result["execution"].get("content", [])
content.extend(exec_content) content.extend(exec_content)
image_count = sum(1 for item in exec_content if item.type == "image") image_count = sum(1 for item in exec_content if item.type == "image")
logging.info(f"category_write '{arguments.get('name')}': returning {len(content)} items, {image_count} images") logging.info(f"python_write '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
else: else:
logging.info(f"category_write '{arguments.get('name')}': no execution result (category={arguments.get('category')})") logging.info(f"python_write '{arguments.get('name')}': no execution result (category={arguments.get('category')})")
return content return content
elif name == "category_edit": elif name == "python_edit":
result = category_manager.edit( result = category_manager.edit(
category=arguments.get("category", ""), category=arguments.get("category", ""),
name=arguments.get("name", ""), name=arguments.get("name", ""),
code=arguments.get("code"), code=arguments.get("code"),
patches=arguments.get("patches"),
description=arguments.get("description"), description=arguments.get("description"),
metadata=arguments.get("metadata") metadata=arguments.get("metadata")
) )
content = [] content = []
meta_parts = [f"success: {result['success']}", f"path: {result['path']}"] meta_parts = [f"success: {result['success']}", f"path: {result['path']}"]
if result.get("revision"):
meta_parts.append(f"revision: {result['revision']}")
if result.get("validation") and not result["validation"].get("success"): if result.get("validation") and not result["validation"].get("success"):
meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}") meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}")
content.append(TextContent(type="text", text="\n".join(meta_parts))) content.append(TextContent(type="text", text="\n".join(meta_parts)))
@@ -438,19 +695,43 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
exec_content = result["execution"].get("content", []) exec_content = result["execution"].get("content", [])
content.extend(exec_content) content.extend(exec_content)
image_count = sum(1 for item in exec_content if item.type == "image") image_count = sum(1 for item in exec_content if item.type == "image")
logging.info(f"category_edit '{arguments.get('name')}': returning {len(content)} items, {image_count} images") logging.info(f"python_edit '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
else: else:
logging.info(f"category_edit '{arguments.get('name')}': no execution result") logging.info(f"python_edit '{arguments.get('name')}': no execution result")
return content return content
elif name == "category_read": elif name == "python_read":
return category_manager.read( return category_manager.read(
category=arguments.get("category", ""), category=arguments.get("category", ""),
name=arguments.get("name", "") name=arguments.get("name", "")
) )
elif name == "category_list": elif name == "python_list":
return category_manager.list_items( return category_manager.list_items(
category=arguments.get("category", "") category=arguments.get("category", "")
) )
elif name == "python_log":
result = category_manager.git_log(
category=arguments.get("category"),
name=arguments.get("name"),
limit=int(arguments.get("limit", 20))
)
lines = [f"success: {result['success']}"]
for c in result.get("commits", []):
lines.append(f"{c['short_hash']} {c['date'][:10]} {c['message']}")
return [TextContent(type="text", text="\n".join(lines))]
elif name == "python_revert":
result = category_manager.git_revert(
revision=arguments.get("revision", ""),
category=arguments.get("category", ""),
name=arguments.get("name", "")
)
meta_parts = [f"success: {result['success']}"]
if result.get("revision"):
meta_parts.append(f"revision: {result['revision']}")
if result.get("error"):
meta_parts.append(f"error: {result['error']}")
if result.get("validation") and not result["validation"].get("success"):
meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}")
return [TextContent(type="text", text="\n".join(meta_parts))]
elif name == "conda_sync": elif name == "conda_sync":
# Get environment.yml path relative to main.py # Get environment.yml path relative to main.py
env_yml = Path(__file__).parent / "environment.yml" env_yml = Path(__file__).parent / "environment.yml"
@@ -469,6 +750,37 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
image_count = sum(1 for item in content if item.type == "image") image_count = sum(1 for item in content if item.type == "image")
logging.info(f"execute_research '{arguments.get('name')}': returning {len(content)} items, {image_count} images") logging.info(f"execute_research '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
return content return content
elif name == "evaluate_indicator":
return await evaluate_indicator(
symbol=arguments.get("symbol", ""),
from_time=arguments.get("from_time"),
to_time=arguments.get("to_time"),
period_seconds=int(arguments.get("period_seconds", 3600)),
pandas_ta_name=arguments.get("pandas_ta_name", ""),
parameters=arguments.get("parameters") or {},
)
elif name == "backtest_strategy":
return await backtest_strategy(
strategy_name=arguments.get("strategy_name", ""),
feeds=arguments.get("feeds", []),
from_time=arguments.get("from_time"),
to_time=arguments.get("to_time"),
initial_capital=float(arguments.get("initial_capital", 10_000.0)),
paper=bool(arguments.get("paper", True)),
)
elif name == "activate_strategy":
return await activate_strategy(
strategy_name=arguments.get("strategy_name", ""),
feeds=arguments.get("feeds", []),
allocation=float(arguments.get("allocation", 0.0)),
paper=bool(arguments.get("paper", True)),
)
elif name == "deactivate_strategy":
return await deactivate_strategy(
strategy_name=arguments.get("strategy_name", ""),
)
elif name == "list_active_strategies":
return await list_active_strategies()
else: else:
raise ValueError(f"Unknown tool: {name}") raise ValueError(f"Unknown tool: {name}")
@@ -477,26 +789,18 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
# ============================================================================= # =============================================================================
# SSE Transport Setup # Streamable HTTP Transport Setup
# ============================================================================= # =============================================================================
def create_sse_app(mcp_server: Server) -> Starlette: def create_streamable_http_app(mcp_server: Server) -> Starlette:
"""Create Starlette app with SSE endpoint for MCP""" """Create Starlette app with Streamable HTTP endpoint for MCP"""
# Create SSE transport instance session_manager = StreamableHTTPSessionManager(app=mcp_server)
sse = SseServerTransport("/messages/")
async def handle_sse(request: Request) -> Response: @contextlib.asynccontextmanager
"""Handle SSE connections for MCP""" async def lifespan(app: Starlette):
async with sse.connect_sse( async with session_manager.run():
request.scope, request.receive, request._send yield
) as streams:
await mcp_server.run(
streams[0],
streams[1],
mcp_server.create_initialization_options()
)
return Response()
async def handle_health(request: Request) -> Response: async def handle_health(request: Request) -> Response:
"""Health check endpoint for k8s probes and gateway readiness checks""" """Health check endpoint for k8s probes and gateway readiness checks"""
@@ -506,9 +810,9 @@ def create_sse_app(mcp_server: Server) -> Starlette:
) )
app = Starlette( app = Starlette(
lifespan=lifespan,
routes=[ routes=[
Route("/sse", handle_sse), Mount("/mcp", app=session_manager.handle_request),
Mount("/messages/", app=sse.handle_post_message),
Route("/health", handle_health), Route("/health", handle_health),
] ]
) )
@@ -648,9 +952,9 @@ class UserContainer:
self.mcp_server.create_initialization_options() self.mcp_server.create_initialization_options()
) )
elif self.config.mcp_transport == "sse": elif self.config.mcp_transport == "sse":
# Run MCP server via HTTP/SSE (for production) # Run MCP server via Streamable HTTP (for production)
logging.info(f"Starting MCP server with SSE transport on {self.config.mcp_http_host}:{self.config.mcp_http_port}") logging.info(f"Starting MCP server with Streamable HTTP transport on {self.config.mcp_http_host}:{self.config.mcp_http_port}")
app = create_sse_app(self.mcp_server) app = create_streamable_http_app(self.mcp_server)
config = uvicorn.Config( config = uvicorn.Config(
app, app,
host=self.config.mcp_http_host, host=self.config.mcp_http_host,

View File

@@ -4,9 +4,11 @@ import Card from 'primevue/card'
import { createTradingViewDatafeed } from '../composables/useTradingViewDatafeed' import { createTradingViewDatafeed } from '../composables/useTradingViewDatafeed'
import { useTradingViewShapes } from '../composables/useTradingViewShapes' import { useTradingViewShapes } from '../composables/useTradingViewShapes'
import { useTradingViewIndicators } from '../composables/useTradingViewIndicators' import { useTradingViewIndicators } from '../composables/useTradingViewIndicators'
import { useCustomIndicators, getCustomIndicatorsGetter } from '../composables/useCustomIndicators'
import { useChartStore } from '../stores/chart' import { useChartStore } from '../stores/chart'
import type { IChartingLibraryWidget } from '../types/tradingview' import type { IChartingLibraryWidget } from '../types/tradingview'
import { intervalToSeconds } from '../utils' import { intervalToSeconds } from '../utils'
import { wsManager } from '../composables/useWebSocket'
// Convert seconds to TradingView interval string // Convert seconds to TradingView interval string
function secondsToInterval(seconds: number): string { function secondsToInterval(seconds: number): string {
@@ -22,12 +24,25 @@ let datafeed: any = null
let isUpdatingFromChart = false // Flag to prevent circular updates let isUpdatingFromChart = false // Flag to prevent circular updates
let shapeCleanup: (() => void) | null = null // Cleanup function for shape sync let shapeCleanup: (() => void) | null = null // Cleanup function for shape sync
let indicatorCleanup: (() => void) | null = null // Cleanup function for indicator sync let indicatorCleanup: (() => void) | null = null // Cleanup function for indicator sync
let customIndicatorCleanup: (() => void) | null = null // Cleanup for custom TV studies
let chartInitialized = false // Guard against double-init on reconnect
const maybeInitChart = () => {
if (chartInitialized || !chartContainer.value) return
chartInitialized = true
initChart()
}
onMounted(() => { onMounted(() => {
if (!chartContainer.value) return // Wait for workspace to be ready (persistent stores loaded from container)
// before initializing TradingView, so stores are populated when onChartReady fires.
watch(wsManager.sessionStatus, (status) => {
if (status === 'ready') maybeInitChart()
}, { immediate: true })
})
// Wait for TradingView library to load // Wait for TradingView library to load
const initChart = () => { function initChart() {
if (!window.TradingView) { if (!window.TradingView) {
setTimeout(initChart, 100) setTimeout(initChart, 100)
return return
@@ -43,16 +58,23 @@ onMounted(() => {
container: chartContainer.value!, container: chartContainer.value!,
library_path: '/charting_library/', library_path: '/charting_library/',
locale: 'en', locale: 'en',
// Register the two generic custom study dispatch types.
// Must be provided here — TV has no dynamic study registration API.
custom_indicators_getter: getCustomIndicatorsGetter(),
disabled_features: [ disabled_features: [
'use_localstorage_for_settings', 'use_localstorage_for_settings',
'header_symbol_search', 'header_symbol_search',
'symbol_search_hot_key' 'symbol_search_hot_key'
], ],
enabled_features: [], enabled_features: [],
// Restrict indicators to only those supported by both TA-Lib and TradingView // Restrict indicators to only those supported by both TA-Lib and TradingView.
// Custom AI-generated indicators (from custom_indicators_getter) must also be listed here.
studies_access: { studies_access: {
type: 'white', type: 'white',
tools: [ tools: [
// AI custom indicator dispatch studies
{ name: 'dxo_customstudy_overlay' },
{ name: 'dxo_customstudy_pane' },
// Overlap Studies (14) // Overlap Studies (14)
{ name: 'Moving Average' }, { name: 'Moving Average' },
{ name: 'Moving Average Exponential' }, { name: 'Moving Average Exponential' },
@@ -150,15 +172,13 @@ onMounted(() => {
if (tvWidget) { if (tvWidget) {
shapeCleanup = useTradingViewShapes(tvWidget) shapeCleanup = useTradingViewShapes(tvWidget)
indicatorCleanup = useTradingViewIndicators(tvWidget) indicatorCleanup = useTradingViewIndicators(tvWidget)
customIndicatorCleanup = useCustomIndicators(tvWidget)
} }
}) })
} catch (error) { } catch (error) {
console.error('Failed to initialize TradingView widget:', error) console.error('Failed to initialize TradingView widget:', error)
} }
} }
initChart()
})
function initializeVisibleRange() { function initializeVisibleRange() {
if (!tvWidget) return if (!tvWidget) return
@@ -281,6 +301,12 @@ onBeforeUnmount(() => {
indicatorCleanup = null indicatorCleanup = null
} }
// Cleanup custom TV studies
if (customIndicatorCleanup) {
customIndicatorCleanup()
customIndicatorCleanup = null
}
if (tvWidget) { if (tvWidget) {
tvWidget.remove() tvWidget.remove()
tvWidget = null tvWidget = null

View File

@@ -238,14 +238,7 @@ const handleMessage = (data: WebSocketMessage) => {
// Stop agent processing // Stop agent processing
const stopAgent = () => { const stopAgent = () => {
// Send empty message to trigger interrupt without new agent round wsManager.send({ type: 'agent_stop', session_id: SESSION_ID })
const wsMessage = {
type: 'agent_user_message',
session_id: SESSION_ID,
content: '',
attachments: []
}
wsManager.send(wsMessage)
isAgentProcessing.value = false isAgentProcessing.value = false
removeToolCallBubble() removeToolCallBubble()
lastSentMessageId = null lastSentMessageId = null
@@ -586,7 +579,9 @@ onUnmounted(() => {
} }
.workspace-loading { .workspace-loading {
flex: 1; position: fixed;
inset: 0;
z-index: 9999;
display: flex; display: flex;
flex-direction: column; flex-direction: column;
align-items: center; align-items: center;
@@ -639,7 +634,7 @@ onUnmounted(() => {
.stop-button-container { .stop-button-container {
position: absolute; position: absolute;
bottom: 80px; bottom: 80px;
left: 20px; right: 20px;
z-index: 1000; z-index: 1000;
} }

View File

@@ -0,0 +1,551 @@
/**
* TradingView custom study integration for pandas-ta custom indicators.
*
* Architecture overview
* ---------------------
* TV's custom study API only allows registering study types via the
* `custom_indicators_getter` widget constructor option — there is no
* dynamic registration API (createCustomStudy does not exist on the widget
* or chart APIs).
*
* To support custom indicators that arrive at runtime (e.g. from the AI
* agent), we pre-register two generic dispatch studies in
* `custom_indicators_getter`:
*
* dxo_customstudy_overlay — is_price_study: true (drawn on price pane)
* dxo_customstudy_pane — is_price_study: false (separate pane)
*
* Each has a single text input `_cfg` (a config key) and MAX_PLOTS
* line plots. The constructor dispatches to `customStudyRegistry[cfgKey]`
* to look up the per-indicator configuration and data.
*
* These study type names MUST also appear in the `studies_access` whitelist
* in ChartView.vue — TV treats unlisted studies as nonexistent.
*
* Registration flow
* -----------------
* 1. Widget constructor calls getCustomIndicatorsGetter() which registers
* the two generic study types.
* 2. When a custom_ indicator appears in the store, registerCustomStudy():
* a. Stores the config in customStudyRegistry under a unique cfgKey.
* b. Calls chart.createStudy('dxo_customstudy_*', ..., { _cfg: cfgKey }).
* c. Calls study.setStudyTitle(indicator name) for a human-readable header.
* 3. TV calls the study's init(ctx, inputs):
* a. Reads symbol/period from ctx; builds the data cache key.
* b. Fires an async evaluateIndicator WebSocket request.
* 4. When data arrives the constructor calls the registered refreshCallback
* which calls IStudyApi.setInputValues() with a new config key, causing
* TV to re-run init()+main() with the now-populated cache.
*
* IMPORTANT: call getCustomIndicatorsGetter() and pass it as the
* `custom_indicators_getter` option when creating the TradingView widget.
*/
import { watch } from 'vue'
import { useIndicatorStore, type IndicatorInstance, type CustomIndicatorMetadata } from '../stores/indicators'
import { useChartStore } from '../stores/chart'
import { wsManager, type MessageHandler } from './useWebSocket'
import { intervalToSeconds } from '../utils'
// ---------------------------------------------------------------------------
// WebSocket helper — evaluate_indicator request/response
// ---------------------------------------------------------------------------
/** Shape of a successful `evaluate_indicator_result` WebSocket message. */
interface EvaluateResult {
  symbol: string
  period_seconds: number              // bar period in seconds
  pandas_ta_name: string              // server-side indicator identifier
  parameters: Record<string, any>     // parameters the evaluation ran with
  candle_count: number                // candle count reported by the server
  columns: string[]                   // output column names
  values: Array<Record<string, any>>  // one row per bar; includes `timestamp` in Unix seconds
  error?: string                      // present when the server-side evaluation failed
}
/**
 * Request a one-shot server-side indicator evaluation over the WebSocket.
 *
 * Sends an `evaluate_indicator` message tagged with a unique request id and
 * resolves with the matching `evaluate_indicator_result` message. Rejects if
 * the server reports an error or if no matching reply arrives within
 * `timeoutMs`. The message handler is always removed on settle.
 */
function evaluateIndicator(
  symbol: string,
  fromTime: number,
  toTime: number,
  periodSeconds: number,
  pandasTaName: string,
  parameters: Record<string, any>,
  timeoutMs = 30_000
): Promise<EvaluateResult> {
  // Unique id ties the async response back to this specific request.
  const requestId = `cind_${Date.now()}_${Math.random().toString(36).substring(7)}`

  return new Promise((resolve, reject) => {
    // Give up (and stop listening) if no matching response arrives in time.
    const timeoutId = window.setTimeout(() => {
      wsManager.removeHandler(onResult)
      reject(new Error(`evaluate_indicator timeout for ${pandasTaName}`))
    }, timeoutMs)

    const onResult: MessageHandler = (message: any) => {
      // Ignore unrelated traffic and replies meant for other requests.
      if (message.type !== 'evaluate_indicator_result' || message.request_id !== requestId) {
        return
      }
      clearTimeout(timeoutId)
      wsManager.removeHandler(onResult)
      if (message.error) {
        reject(new Error(message.error))
      } else {
        resolve(message as EvaluateResult)
      }
    }

    // Register the listener before sending to avoid racing an immediate reply.
    wsManager.addHandler(onResult)
    wsManager.send({
      type: 'evaluate_indicator',
      request_id: requestId,
      symbol,
      from_time: fromTime,
      to_time: toTime,
      period_seconds: periodSeconds,
      pandas_ta_name: pandasTaName,
      parameters,
    })
  })
}
// ---------------------------------------------------------------------------
// Data cache — keyed by "indicatorId_symbol_periodSeconds_paramsHash"
// Each entry maps timestamp-ms → row object
// ---------------------------------------------------------------------------
// One row of indicator output values keyed by column name; null = no value at that bar.
type DataRow = Record<string, number | null>
// Module-level cache of fetched indicator data. Outer key is built by cacheKey();
// inner map is bar-timestamp-ms → row, built by buildDataCache().
const dataCache = new Map<string, Map<number, DataRow>>()
/** Compose the dataCache key from its four components, underscore-delimited. */
function cacheKey(indicatorId: string, symbol: string, periodSeconds: number, paramsHash: string): string {
  return [indicatorId, symbol, String(periodSeconds), paramsHash].join('_')
}
/** Index evaluation rows by bar timestamp (milliseconds) for O(1) lookup in main(). */
function buildDataCache(result: EvaluateResult): Map<number, DataRow> {
  // Server timestamps are Unix seconds; TV bar times are milliseconds.
  return new Map<number, DataRow>(
    result.values.map((point) => [(point.timestamp as number) * 1000, point as DataRow])
  )
}
// ---------------------------------------------------------------------------
// Custom study registry — config map shared between getter and composable
// ---------------------------------------------------------------------------
/** Per-instance configuration looked up by the generic study constructor. */
interface CustomStudyEntry {
  indicatorId: string   // store id of the indicator instance
  pandasTaName: string  // server-side indicator name (custom_*)
  parameters: Record<string, any>   // indicator parameters sent with the evaluate request
  metadata: CustomIndicatorMetadata // plot/pane configuration from the store
}
// cfgKey → per-instance config; populated by registerCustomStudy()
const customStudyRegistry = new Map<string, CustomStudyEntry>()
// indicatorId → callback(newCfgKey); set by registerCustomStudy()
// Called by the constructor when async data arrives to trigger TV re-run.
const refreshCallbacks = new Map<string, (newCfgKey: string) => void>()
// TradingView widget reference — set by useCustomIndicators() so the
// constructor can query the current visible range.
let _tvWidget: any = null
// ---------------------------------------------------------------------------
// Generic study design constants
// ---------------------------------------------------------------------------
// Fixed number of line plots declared on each generic study — TV metainfo is
// static, so this caps how many output columns a custom indicator can draw.
const MAX_PLOTS = 8
// Default per-plot colors; cycled when an indicator has more series than colors.
const MULTI_LINE_COLORS = [
  '#2196F3', '#FF9800', '#4CAF50', '#E91E63', '#9C27B0',
  '#00BCD4', '#FF5722', '#795548',
]
// ---------------------------------------------------------------------------
// Custom indicators getter
// Pass the result of this function as the widget option:
// custom_indicators_getter: getCustomIndicatorsGetter()
//
// The study type names must also be listed in studies_access in ChartView.vue.
// ---------------------------------------------------------------------------
/**
 * Build the `custom_indicators_getter` callback for the TradingView widget.
 *
 * Registers two generic dispatch study types (overlay and separate-pane
 * variants), each with MAX_PLOTS line plots and a single `_cfg` text input
 * that carries a key into `customStudyRegistry`. See the file header comment
 * for the full registration/refresh flow.
 */
export function getCustomIndicatorsGetter(): (_PineJS: any) => Promise<any[]> {
  // Build one generic study descriptor (metainfo + ES5 constructor).
  function makeGenericStudy(name: string, isPriceStudy: boolean): any {
    const plots = Array.from({ length: MAX_PLOTS }, (_, i) => ({ id: `plot_${i}`, type: 'line' }))
    const styles: Record<string, any> = {}
    const defaultStyles: Record<string, any> = {}
    for (let i = 0; i < MAX_PLOTS; i++) {
      styles[`plot_${i}`] = { title: `Plot ${i}` }
      defaultStyles[`plot_${i}`] = {
        linestyle: 0,
        linewidth: 1,
        plottype: 0,
        color: MULTI_LINE_COLORS[i % MULTI_LINE_COLORS.length],
        // Only the first plot is visible by default; applyStudyOverrides()
        // in useCustomIndicators enables the rest per indicator metadata.
        visible: i === 0,
      }
    }
    return {
      name,
      metainfo: {
        _metainfoVersion: 51,
        // Use @tv-custom-1 — @tv-basicstudies-1 is reserved for built-ins
        // and TV throws "unexpected study id" if a custom indicator uses it.
        id: `${name}@tv-custom-1`,
        scriptIdPart: '',
        name,
        description: name,
        shortDescription: name,
        is_price_study: isPriceStudy,
        isCustomIndicator: true,
        format: { type: 'inherit' },
        // Single text input carries the per-instance config key.
        inputs: [
          { id: '_cfg', name: 'Config Key', type: 'text', defval: '' },
        ],
        plots,
        styles,
        defaults: {
          inputs: { _cfg: '' },
          styles: defaultStyles,
        },
      },
      // ES5 constructor — TV instantiates this with `new`
      constructor: function (this: any) {
        // Per-instance mutable state stored on the constructor instance
        let _cfgKey = '' // current config key (from inputs(0))
        let _dataKey = '' // data cache key (built from ctx symbol/period/params)
        let _fetchGen = 0 // incremented each init(); used to cancel stale fetches

        // Called by TV when the study (re)initializes, e.g. on input, symbol,
        // or resolution change; kicks off the async data fetch if needed.
        this.init = function (ctx: any, inputs: (i: number) => any) {
          const cfgKey = inputs(0) as string
          _cfgKey = cfgKey
          _fetchGen++
          const myGen = _fetchGen

          const entry = customStudyRegistry.get(cfgKey)
          if (!entry) return

          // Derive symbol and period from the TV context object.
          // ctx.symbol.ticker — symbol name without exchange prefix
          // ctx.symbol.period — TV interval string ("15", "1D", etc.)
          const symbol: string = ctx.symbol.ticker
          const periodStr: string = ctx.symbol.period
          const periodSeconds = intervalToSeconds(periodStr)

          const paramsHash = JSON.stringify(entry.parameters)
          const dk = cacheKey(entry.indicatorId, symbol, periodSeconds, paramsHash)
          _dataKey = dk

          if (dataCache.has(dk)) return // Data already fetched for this symbol/period/params

          // Determine time range: prefer chart's visible range, fall back to 500-bar window
          let fromTime: number
          let toTime: number
          const now = Math.floor(Date.now() / 1000)
          toTime = now
          fromTime = now - periodSeconds * 500
          if (_tvWidget) {
            try {
              const range = _tvWidget.activeChart().getVisibleRange()
              if (range?.from && range?.to) {
                // Extend 50% beyond the left edge so scroll-back has data.
                const dur = Math.floor(range.to) - Math.floor(range.from)
                fromTime = Math.floor(range.from) - Math.floor(dur * 0.5)
                toTime = Math.floor(range.to)
              }
            } catch { /* chart not yet ready */ }
          }

          // Capture mutable vars before async gap
          const capturedDk = dk
          const capturedCfgKey = cfgKey
          evaluateIndicator(symbol, fromTime, toTime, periodSeconds, entry.pandasTaName, entry.parameters)
            .then((result) => {
              if (myGen !== _fetchGen) return // Superseded by a newer init() call
              dataCache.set(capturedDk, buildDataCache(result))
              // Create a sibling config key pointing to the same entry.
              // Calling setInputValues() with this new key causes TV to
              // re-invoke init()+main() with the now-populated cache.
              const refreshKey = `${capturedCfgKey}__r`
              customStudyRegistry.set(refreshKey, entry)
              const cb = refreshCallbacks.get(entry.indicatorId)
              if (cb) cb(refreshKey)
            })
            .catch((err) => {
              console.error('[CustomIndicators] Failed to fetch data for', entry.pandasTaName, err)
            })
        }

        // Called by TV once per bar; returns one value per declared plot.
        // Bars with no cached row (or unmapped plot slots) render as NaN gaps.
        this.main = function (ctx: any, _inputs: (i: number) => any) {
          // ctx.symbol.bartime() returns the bar timestamp in milliseconds (documented)
          const ts: number = ctx.symbol.bartime()
          if (!_cfgKey || !_dataKey) return new Array(MAX_PLOTS).fill(NaN)
          const entry = customStudyRegistry.get(_cfgKey)
          if (!entry) return new Array(MAX_PLOTS).fill(NaN)
          const cache = dataCache.get(_dataKey)
          if (!cache) return new Array(MAX_PLOTS).fill(NaN)
          const row = cache.get(ts)
          return Array.from({ length: MAX_PLOTS }, (_, i) => {
            const col = entry.metadata.output_columns[i]
            return col && row ? (row[col.name] as number) ?? NaN : NaN
          })
        }
      },
    }
  }

  return (_PineJS: any): Promise<any[]> => {
    return Promise.resolve([
      makeGenericStudy('dxo_customstudy_overlay', true),
      makeGenericStudy('dxo_customstudy_pane', false),
    ])
  }
}
// ---------------------------------------------------------------------------
// Main composable
// ---------------------------------------------------------------------------
/**
 * Vue composable that syncs `custom_*` indicators from the Pinia indicator
 * store onto the TradingView chart as instances of the generic dispatch
 * studies registered via getCustomIndicatorsGetter().
 *
 * Must be called from within tvWidget.onChartReady() (ChartView does this).
 * Returns a cleanup function that removes all registered studies and resets
 * module-level state.
 */
export function useCustomIndicators(tvWidget: any) {
  // Expose the widget to the study constructor (for visible-range queries).
  _tvWidget = tvWidget
  const indicatorStore = useIndicatorStore()
  const chartStore = useChartStore()

  // Maps indicator id → { cfgKey, tvStudyId, symbol }
  const registered = new Map<string, { cfgKey: string; tvStudyId: string | null; symbol: string }>()
  // Monotonic version counter per indicator for unique config keys
  const cfgVersions = new Map<string, number>()
  // Last-seen parameter hash per indicator id for change detection.
  // Needed because Pinia $patch mutates in place (oldValue === newValue).
  const lastParams = new Map<string, string>()
  let isChartReady = false

  // Build a unique, TV-input-safe config key for this indicator instance.
  function nextCfgKey(indicatorId: string): string {
    const v = (cfgVersions.get(indicatorId) || 0) + 1
    cfgVersions.set(indicatorId, v)
    return `cfg_${indicatorId.replace(/[^a-zA-Z0-9]/g, '_')}_v${v}`
  }

  // Apply per-indicator visual overrides after createStudy() returns.
  // Uses per-column plot config (style, color, linewidth, visible) from metadata.
  function applyStudyOverrides(studyId: string, meta: CustomIndicatorMetadata) {
    try {
      const study = tvWidget.activeChart().getStudyById(studyId)
      if (!study) return
      const cols = meta.output_columns
      const overrides: Record<string, any> = {}
      for (let i = 0; i < MAX_PLOTS; i++) {
        const col = cols[i]
        if (col == null) {
          // Hide plot slots beyond the indicator's declared output columns.
          overrides[`styles.plot_${i}.visible`] = false
          continue
        }
        const p = col.plot
        overrides[`styles.plot_${i}.visible`] = p?.visible ?? true
        overrides[`styles.plot_${i}.plottype`] = p?.style ?? 0
        overrides[`styles.plot_${i}.linewidth`] = p?.linewidth ?? 2
        overrides[`styles.plot_${i}.linestyle`] = 0
        overrides[`styles.plot_${i}.color`] = p?.color ?? MULTI_LINE_COLORS[i % MULTI_LINE_COLORS.length]
      }
      // Note: TV band `value` is fixed at metainfo-declaration time and cannot be changed
      // via overrides. Indicators that need horizontal reference lines at configurable
      // values (e.g. RSI at 70/30) should instead include a constant-value output column
      // rather than relying on meta.bands.
      study.applyOverrides(overrides)
    } catch (err) {
      console.warn('[CustomIndicators] Could not apply overrides:', err)
    }
  }

  // ------------------------------------------------------------------
  // Register a custom indicator as a TV study instance
  // ------------------------------------------------------------------
  async function registerCustomStudy(indicator: IndicatorInstance) {
    const meta = indicator.custom_metadata
    if (!meta) {
      console.warn('[CustomIndicators] No custom_metadata on indicator:', indicator.id)
      return
    }

    const symbol = indicator.symbol || chartStore.symbol
    const cfgKey = nextCfgKey(indicator.id)
    const forceOverlay = meta.pane === 'price'
    const studyTypeName = meta.pane === 'price' ? 'dxo_customstudy_overlay' : 'dxo_customstudy_pane'

    // Store per-instance config in the registry so the constructor can find it
    customStudyRegistry.set(cfgKey, {
      indicatorId: indicator.id,
      pandasTaName: indicator.pandas_ta_name,
      parameters: indicator.parameters,
      metadata: meta,
    })

    // Register the callback invoked by the constructor after async data loads.
    // We change the study's _cfg input to a sibling key, which causes TV to
    // re-run init()+main() and pick up the freshly populated cache.
    refreshCallbacks.set(indicator.id, (newCfgKey: string) => {
      const entry = registered.get(indicator.id)
      if (!entry?.tvStudyId) return
      try {
        const study = tvWidget.activeChart().getStudyById(entry.tvStudyId)
        if (study) {
          registered.set(indicator.id, { ...entry, cfgKey: newCfgKey })
          study.setInputValues([{ id: '_cfg', value: newCfgKey }])
        }
      } catch (err) {
        console.warn('[CustomIndicators] Could not refresh study after data load:', err)
      }
    })

    try {
      const tvStudyId = (await tvWidget.activeChart().createStudy(
        studyTypeName, forceOverlay, false,
        { _cfg: cfgKey }
      )) as string | null
      registered.set(indicator.id, { cfgKey, tvStudyId: tvStudyId ?? null, symbol })
      lastParams.set(indicator.id, JSON.stringify(indicator.parameters))

      if (tvStudyId) {
        // Set human-readable panel title (falls back to pandas_ta_name if no display name)
        const displayName = meta.display_name || indicator.pandas_ta_name.replace(/^custom_/, '')
        try {
          const study = tvWidget.activeChart().getStudyById(tvStudyId)
          if (study && typeof study.setStudyTitle === 'function') {
            study.setStudyTitle(displayName)
          }
        } catch { /* setStudyTitle not available in this TV build */ }

        applyStudyOverrides(tvStudyId, meta)

        // Persist the TV study id back to the store so other code can map
        // store indicators to on-chart studies.
        if (tvStudyId !== indicator.tv_study_id) {
          indicatorStore.updateIndicator(indicator.id, { tv_study_id: tvStudyId })
        }
      }
      console.log('[CustomIndicators] Registered:', indicator.pandas_ta_name, '→', studyTypeName, '(', tvStudyId, ')')
    } catch (err) {
      console.error('[CustomIndicators] Failed to create TV custom study:', studyTypeName, err)
    }
  }

  // ------------------------------------------------------------------
  // Remove a custom study from the chart
  // ------------------------------------------------------------------
  function removeCustomStudy(indicatorId: string) {
    const entry = registered.get(indicatorId)
    if (!entry) return
    registered.delete(indicatorId)
    lastParams.delete(indicatorId)
    refreshCallbacks.delete(indicatorId)
    if (entry.tvStudyId) {
      try { tvWidget.activeChart().removeStudy(entry.tvStudyId) } catch { /* already gone */ }
    }
  }

  // ------------------------------------------------------------------
  // Re-register when parameters/symbol/period change (forces new data fetch)
  // ------------------------------------------------------------------
  async function refreshCustomStudy(indicator: IndicatorInstance) {
    // Purge stale cache entries so init() fetches fresh data
    for (const key of Array.from(dataCache.keys())) {
      if (key.startsWith(`${indicator.id}_`)) {
        dataCache.delete(key)
      }
    }
    removeCustomStudy(indicator.id)
    await registerCustomStudy(indicator)
  }

  // ------------------------------------------------------------------
  // Store watcher — respond to indicator additions, changes, removals
  //
  // NOTE: Pinia $patch mutates in place, so oldValue === newValue for
  // backend-originated updates. We track state manually via lastParams.
  // ------------------------------------------------------------------
  watch(
    () => indicatorStore.indicators,
    async (newIndicators) => {
      if (!isChartReady) return
      for (const [id, indicator] of Object.entries(newIndicators)) {
        // Only custom_* indicators are handled here; built-ins belong to
        // useTradingViewIndicators.
        if (!indicator.pandas_ta_name.startsWith('custom_')) continue
        if (!registered.has(id)) {
          lastParams.set(id, JSON.stringify(indicator.parameters))
          await registerCustomStudy(indicator)
        } else {
          const entry = registered.get(id)!
          const currParams = JSON.stringify(indicator.parameters)
          const prevParams = lastParams.get(id)
          const currSymbol = indicator.symbol || chartStore.symbol
          if (currParams !== prevParams || currSymbol !== entry.symbol) {
            lastParams.set(id, currParams)
            await refreshCustomStudy(indicator)
          }
        }
      }
      // Handle removals
      for (const id of registered.keys()) {
        if (!(id in newIndicators)) {
          lastParams.delete(id)
          removeCustomStudy(id)
        }
      }
    },
    { deep: true }
  )

  // Re-fetch when chart resolution changes
  watch(
    () => chartStore.period,
    () => {
      if (!isChartReady) return
      for (const [id, indicator] of Object.entries(indicatorStore.indicators)) {
        if (!indicator.pandas_ta_name.startsWith('custom_')) continue
        if (registered.has(id)) {
          lastParams.set(id, JSON.stringify(indicator.parameters))
          // NOTE(review): refreshCustomStudy is intentionally fire-and-forget
          // here; overlapping refreshes across indicators are possible —
          // confirm this is acceptable for the TV createStudy call ordering.
          refreshCustomStudy(indicator)
        }
      }
    }
  )

  // ------------------------------------------------------------------
  // Chart ready — apply any indicators already in the store
  // ------------------------------------------------------------------
  // useCustomIndicators is always called from within tvWidget.onChartReady in ChartView,
  // so the chart is already ready.
  isChartReady = true

  // TV processes custom_indicators_getter asynchronously (Promise microtask), so the
  // custom study types are not yet available at onChartReady time. Defer the initial
  // registration of any pending indicators until chart data loads — by that point the
  // getter Promise has resolved and the study types are registered in TV's internal
  // study index (and the studies_access whitelist check passes).
  let initialApplied = false
  tvWidget.activeChart().onDataLoaded().subscribe(null, () => {
    if (initialApplied) return
    initialApplied = true
    const pending = Object.values(indicatorStore.indicators).filter(
      (ind) => ind.pandas_ta_name.startsWith('custom_') && !registered.has(ind.id)
    )
    for (const indicator of pending) {
      lastParams.set(indicator.id, JSON.stringify(indicator.parameters))
      registerCustomStudy(indicator)
    }
  })

  // Cleanup
  return () => {
    for (const id of [...registered.keys()]) {
      removeCustomStudy(id)
    }
    registered.clear()
    cfgVersions.clear()
    lastParams.clear()
    // Only clear the module-level widget ref if it still points at us.
    if (_tvWidget === tvWidget) _tvWidget = null
    isChartReady = false
  }
}

View File

@@ -259,37 +259,17 @@ export class WebSocketDatafeed implements IBasicDataFeed {
const rawBars: any[] = response.history.bars || [] const rawBars: any[] = response.history.bars || []
// Parse bars, preserving null OHLC for gap bars (no trades that period) // All bars have non-null prices — ingestor forward-fills interior gaps.
const parsedBars: Bar[] = rawBars.map((bar: any) => { const bars: Bar[] = rawBars.map((bar: any) => ({
if (bar.open === null || bar.close === null) { time: bar.time * 1000,
return { time: bar.time * 1000, open: null, high: null, low: null, close: null } open: parseFloat(bar.open) / denoms.tick,
} high: parseFloat(bar.high) / denoms.tick,
return { low: parseFloat(bar.low) / denoms.tick,
time: bar.time * 1000, close: parseFloat(bar.close) / denoms.tick,
open: parseFloat(bar.open) / denoms.tick, volume: parseFloat(bar.volume) / denoms.base
high: parseFloat(bar.high) / denoms.tick, }))
low: parseFloat(bar.low) / denoms.tick,
close: parseFloat(bar.close) / denoms.tick,
volume: parseFloat(bar.volume) / denoms.base
}
})
parsedBars.sort((a, b) => a.time - b.time) bars.sort((a, b) => a.time - b.time)
// Fill any gaps between returned bars with null bars so TradingView
// receives a contiguous array of the correct length.
const periodMs = intervalToSeconds(resolution) * 1000
const bars: Bar[] = []
for (let i = 0; i < parsedBars.length; i++) {
if (i > 0) {
const prev = parsedBars[i - 1].time
const curr = parsedBars[i].time
for (let t = prev + periodMs; t < curr; t += periodMs) {
bars.push({ time: t, open: null, high: null, low: null, close: null })
}
}
bars.push(parsedBars[i])
}
console.log('[TradingView Datafeed] Scaled bar sample:', bars[0]) console.log('[TradingView Datafeed] Scaled bar sample:', bars[0])

View File

@@ -367,6 +367,22 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
} }
isChartReady = true isChartReady = true
// Apply any indicators that arrived before chart was ready (e.g. from workspace sync on page load)
const pendingIndicators = Object.values(indicatorStore.indicators).filter(ind => !ind.tv_study_id)
if (pendingIndicators.length > 0) {
console.log('[Indicators] Chart ready, applying', pendingIndicators.length, 'pending indicators from store')
isApplyingTVUpdate = true
;(async () => {
try {
for (const indicator of pendingIndicators) {
await createTVStudy(indicator)
}
} finally {
isApplyingTVUpdate = false
}
})()
}
console.log('[Indicators] Setting up indicator event subscriptions') console.log('[Indicators] Setting up indicator event subscriptions')
console.log('[Indicators] Chart ready, widget:', tvWidget) console.log('[Indicators] Chart ready, widget:', tvWidget)
@@ -781,6 +797,10 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
async function createTVStudy(indicator: IndicatorInstance) { async function createTVStudy(indicator: IndicatorInstance) {
if (!isChartReady) return if (!isChartReady) return
// Custom indicators (pandas_ta_name starts with "custom_") are handled by
// useCustomIndicators — they use TV createCustomStudy, not createStudy.
if (indicator.pandas_ta_name.startsWith('custom_')) return
try { try {
const chart = tvWidget.activeChart() const chart = tvWidget.activeChart()
if (!chart) return if (!chart) return

View File

@@ -22,7 +22,7 @@ class WebSocketManager {
private reconnectAttempts = 0 private reconnectAttempts = 0
private maxReconnectAttempts = Infinity // Keep trying indefinitely private maxReconnectAttempts = Infinity // Keep trying indefinitely
private reconnectDelay = 1000 // Start with 1 second private reconnectDelay = 1000 // Start with 1 second
private maxReconnectDelay = 15000 // Max 15 seconds private maxReconnectDelay = 50000 // Max 50 seconds
/** /**
* Connect to WebSocket with JWT token for authentication * Connect to WebSocket with JWT token for authentication

View File

@@ -1,6 +1,63 @@
import { defineStore } from 'pinia' import { defineStore } from 'pinia'
import { ref } from 'vue' import { ref } from 'vue'
/**
 * Declaration of one tunable parameter of a custom indicator.
 * Mirrors the metadata delivered alongside a `custom_` indicator
 * (see CustomIndicatorMetadata.parameters, keyed by parameter name).
 */
export interface CustomIndicatorParam {
  /** Value kind of the parameter. */
  type: 'int' | 'float' | 'bool' | 'string'
  /**
   * Default value for the parameter. Runtime type presumably matches
   * `type` — not enforced here. NOTE(review): typed `any`; `unknown`
   * would be safer but forces narrowing at every read site — confirm
   * consumers before tightening.
   */
  default: any
  /** Optional human-readable explanation of the parameter. */
  description?: string
  /** Lower bound — presumably only meaningful for 'int'/'float'; confirm. */
  min?: number
  /** Upper bound — presumably only meaningful for 'int'/'float'; confirm. */
  max?: number
}
/**
 * Per-series plot configuration.
 * `style` maps to LineStudyPlotStyle: 0=Line, 1=Histogram, 3=Dots/Cross,
 * 4=Area, 5=Columns, 6=Circles, 9=StepLine.
 * (Other numeric values are not listed here — verify against the
 * charting-library typings before relying on them.)
 */
export interface PlotConfig {
  /** LineStudyPlotStyle numeric value (see mapping above). */
  style: number
  /** Plot color — presumably a CSS color string (hex/rgba); confirm with consumer. */
  color?: string
  /** Line width in pixels — TODO confirm units against the charting library. */
  linewidth?: number
  /** Whether the series is drawn; semantics of omission decided by the consumer. */
  visible?: boolean
}
/**
 * Shaded region between two plots ("plot_plot") or two bands ("hline_hline").
 * `series1`/`series2` name the two boundaries of the fill; for 'plot_plot'
 * these presumably reference output column names, for 'hline_hline' band
 * ids — TODO confirm against the code that builds the TV study.
 */
export interface FilledAreaConfig {
  /** Unique identifier for this filled area within the indicator. */
  id: string
  /** Which pair of boundaries the fill spans. */
  type: 'plot_plot' | 'hline_hline'
  /** First boundary (see note above on how it is resolved). */
  series1: string
  /** Second boundary (see note above on how it is resolved). */
  series2: string
  /** Fill color — presumably a CSS color string; confirm with consumer. */
  color?: string
  /** Fill opacity — range not shown here (0-1 vs 0-100); TODO confirm. */
  opacity?: number
}
/**
 * Horizontal reference line (e.g. RSI ob/os).
 * linestyle: 0=solid, 1=dotted, 2=dashed.
 */
export interface BandConfig {
  /** Unique identifier for this band within the indicator. */
  id: string
  /** Y-axis level at which the line is drawn. */
  value: number
  /** Line color — presumably a CSS color string; confirm with consumer. */
  color?: string
  /** Line width in pixels — TODO confirm units against the charting library. */
  linewidth?: number
  /** 0=solid, 1=dotted, 2=dashed (see doc comment above). */
  linestyle?: number
  /** Whether the band is drawn; semantics of omission decided by the consumer. */
  visible?: boolean
}
/**
 * One output series produced by a custom indicator.
 * Listed in CustomIndicatorMetadata.output_columns.
 */
export interface CustomIndicatorColumn {
  /** Internal column name — presumably matches the backend dataframe column; confirm. */
  name: string
  /** Optional user-facing label; falls back to `name` when absent — TODO confirm fallback in consumer. */
  display_name?: string
  /** Optional human-readable explanation of the series. */
  description?: string
  /** How to draw this series; when absent the consumer chooses a default — confirm. */
  plot?: PlotConfig
}
/**
 * Full description of a custom indicator, attached to IndicatorInstance
 * as `custom_metadata` for `custom_` indicators; drives automatic
 * construction of the TradingView custom study.
 */
export interface CustomIndicatorMetadata {
  /** User-facing name of the indicator. */
  display_name: string
  /** Tunable parameters, keyed by parameter name. */
  parameters: Record<string, CustomIndicatorParam>
  /** Names of the input series the indicator consumes — presumably OHLCV column names; confirm. */
  input_series: string[]
  /** Output series produced by the indicator, in plot order. */
  output_columns: CustomIndicatorColumn[]
  /** Where to render: overlaid on the price chart or in its own pane. */
  pane: 'price' | 'separate'
  /** Optional shaded regions between plots/bands. */
  filled_areas?: FilledAreaConfig[]
  /** Optional horizontal reference lines (e.g. RSI 30/70). */
  bands?: BandConfig[]
}
export interface IndicatorInstance { export interface IndicatorInstance {
id: string id: string
pandas_ta_name: string pandas_ta_name: string
@@ -15,6 +72,8 @@ export interface IndicatorInstance {
created_at?: number created_at?: number
modified_at?: number modified_at?: number
original_id?: string original_id?: string
/** Populated for custom_ indicators; drives TV custom study auto-construction. */
custom_metadata?: CustomIndicatorMetadata
} }
export const useIndicatorStore = defineStore('indicators', () => { export const useIndicatorStore = defineStore('indicators', () => {