major agent refactoring: wiki knowledge base, no RAG, no Qdrant, no Ollama

This commit is contained in:
2026-04-21 21:03:24 -04:00
parent 7e4b54d701
commit 44a1688657
80 changed files with 2699 additions and 4267 deletions

View File

@@ -16,13 +16,13 @@ supported_exchanges:
# limits and connection constraints — these are conservative starting values.
exchange_capacity:
BINANCE:
historical_slots: 1
historical_slots: 2
realtime_slots: 5
COINBASE:
historical_slots: 1
historical_slots: 2
realtime_slots: 4
KRAKEN:
historical_slots: 1
historical_slots: 2
realtime_slots: 3
# Kafka configuration

View File

@@ -1,6 +1,7 @@
# Development Plan
* Single conversation in gateway
* Wiki memory
* Agent unification & spawn tool
* Realtime data
* Triggers
* Strategy UI
@@ -10,7 +11,8 @@
* Live Execution
* Sandbox <=> Dexorder auth
* Chat channels
* MCP channel (with or without images)
* MCP channel (with or without images)
* TradingView indicator import tool
* Trader preferences tool
* Results persistence: research analysis, backtests, strategy performance metrics, etc.
*

View File

@@ -60,7 +60,7 @@ public class IngestorBroker implements AutoCloseable {
/** Re-queue realtime job if no heartbeat received within this window (ms) */
private static final long HEARTBEAT_TIMEOUT_MS = 25_000;
/** Re-queue historical job if not completed within this window (ms) */
private static final long HISTORICAL_TIMEOUT_MS = 60_000;
private static final long HISTORICAL_TIMEOUT_MS = 120_000;
private final ZmqChannelManager zmqManager;
private volatile boolean running;

View File

@@ -18,38 +18,18 @@ COPY src ./src
# Build (includes protobuf generation)
RUN npm run build
# Note: Python API files for research subagent are copied by bin/build script
# to src/harness/subagents/research/api-source/ before docker build
# Production image
FROM node:22-slim
WORKDIR /app
# Install dependencies for Ollama (early in the build for caching)
RUN apt-get update && apt-get install -y curl bash zstd ca-certificates && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y bash zstd ca-certificates && rm -rf /var/lib/apt/lists/*
# Install Ollama (before npm dependencies for better caching)
RUN curl -fsSL https://ollama.com/install.sh | sh
# Create non-root user early (before pulling model)
# Create non-root user
RUN groupadd --gid 1001 nodejs && \
useradd --uid 1001 --gid nodejs --shell /bin/bash --create-home nodejs && \
chown -R nodejs:nodejs /app
# Pull embedding model (all-minilm: 90MB, CPU-friendly) as nodejs user
# This is the most expensive operation, so do it early
USER nodejs
RUN ollama serve & \
OLLAMA_PID=$! && \
sleep 10 && \
ollama pull all-minilm && \
kill $OLLAMA_PID && \
wait $OLLAMA_PID || true
# Switch back to root for remaining setup
USER root
# Copy package files
COPY package*.json ./
@@ -65,16 +45,14 @@ COPY protobuf ./protobuf
# Copy k8s templates (not included in TypeScript build)
COPY src/k8s/templates ./dist/k8s/templates
# Copy harness prompts (not included in TypeScript build)
# Copy harness prompts (welcome.md, etc.)
COPY src/harness/prompts ./dist/harness/prompts
# Copy all subagent directories (config.yaml, system-prompt.md, memory/, etc.)
# TypeScript build already compiled .ts files to .js in dist, so we copy the entire
# source directory to get all non-TypeScript assets, then remove .ts duplicates
COPY src/harness/subagents ./dist/harness/subagents
# Remove source .ts files (we only need the compiled .js from builder stage)
# Keep .yaml, .md files and memory/ directories
RUN find ./dist/harness/subagents -name "*.ts" -type f -delete
# Copy wiki knowledge base
COPY knowledge ./knowledge
# Copy agent prompt pages (agent-*.md, index.md, tools.md)
COPY prompt ./prompt
# Copy entrypoint script
COPY entrypoint.sh ./
@@ -87,4 +65,4 @@ USER nodejs
EXPOSE 3000
ENTRYPOINT ["./entrypoint.sh"]
ENTRYPOINT ["./entrypoint.sh"]

View File

@@ -58,7 +58,7 @@ The `details` field is intentionally **filtered out of the workspace `_types` st
```
The gateway will:
1. Read the current `details` from the sandbox via `python_read`
1. Read the current `details` from the sandbox via `PythonRead`
2. Compute a unified diff between the old and new text
3. If no changes are detected, reply immediately with `details_updated` (success)
4. Otherwise, invoke the appropriate subagent (indicator / strategy / research) with instructions to update the Python code according to the diff, and also persist the new `details` text
@@ -67,8 +67,8 @@ The gateway will:
```json
{ "type": "subagent_chunk", "agentName": "indicator", "content": "Reading current implementation..." }
{ "type": "subagent_tool_call", "agentName": "indicator", "toolName": "python_read", "label": "python_read" }
{ "type": "subagent_tool_call", "agentName": "indicator", "toolName": "python_edit", "label": "python_edit" }
{ "type": "subagent_tool_call", "agentName": "indicator", "toolName": "PythonRead", "label": "PythonRead" }
{ "type": "subagent_tool_call", "agentName": "indicator", "toolName": "PythonEdit", "label": "PythonEdit" }
{ "type": "subagent_chunk", "agentName": "indicator", "content": "Applied patch. Validation passed." }
```
@@ -96,7 +96,7 @@ or on failure:
## Workspace Sync After Update
When the subagent calls `python_edit`, the sandbox returns a `_workspace_sync` payload in the MCP response. The gateway automatically applies this to the `{category}_types` workspace store and sends a WebSocket `patch` message to the client (the normal workspace sync path). The client should listen for these patches to refresh any UI that displays list metadata (name, description).
When the subagent calls `PythonEdit`, the sandbox returns a `_workspace_sync` payload in the MCP response. The gateway automatically applies this to the `{category}_types` workspace store and sends a WebSocket `patch` message to the client (the normal workspace sync path). The client should listen for these patches to refresh any UI that displays list metadata (name, description).
The `details` field itself is **not** in the workspace store — the client must call `read_details` again if it needs the refreshed details text after an update.

View File

@@ -1,25 +1,6 @@
#!/bin/bash
set -e
# Start Ollama server in background
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
# Wait for Ollama to be ready
echo "Waiting for Ollama to be ready..."
for i in {1..30}; do
if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
echo "Ollama is ready!"
break
fi
if [ $i -eq 30 ]; then
echo "Ollama failed to start within 30 seconds"
exit 1
fi
sleep 1
done
# Start the Node.js gateway application
echo "Starting gateway..."
exec node dist/main.js

View File

@@ -495,4 +495,6 @@ __all__ = ['API', 'ChartingAPI', 'DataAPI', 'get_api', 'set_api']
---
For practical usage patterns and complete working examples, see `usage-examples.md`.
For practical usage patterns and complete working examples, see [`usage-examples.md`](usage-examples.md).
For the pandas-ta indicator catalog used in research scripts, see [`pandas-ta-reference.md`](pandas-ta-reference.md).

View File

@@ -1,142 +1,164 @@
# Indicator Development Guide
# Custom Indicator Development
Custom indicators in Dexorder are Python functions that process OHLCV data and return signals or values.
Custom indicators are Python scripts saved in the `indicator` category. They compute values from OHLCV data and are plotted live on the TradingView chart alongside built-in indicators.
## Indicator Structure
See [`../pandas-ta-reference.md`](../pandas-ta-reference.md) for the full catalog of built-in indicators available via `pandas_ta`.
---
## Function Signature
A custom indicator must define a **top-level function** whose name is the lowercase, snake_case form of the `name` passed to `PythonWrite`. For example, `name="VW RSI"` → function `def vw_rsi(...)`.
The function receives the OHLCV columns listed in `input_series` as positional arguments and must return either:
- A `pd.Series` (single-output indicator), or
- A `pd.DataFrame` with column names matching `output_columns` in the metadata (multi-output)
```python
def my_indicator(df, **params):
"""
Calculate custom indicator
import pandas as pd
import pandas_ta as ta
Args:
df: DataFrame with columns [open, high, low, close, volume]
**params: Indicator parameters
Returns:
Series or DataFrame with indicator values
"""
# Implementation
return result
# Single-output: volume-weighted RSI
def vw_rsi(close: pd.Series, volume: pd.Series, length: int = 14) -> pd.Series:
rsi = ta.rsi(close, length=length)
vol_weight = volume / volume.rolling(length).mean()
return (rsi * vol_weight).rolling(3).mean()
```
## Common Patterns
### Simple Moving Average
```python
def sma(df, period=20):
return df['close'].rolling(window=period).mean()
```
### Exponential Moving Average
```python
def ema(df, period=20):
return df['close'].ewm(span=period, adjust=False).mean()
```
### RSI (Relative Strength Index)
```python
def rsi(df, period=14):
delta = df['close'].diff()
gain = delta.where(delta > 0, 0).rolling(window=period).mean()
loss = -delta.where(delta < 0, 0).rolling(window=period).mean()
rs = gain / loss
return 100 - (100 / (1 + rs))
```
### MACD
```python
def macd(df, fast=12, slow=26, signal=9):
ema_fast = df['close'].ewm(span=fast).mean()
ema_slow = df['close'].ewm(span=slow).mean()
macd_line = ema_fast - ema_slow
signal_line = macd_line.ewm(span=signal).mean()
histogram = macd_line - signal_line
import pandas as pd
import pandas_ta as ta
# Multi-output: custom Bollinger Bands
def vol_bands(close: pd.Series, length: int = 20, std: float = 2.0) -> pd.DataFrame:
bb = ta.bbands(close, length=length, std=std)
return pd.DataFrame({
'macd': macd_line,
'signal': signal_line,
'histogram': histogram
"upper": bb.iloc[:, 2],
"mid": bb.iloc[:, 1],
"lower": bb.iloc[:, 0],
})
```
## Best Practices
**Always use `pandas_ta` for standard indicator calculations.** Never write manual `rolling().mean()` or `ewm()` implementations — use `ta.sma()`, `ta.ema()`, `ta.rsi()`, etc.
### Data Handling
- Always validate input DataFrame has required columns
- Handle NaN values appropriately
- Use `.copy()` to avoid modifying original data
- Consider edge cases (not enough data, etc.)
---
### Performance
- Vectorize operations when possible (avoid loops)
- Use pandas/numpy built-in functions
- Cache expensive calculations
- Test on large datasets
## Required Metadata
### Parameters
- Provide sensible defaults
- Document parameter ranges
- Validate parameter values
- Consider optimization bounds
When writing a custom indicator with `PythonWrite`, supply complete metadata so the web client can build the TradingView plotter automatically:
### Testing
```python
def test_indicator():
# Create sample data
df = pd.DataFrame({
'close': [100, 102, 101, 103, 105]
})
PythonWrite(
category="indicator",
name="VW RSI",
description="RSI weighted by relative volume.",
details="""## Volume-Weighted RSI
# Test calculation
result = my_indicator(df, param=10)
Computes RSI on close prices, scales by relative volume, applies 3-bar smoothing.
# Validate output
assert not result.isna().all()
assert len(result) == len(df)
**Formula:** (rsi * (volume / volume.rolling(length).mean())).rolling(3).mean()
**Inputs:** close, volume
**Output:** single Series — smoothed volume-weighted RSI (separate pane)
**Parameters:** length (int, default 14)""",
code="""...""",
metadata={
"parameters": {
"length": {"type": "int", "default": 14, "min": 2, "max": 200, "description": "RSI period"}
},
"input_series": ["close", "volume"],
"output_columns": [
{"name": "value", "display_name": "VW-RSI", "plot": {"style": 0}}
],
"pane": "separate" # "price" = overlay on candles; "separate" = sub-pane
}
)
```
### Plot styles
| Value | Renders as |
|---|---|
| `0` | Line (default) |
| `1` | Histogram bars |
| `4` | Area (filled under line) |
| `5` | Columns (vertical bars) |
| `9` | Step line |
### Filled areas (shaded bands)
To shade between two output series (e.g. upper/lower bands), add a `filled_areas` list. The two bounding series must appear at consecutive even/odd positions in `output_columns`:
```python
"filled_areas": [
{"id": "fill", "type": "plot_plot", "series1": "upper", "series2": "lower",
"color": "#2196F3", "opacity": 0.08}
]
```
---
## Workflow
1. **Check for existing indicators** before writing: `PythonList(category="indicator")`. If one already exists with the same sanitized name, update it with `PythonEdit` rather than creating a duplicate.
2. **Write** with `PythonWrite(category="indicator", ...)`. The system automatically runs the script against synthetic test data to catch compile/runtime errors — no separate validation call needed.
3. **Add to workspace** with `WorkspacePatch("indicators", ...)` using `pandas_ta_name: "custom_<sanitized_name>"`. Include `custom_metadata` in the patch value so the web client can render it.
4. **Use in strategies** via `ta.custom_<sanitized_name>(...)`. See [`../strategies/strategy-development.md`](../strategies/strategy-development.md) for details.
---
## Naming Conventions
The workspace `pandas_ta_name` is `"custom_"` + the sanitized indicator name. Sanitization: lowercase + spaces/hyphens → underscores. For example:
| `name` | function name | `pandas_ta_name` |
|---|---|---|
| `"VW RSI"` | `vw_rsi` | `custom_vw_rsi` |
| `"TrendFlex"` | `trendflex` | `custom_trendflex` |
| `"Vol-Bands"` | `vol_bands` | `custom_vol_bands` |
Two names that sanitize to the same value will conflict — check with `PythonList` first.
---
## Common Pitfalls
### Look-Ahead Bias
Never use future data:
```python
# WRONG - uses future data
df['signal'] = df['close'].shift(-1) > df['close']
### Look-ahead bias
# CORRECT - only past data
df['signal'] = df['close'] > df['close'].shift(1)
Never use future data in the computation. Indicator values for bar N may only depend on data available at bar N or earlier.
```python
# WRONG — uses future price
signal = close.shift(-1) > close
# CORRECT — only past data
signal = close > close.shift(1)
```
### Repainting
Indicator values should not change for closed bars:
Indicator values for already-closed bars should not change as new bars arrive. Avoid calculations whose sliding-window recomputation can retrospectively alter past values in non-obvious ways.
### NaN handling
Indicators need a warm-up period. The first `length - 1` values will be `NaN`. Strategies that consume custom indicators should guard with:
```python
# Ensure calculations are based on closed candles
# Avoid using unstable data sources
if vw_rsi.isna().all() or len(df) < min_required:
return
```
### Overfitting
- Don't optimize on same data you test on
- Use separate train/validation/test sets
- Walk-forward analysis for robustness
- Simple is often better than complex
## Integration with Strategies
- Keep indicator logic simple and parameter-lean
- Validate on out-of-sample data, not the same window used to tune parameters
- Prefer indicators with a clear mechanical rationale over curve-fit formulas
Indicators are used in strategy signals:
```python
def my_strategy(df):
# Calculate indicators
df['rsi'] = rsi(df, period=14)
df['sma_fast'] = sma(df, period=20)
df['sma_slow'] = sma(df, period=50)
---
# Generate signals
df['signal'] = 0
df.loc[(df['rsi'] < 30) & (df['sma_fast'] > df['sma_slow']), 'signal'] = 1
df.loc[(df['rsi'] > 70) & (df['sma_fast'] < df['sma_slow']), 'signal'] = -1
## See Also
return df
```
Store indicators in your git repository under `indicators/` directory.
- [`../pandas-ta-reference.md`](../pandas-ta-reference.md) — Full catalog of built-in indicators and calling conventions
- [`../api-reference.md`](../api-reference.md) — DataAPI and ChartingAPI for research scripts
- [`../strategies/strategy-development.md`](../strategies/strategy-development.md) — Using custom indicators in strategies via `ta.custom_*`

View File

@@ -1,5 +1,7 @@
# pandas-ta Reference for Research Scripts
This catalog applies to both research scripts and custom indicators. For usage in research scripts see [`usage-examples.md`](usage-examples.md). For writing custom indicator scripts (with metadata for the TradingView plotter) see [`indicators/indicator-development.md`](indicators/indicator-development.md).
The sandbox environment uses **pandas-ta** as the standard indicator library. Always use it for technical indicator calculations; do not write manual rolling/ewm implementations.
```python

View File

@@ -1,71 +0,0 @@
# Agent System Architecture
The Dexorder AI platform uses a sophisticated agent harness that orchestrates between user interactions, LLM models, and user-specific tools.
## Core Components
### Gateway
Multi-channel gateway supporting:
- WebSocket connections for web/mobile
- Telegram integration
- Real-time event streaming
### Agent Harness
Stateless orchestrator that:
1. Fetches context from user's MCP server
2. Routes to appropriate LLM model based on license
3. Calls LLM with embedded context
4. Routes tool calls to user's MCP or platform tools
5. Saves conversation history back to MCP
### Memory Architecture
Three-tier storage system:
- **Redis**: Hot state for active sessions and checkpoints
- **Qdrant**: Vector search for RAG and semantic memory
- **Iceberg**: Cold storage for durable conversations and analytics
### User Context
Every interaction includes:
- User ID and license information
- Active channel (websocket, telegram, etc.)
- Channel capabilities (markdown, images, buttons)
- Conversation history
- Relevant memories from RAG
- Workspace state
## Skills vs Subagents
### Skills
Self-contained capabilities for specific tasks:
- Market analysis
- Strategy validation
- Indicator development
- Defined in markdown + TypeScript
- Use when task is well-defined and scoped
### Subagents
Specialized agents with dedicated memory:
- Code reviewer with review guidelines
- Risk analyzer with risk models
- Multi-file knowledge base
- Custom system prompts
- Use when domain expertise is needed
## Global vs User Memory
### Global Memory (user_id="0")
Platform-wide knowledge available to all users:
- Trading concepts and terminology
- Platform capabilities
- Indicator documentation
- Strategy patterns
- Best practices
### User Memory
Personal context specific to each user:
- Conversation history
- Preferences and trading style
- Custom indicators and strategies
- Workspace state
All RAG queries automatically search both global and user-specific memories.

View File

@@ -1,88 +1,17 @@
# Model Context Protocol (MCP) Integration
# User Sandbox
Dexorder uses the Model Context Protocol for user-specific tool execution and state management.
Each user has a dedicated sandbox environment that persists their data across sessions.
## Container Architecture
## Persistent Storage
Each user has a dedicated Kubernetes pod running:
- **Agent Container**: Python environment with conda packages
- **Lifecycle Sidecar**: Manages container lifecycle and communication
- **Persistent Storage**: User's git repository with indicators/strategies
User scripts (indicators, strategies, research) are stored in a git repository inside the user's sandbox. They survive session disconnects and reconnections.
## Authentication Modes
- Indicators are in the `indicator` category and can be listed with `PythonList(category="indicator")`
- Strategies are in the `strategy` category and can be listed with `PythonList(category="strategy")`
- Research scripts are in the `research` category and can be listed with `PythonList(category="research")`
Three MCP authentication modes:
## Session Lifecycle
### 1. Public Mode (Free Tier)
- No authentication required
- Container creates anonymous session
- Limited to read-only resources
- Session expires after timeout
### 2. Gateway Auth Mode (Standard)
- Gateway authenticates user
- Passes verified user ID to container
- Container trusts gateway's authentication
- Full access to user's tools and data
### 3. Direct Auth Mode (Enterprise)
- User authenticates directly with container
- Gateway forwards encrypted credentials
- Container validates credentials independently
- Highest security for sensitive operations
## MCP Resources
The container exposes standard resources:
### context://user-profile
User preferences and trading style
### context://conversation-summary
Recent conversation context and history
### context://workspace-state
Current chart, indicators, and analysis state
### context://system-prompt
User's custom agent instructions
### indicators://list
Available indicators with signatures
### strategies://list
User's trading strategies
## Tool Execution Flow
1. User sends message to gateway
2. Gateway queries user's MCP resources for context
3. LLM generates response with tool calls
4. Gateway routes tool calls:
- Platform tools → handled by gateway
- User tools → proxied to MCP container
5. Tool results returned to LLM
6. Final response sent to user
7. Conversation saved to MCP container
## Container Lifecycle
### Startup
1. Gateway receives user connection
2. Checks if container exists
3. Creates pod if needed (cold start ~5-10s)
4. Waits for container ready
5. Establishes MCP connection
### Active
- Container stays alive during active session
- Receives tool calls via MCP
- Maintains workspace state
- Saves files to persistent storage
### Shutdown
- Free users: timeout after 15 minutes idle
- Paid users: longer timeout based on license
- Graceful shutdown saves state
- Persistent storage retained
- Fast restart on next connection
- Sandbox starts automatically when the user connects
- Cold start takes a few seconds if the sandbox was idle
- All workspace state and scripts are preserved across reconnects

View File

@@ -0,0 +1,132 @@
# Workspace
The Workspace is the user's current UI context — what they are looking at, what is selected, and what persistent state belongs to their session. It is a collection of named **stores** that are kept in sync between the web client, the gateway, and the user's sandbox container.
Use `WorkspaceRead(store_name)` to read any store and `WorkspacePatch(store_name, patch)` to update it. Patches use JSON Patch (RFC 6902) format.
---
## Stores
### `chartState` — Current chart view (persistent)
Tracks what the user is currently looking at on the TradingView chart.
| Field | Type | Description |
|---|---|---|
| `symbol` | string | Active trading pair in `SYMBOL.EXCHANGE` format (e.g. `BTC/USDT.BINANCE`) |
| `period` | number | OHLC bar period in seconds (e.g. `900` = 15 min, `3600` = 1 h) |
| `start_time` | number \| null | Unix timestamp of left edge of visible range, or null for auto |
| `end_time` | number \| null | Unix timestamp of right edge of visible range, or null for auto |
| `selected_shapes` | string[] | IDs of currently selected drawing/annotation shapes |
When the user says "the current chart" or "what's selected", read `chartState` first.
---
### `indicators` — Active indicators on the chart (persistent)
A flat map of `indicator_id → IndicatorInstance`. Each entry represents one study currently plotted on the TradingView chart.
**`IndicatorInstance` fields:**
| Field | Type | Description |
|---|---|---|
| `id` | string | Unique ID for this instance |
| `pandas_ta_name` | string | Internal name used in strategy/indicator scripts (e.g. `RSI_14`, `custom_MyIndicator`) |
| `instance_name` | string | Human-readable label shown on chart |
| `parameters` | object | Key/value parameter map (e.g. `{ length: 14 }`) |
| `tv_study_id` | string? | TradingView study ID (assigned by TV after the study is added) |
| `tv_indicator_name` | string? | TradingView indicator name for built-in studies |
| `tv_inputs` | object? | TradingView input overrides keyed by TV input name |
| `visible` | boolean | Whether the study is visible on the chart |
| `pane` | string | `"price"` to overlay on price pane, `"separate"` for its own panel |
| `symbol` | string? | Override symbol if different from `chartState.symbol` |
| `created_at` | number? | Unix timestamp when added |
| `modified_at` | number? | Unix timestamp when last changed |
| `custom_metadata` | object? | Present only for `custom_*` indicators; drives TradingView custom study construction (see below) |
**`custom_metadata` sub-fields** (for custom indicators only):
| Field | Type | Description |
|---|---|---|
| `display_name` | string | Human-readable indicator title shown in TV |
| `parameters` | object | Parameter schema: `{ name: { type, default, description, min?, max? } }` |
| `input_series` | string[] | Input price series required (e.g. `["close"]`) |
| `output_columns` | array | Each entry: `{ name, display_name?, description?, plot? }` where `plot` has `{ style, color?, linewidth?, visible? }` |
| `pane` | `"price"` \| `"separate"` | Default pane placement |
| `filled_areas` | array? | Shaded regions between two plots or hlines |
| `bands` | array? | Horizontal reference lines (e.g. RSI 70/30) |
---
### `shapes` — Chart drawings and annotations (persistent)
```json
{ "shapes": { "<shape_id>": Shape } }
```
Each `Shape` has:
| Field | Type | Description |
|---|---|---|
| `id` | string | Unique shape ID |
| `type` | string | Drawing type (e.g. `"trend_line"`, `"horizontal_line"`, `"rectangle"`) |
| `points` | array | Control points: `{ time: unix_ts, price: number, channel?: string }` |
| `color` | string? | Hex color |
| `line_width` | number? | Line thickness |
| `line_style` | string? | `"solid"`, `"dotted"`, `"dashed"` |
| `properties` | object? | Additional type-specific properties |
| `symbol` | string? | Symbol the shape belongs to |
| `created_at` | number? | Unix timestamp |
| `modified_at` | number? | Unix timestamp |
---
### `indicator_types` — Custom indicator registry (persistent)
```json
{ "types": { "<script_name>": CustomIndicatorMetadata } }
```
Maps custom indicator script names to their `CustomIndicatorMetadata` (same structure as `custom_metadata` above). Populated when a custom indicator is created or updated by the indicator agent. The web client uses this to register custom TradingView studies.
---
### `strategy_types` — Strategy registry (persistent)
```json
{ "types": { "<script_name>": StrategyMetadata } }
```
Maps strategy script names to their metadata. Used by the web client to know which strategies are available.
---
### `research_types` — Research script registry (persistent)
```json
{ "types": { "<script_name>": ResearchMetadata } }
```
Maps research script names to their metadata.
---
### `channelState` — Connected channels (transient, gateway-only)
Tracks which communication channels (WebSocket, Telegram, etc.) are connected to the current session. **Not synced to web clients.**
```json
{ "connected": { "<channel_id>": { type, connectedAt, capabilities } } }
```
---
## Sync Protocol
Stores are kept in sync using JSON Patch (RFC 6902) messages:
- **snapshot** — full state dump sent on connect or after missed patches
- **patch** — incremental change with a monotonic sequence number
Stores marked `persistent` are saved to the user's container at `/data/workspace/{store_name}.json` and survive session reconnects.

View File

@@ -1,188 +1,262 @@
# Strategy Development Guide
Trading strategies in Dexorder define entry/exit rules and position management logic.
Strategies on Dexorder are `PandasStrategy` subclasses that receive a live stream of OHLCV bars and call `self.buy()` / `self.sell()` / `self.flatten()` to place orders.
## Strategy Structure
See [`../api-reference.md`](../api-reference.md) for the DataAPI and ChartingAPI used in research scripts. For indicator calculations, see [`../pandas-ta-reference.md`](../pandas-ta-reference.md).
---
## PandasStrategy API
```python
class Strategy:
def __init__(self, **params):
"""Initialize strategy with parameters"""
self.params = params
from dexorder.nautilus.pandas_strategy import PandasStrategy, PandasStrategyConfig
def generate_signals(self, df):
"""
Generate trading signals
class MyStrategy(PandasStrategy):
def evaluate(self, dfs: dict[str, pd.DataFrame]) -> None:
"""Called after every new bar across all feeds.
Args:
df: DataFrame with OHLCV + indicator columns
Returns:
DataFrame with 'signal' column:
1 = long entry
-1 = short entry
0 = no action
dfs: dict mapping feed_key → pd.DataFrame
Columns: timestamp (ns), open, high, low, close, volume,
buy_vol, sell_vol, open_interest
Rows accumulate over time — last row = latest bar.
"""
pass
df = dfs.get("BTC/USDT.BINANCE:300")
if df is None or len(df) < 20:
return # not enough data yet
def calculate_position_size(self, capital, price, risk_pct):
"""Calculate position size based on risk"""
pass
close = df["close"]
# ... compute signals ...
def get_stop_loss(self, entry_price, direction):
"""Calculate stop loss level"""
pass
def get_take_profit(self, entry_price, direction):
"""Calculate take profit level"""
pass
if buy_signal:
self.buy(quantity=0.1)
elif sell_signal:
self.sell(quantity=0.1)
```
## Example: Simple Moving Average Crossover
### Feed key format
`"{SYMBOL.EXCHANGE}:{period_seconds}"` — e.g. `"BTC/USDT.BINANCE:900"` for 15-minute bars.
Access all feeds via `self.config.feed_keys` (tuple of strings).
### Order methods
```python
class SMACrossoverStrategy:
def __init__(self, fast_period=20, slow_period=50, risk_pct=0.02):
self.fast_period = fast_period
self.slow_period = slow_period
self.risk_pct = risk_pct
def generate_signals(self, df):
# Calculate moving averages
df['sma_fast'] = df['close'].rolling(self.fast_period).mean()
df['sma_slow'] = df['close'].rolling(self.slow_period).mean()
# Generate signals
df['signal'] = 0
# Long when fast crosses above slow
df.loc[
(df['sma_fast'] > df['sma_slow']) &
(df['sma_fast'].shift(1) <= df['sma_slow'].shift(1)),
'signal'
] = 1
# Short when fast crosses below slow
df.loc[
(df['sma_fast'] < df['sma_slow']) &
(df['sma_fast'].shift(1) >= df['sma_slow'].shift(1)),
'signal'
] = -1
return df
def calculate_position_size(self, capital, price, atr):
# Risk-based position sizing
risk_amount = capital * self.risk_pct
stop_distance = 2 * atr
position_size = risk_amount / stop_distance
return position_size
def get_stop_loss(self, entry_price, direction, atr):
if direction == 1: # Long
return entry_price - (2 * atr)
else: # Short
return entry_price + (2 * atr)
def get_take_profit(self, entry_price, direction, atr):
if direction == 1: # Long
return entry_price + (4 * atr) # 2:1 risk/reward
else: # Short
return entry_price - (4 * atr)
self.buy(quantity: float, feed_key: str = None)
self.sell(quantity: float, feed_key: str = None)
self.flatten(feed_key: str = None) # close all open positions
```
## Strategy Components
If `feed_key` is omitted, the first feed in `feed_keys` is used. `quantity` is in base currency units (e.g. 0.1 BTC).
### Signal Generation
Entry conditions based on:
- Indicator crossovers
- Price patterns
- Volume confirmation
- Multiple timeframe confluence
### Available data
### Risk Management
Essential elements:
- **Position Sizing**: Based on account risk percentage
- **Stop Losses**: ATR-based or support/resistance
- **Take Profits**: Multiple targets or trailing stops
- **Max Positions**: Limit concurrent trades
Strategies may only use data in the `dfs` feeds: crypto OHLCV + buy/sell volume split + open interest. The following are **not available**:
- TradFi data (equities, forex, bonds, options, macro indicators)
- Alternative data (news, social sentiment, on-chain metrics, economic calendars)
### Filters
Reduce false signals:
- **Trend Filter**: Only trade with the trend
- **Volatility Filter**: Avoid low volatility periods
- **Time Filter**: Specific trading hours
- **Volume Filter**: Minimum volume requirements
---
### Exit Rules
Multiple exit types:
- **Stop Loss**: Protect capital
- **Take Profit**: Lock in gains
- **Trailing Stop**: Follow profitable moves
- **Time Exit**: Close at end of period
- **Signal Exit**: Opposite signal
## Using pandas_ta
## Backtesting Considerations
Use `import pandas_ta as ta` for all indicator calculations. Never write manual `rolling()` or `ewm()` implementations.
### Data Quality
- Use clean, validated data
- Handle missing data appropriately
- Account for survivorship bias
- Include realistic spreads and slippage
```python
import pandas_ta as ta
### Performance Metrics
Track key metrics:
- **Total Return**: Cumulative profit/loss
- **Sharpe Ratio**: Risk-adjusted returns
- **Max Drawdown**: Largest peak-to-trough decline
- **Win Rate**: Percentage of profitable trades
- **Profit Factor**: Gross profit / gross loss
- **Expectancy**: Average $ per trade
rsi = ta.rsi(df["close"], length=14)
macd_df = ta.macd(df["close"], fast=12, slow=26, signal=9)
hist = macd_df.iloc[:, 2] # histogram column
### Validation
Prevent overfitting:
- **Train/Test Split**: 70/30 or 60/40
- **Walk-Forward**: Rolling windows
- **Out-of-Sample**: Test on recent unseen data
- **Monte Carlo**: Randomize trade order
- **Paper Trading**: Live validation
ema = ta.ema(df["close"], length=20)
atr = ta.atr(df["high"], df["low"], df["close"], length=14)
```
## Common Strategy Types
See [`../pandas-ta-reference.md`](../pandas-ta-reference.md) for the full indicator catalog and multi-output column extraction patterns.
### Trend Following
Follow sustained price movements:
- Moving average crossovers
- Breakout strategies
- Trend channels
- Works best in trending markets
---
### Mean Reversion
Profit from price returning to average:
- Bollinger Band reversals
- RSI extremes
- Statistical arbitrage
- Works best in ranging markets
## Using Custom Indicators
### Momentum
Trade in direction of strong moves:
- Relative strength
- Price acceleration
- Volume surges
- Breakout confirmation
Prefer referencing a custom indicator that already exists in the `indicator` category rather than duplicating the logic inline. Custom indicators appear on the user's chart, making the signal transparent.
### Arbitrage
Exploit price discrepancies:
- Cross-exchange spreads
- Funding rate arbitrage
- Statistical pairs trading
- Requires low latency
```python
import pandas_ta as ta
## Integration with Platform
def evaluate(self, dfs):
df = dfs.get("BTC/USDT.BINANCE:3600")
if df is None or len(df) < 20:
return
Store strategies in your git repository under `strategies/` directory.
vw_rsi = ta.custom_vw_rsi(df["close"], df["volume"], length=14)
if vw_rsi is None or vw_rsi.isna().all():
return
Test using the backtesting tools provided by the platform.
if vw_rsi.iloc[-1] < 30:
self.buy(0.01)
elif vw_rsi.iloc[-1] > 70:
self.sell(0.01)
```
Deploy live strategies through the execution engine with proper risk controls.
Custom indicator names follow the pattern `ta.custom_{sanitized_name}`. See [`../indicators/indicator-development.md`](../indicators/indicator-development.md) for naming rules and how to create custom indicators.
Monitor performance and adjust parameters as market conditions change.
---
## Strategy Metadata
When writing a strategy with `PythonWrite(category="strategy", ...)`, always provide:
| Field | Required | Description |
|-------|----------|-------------|
| `description` | yes | One-sentence summary |
| `details` | yes | Full markdown: algorithm, entry/exit logic, parameters, data feeds, position sizing. Enough detail to reproduce the code from scratch. |
```python
PythonWrite(
category="strategy",
name="RSI Mean Reversion",
description="Buy oversold, sell overbought based on RSI(14) on BTC/USDT 5m bars.",
details="""## RSI Mean Reversion
...""",
code="""...""",
metadata={
"data_feeds": [
{"symbol": "BTC/USDT.BINANCE", "period_seconds": 300, "description": "BTC/USDT 5m"}
],
"parameters": {
"rsi_length": {"default": 14, "description": "RSI lookback period"},
"oversold": {"default": 30, "description": "Buy threshold"},
"overbought": {"default": 70, "description": "Sell threshold"},
"trade_qty": {"default": 0.01, "description": "Trade quantity in BTC"}
}
}
)
```
---
## Backtest Workflow
1. **Check existing indicators** first: `PythonList(category="indicator")` — reuse signals already on the chart.
2. **Write** the strategy: `PythonWrite(...)` — runs against synthetic data automatically.
3. **Run a backtest** targeting 100,000–200,000 bars (max 5 years):
```
BacktestStrategy(
strategy_name="RSI Mean Reversion",
feeds=[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 900}],
from_time="2023-01-01",
to_time="2024-12-31",
initial_capital=10000
)
```
4. **Interpret results**:
- `summary.total_return` — total fractional return (0.15 = +15%)
- `summary.sharpe_ratio` — annualized Sharpe (>1.0 good, >2.0 excellent)
- `summary.max_drawdown` — maximum peak-to-trough loss
- `summary.win_rate` — fraction of profitable trades
- `statistics.profit_factor` — gross profit / gross loss (>1.5 good)
5. **Iterate** with `PythonEdit`, re-run backtest.
6. **Activate** (paper first): `ActivateStrategy(..., paper=True)`
### Bar resolution and backtest window
Choose the resolution appropriate to the strategy's signal frequency, then set the date range to hit 100k–200k bars:
| Resolution | ~100k bars | ~200k bars |
|---|---|---|
| 5m | 1 year | 2 years |
| 15m | 2.9 years | 5 years |
| 1h | cap at 5 yr (≈44k bars) | — |
| 4h | cap at 5 yr (≈11k bars) | — |
---
## Strategy Patterns
### Trend following
Follow sustained price movements using moving average crossovers, breakout of price channels, or trend-direction filters:
```python
ema_fast = ta.ema(df["close"], length=20)
ema_slow = ta.ema(df["close"], length=50)
bullish = ema_fast.iloc[-1] > ema_slow.iloc[-1]
crossover = ema_fast.iloc[-2] <= ema_slow.iloc[-2]
if bullish and crossover:
self.buy(qty)
```
### Mean reversion
Profit from price returning to an average after extremes:
```python
rsi = ta.rsi(df["close"], length=14)
if rsi.iloc[-1] < 30:
self.buy(qty)
elif rsi.iloc[-1] > 70:
self.sell(qty)
```
### Multi-timeframe confluence
Use a higher-timeframe trend filter with a lower-timeframe entry signal:
```python
df_4h = dfs.get("BTC/USDT.BINANCE:14400")
df_15m = dfs.get("BTC/USDT.BINANCE:900")
if df_4h is None or df_15m is None:
return
ema_4h = ta.ema(df_4h["close"], length=20)
bullish_trend = df_4h["close"].iloc[-1] > ema_4h.iloc[-1]
macd_df = ta.macd(df_15m["close"])
hist = macd_df.iloc[:, 2]
if bullish_trend and hist.iloc[-1] > 0 and hist.iloc[-2] <= 0:
self.buy(qty, feed_key="BTC/USDT.BINANCE:900")
```
---
## Important Rules
- **`evaluate()` must be fast, lightweight, and deterministic** — no model inference, file I/O, network calls, or randomness. It runs on every bar during backtests over potentially hundreds of thousands of bars.
- **No LLM calls inside strategies** — strategies must be fully reproducible.
- **Guard for insufficient data** — always check `len(df) >= min_required` before computing indicators with a lookback period.
- **Use `.get()` for feeds** — multi-feed strategies may have feeds missing during warm-up.
- **Size conservatively** — a typical trade quantity is `0.001–0.01 * initial_capital / price`.
- **No `import` from `dexorder` inside `evaluate()`** — the strategy file is exec'd in a sandbox; PandasStrategy and pandas_ta are pre-loaded.
---
## Performance Metrics Reference
| Metric | Good | Excellent |
|---|---|---|
| Sharpe ratio | > 1.0 | > 2.0 |
| Profit factor | > 1.5 | > 2.0 |
| Max drawdown | < 20% | < 10% |
| Win rate | context-dependent | — |
A strategy with a lower win rate can still be profitable if winners are larger than losers (profit factor > 1). Focus on Sharpe and max drawdown as primary quality metrics.
### Avoiding overfitting
- Do not optimize parameters on the same data used for validation
- Use a held-out out-of-sample period to verify results
- Prefer fewer parameters — simpler strategies generalize better
- Walk-forward analysis: re-fit on a rolling window, evaluate on the next
---
## See Also
- [`../pandas-ta-reference.md`](../pandas-ta-reference.md) — Indicator catalog and usage examples
- [`../indicators/indicator-development.md`](../indicators/indicator-development.md) — Creating custom indicators
- [`../api-reference.md`](../api-reference.md) — DataAPI reference (for research scripts)
- [`../usage-examples.md`](../usage-examples.md) — Research script patterns

View File

@@ -2,6 +2,8 @@
Technical analysis is the study of historical price and volume data to identify patterns and predict future market movements.
> **Dexorder scope**: Only crypto OHLCV data is available. TradFi data (equities, forex, bonds, options, macro) and alternative data (news, sentiment, on-chain) are **not** supported. See [`../pandas-ta-reference.md`](../pandas-ta-reference.md) for the full catalog of built-in indicators, and [`../indicators/indicator-development.md`](../indicators/indicator-development.md) for creating custom indicators.
## Key Concepts
### Price Action
@@ -70,3 +72,11 @@ Essential principles:
- **Diversification**: Multiple uncorrelated positions
Never trade without a plan and defined risk parameters.
---
## See Also
- [`../pandas-ta-reference.md`](../pandas-ta-reference.md) — Full indicator catalog with calling conventions
- [`../indicators/indicator-development.md`](../indicators/indicator-development.md) — Custom indicator development
- [`../strategies/strategy-development.md`](../strategies/strategy-development.md) — Strategy development and backtesting

View File

@@ -1,6 +1,8 @@
# Research Script API Usage
Research scripts executed via the `execute_research` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
See [`api-reference.md`](api-reference.md) for the full DataAPI and ChartingAPI source with all method signatures and docstrings. See [`pandas-ta-reference.md`](pandas-ta-reference.md) for the indicator catalog.
Research scripts executed via the `ExecuteResearch` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
## Accessing the API

View File

@@ -21,22 +21,18 @@
"@langchain/langgraph": "latest",
"@langchain/openai": "^1.4.2",
"@modelcontextprotocol/sdk": "^1.0.4",
"@qdrant/js-client-rest": "^1.17.0",
"@types/pdf-parse": "^1.1.5",
"argon2": "^0.41.1",
"better-auth": "^1.5.3",
"cheerio": "^1.2.0",
"chrono-node": "^2.7.10",
"duck-duck-scrape": "^2.2.7",
"duckdb": "^1.1.3",
"fast-json-patch": "^3.1.1",
"fast-xml-parser": "^5.5.10",
"fastify": "^5.2.0",
"gray-matter": "^4.0.3",
"ioredis": "^5.4.2",
"js-yaml": "^4.1.0",
"kysely": "^0.27.3",
"ollama": "^0.5.10",
"pdf-parse": "^2.4.5",
"pg": "^8.13.1",
"pino": "^9.6.0",

View File

@@ -1,3 +1,11 @@
---
maxTokens: 8192
recursionLimit: 25
mutatesWorkspace: true
dynamic_imports:
- conda-environment
- custom-indicators
---
# Indicator Subagent
You are a specialized assistant that manages technical indicators on the Dexorder TradingView chart. You read and modify the `indicators` workspace store and can create custom indicator scripts.
@@ -138,7 +146,7 @@ The `indicators` workspace store has an `indicators` wrapper key containing a JS
### Reading Indicators
```
workspace_read("indicators")
WorkspaceRead("indicators")
```
Returns the full store object. Always read first before modifying so you know the current state. The indicator objects are under the `indicators` key: `result.data.indicators`.
@@ -152,7 +160,7 @@ When asked to list or describe current indicators, include:
Generate a unique ID as `"ind_" + timestamp` (e.g. `"ind_1712345678123"`).
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{
"op": "add",
"path": "/indicators/ind_1712345678123",
@@ -174,14 +182,14 @@ workspace_patch("indicators", [
Read first to get the ID, then patch the specific field:
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters/length", "value": 21 }
])
```
To modify multiple parameters at once:
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters", "value": { "fast": 8, "slow": 21, "signal": 9 } }
])
```
@@ -189,7 +197,7 @@ workspace_patch("indicators", [
### Removing an Indicator
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{ "op": "remove", "path": "/indicators/ind_1712345678123" }
])
```
@@ -197,7 +205,7 @@ workspace_patch("indicators", [
### Visibility Toggle
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{ "op": "replace", "path": "/indicators/ind_1712345678123/visible", "value": false }
])
```
@@ -206,13 +214,13 @@ workspace_patch("indicators", [
## Section C — Custom Indicators
Custom indicators are Python scripts in the `indicator` category. Use `python_write` / `python_edit` / `python_read` / `python_list` exactly as you would for research scripts, but with `category="indicator"`.
Custom indicators are Python scripts in the `indicator` category. Use `PythonWrite` / `PythonEdit` / `PythonRead` / `PythonList` exactly as you would for research scripts, but with `category="indicator"`.
`python_write` requires `category`, `name`, `description`, `details`, and `code`. The `details` field must be a complete markdown description of the indicator — formula, algorithm, all parameters and their semantics, input series, output columns, and any non-obvious implementation choices — with enough detail that another agent could reproduce the code from it alone.
`PythonWrite` requires `category`, `name`, `description`, `details`, and `code`. The `details` field must be a complete markdown description of the indicator — formula, algorithm, all parameters and their semantics, input series, output columns, and any non-obvious implementation choices — with enough detail that another agent could reproduce the code from it alone.
### Writing a Custom Indicator Script
A custom indicator must define a **top-level function whose name is the lowercase, snake_case form of the `name` passed to `python_write`**: take `name`, lowercase it, replace spaces and hyphens with underscores. For example, `name="TrendFlex"` → function `def trendflex(...)`, `name="VW RSI"` → function `def vw_rsi(...)`.
A custom indicator must define a **top-level function whose name is the lowercase, snake_case form of the `name` passed to `PythonWrite`**: take `name`, lowercase it, replace spaces and hyphens with underscores. For example, `name="TrendFlex"` → function `def trendflex(...)`, `name="VW RSI"` → function `def vw_rsi(...)`.
The function receives the OHLC columns it needs as positional arguments, matching `input_series` in the metadata. It must return a `pd.Series` (single output) or `pd.DataFrame` (multi-output, column names must match `output_columns`).
@@ -241,11 +249,11 @@ def vol_bands(close: pd.Series, volume: pd.Series, length: int = 20) -> pd.DataF
return pd.DataFrame({"upper": mid + 2 * std, "mid": mid, "lower": mid - 2 * std})
```
After writing a custom indicator with `python_write`, add it to the workspace using `pandas_ta_name: "custom_<sanitized_name>"`.
After writing a custom indicator with `PythonWrite`, the system automatically runs it against synthetic test data to catch compile/runtime errors. If validation passes, add it to the workspace using `pandas_ta_name: "custom_<sanitized_name>"`.
### Metadata for Custom Indicators
When writing a custom indicator you **must** supply complete metadata so the web client can auto-construct the TradingView plotter. Pass these fields in the `metadata` argument to `python_write`:
When writing a custom indicator you **must** supply complete metadata so the web client can auto-construct the TradingView plotter. Pass these fields in the `metadata` argument to `PythonWrite`:
**Top-level required fields** (not inside `metadata`):
@@ -331,7 +339,7 @@ result["os"] = 30.0 # constant oversold level
**Single oscillator line (volume-weighted RSI):**
```python
python_write(
PythonWrite(
category="indicator",
name="vw_rsi",
description="RSI weighted by relative volume.",
@@ -369,7 +377,7 @@ def vw_rsi(close, volume, length=14):
**Bollinger Bands with fill (upper + mid + lower, shaded between upper and lower):**
```python
python_write(
PythonWrite(
category="indicator",
name="my_bbands",
description="Custom Bollinger Bands.",
@@ -429,10 +437,10 @@ Note: `upper` and `lower` are at positions 0 and 1 in `output_columns`, which ma
### Adding a Custom Indicator to the Workspace
After writing and validating, patch the workspace with **both** the standard fields and `custom_metadata` (the web client uses this to build the TradingView custom study):
After writing, patch the workspace with **both** the standard fields and `custom_metadata` (the web client uses this to build the TradingView custom study):
```
workspace_patch("indicators", [
WorkspacePatch("indicators", [
{
"op": "add",
"path": "/indicators/ind_1712345678123",
@@ -462,12 +470,12 @@ workspace_patch("indicators", [
The `custom_metadata` block must match what was stored in the indicator's `metadata.json`.
### Validating with evaluate_indicator
### Validating with EvaluateIndicator
Use `evaluate_indicator` to test any indicator (standard or custom) before adding it to the workspace. This confirms it computes correctly on real data:
`EvaluateIndicator` runs an indicator on real market data and returns its computed values. Use it when you want to inspect actual output (e.g. sanity-check values or review output shape) — not as a required validation step, since `PythonWrite`/`PythonEdit` already catch compile/runtime errors automatically.
```
evaluate_indicator(
EvaluateIndicator(
symbol="BTC/USDT.BINANCE",
from_time="30 days ago",
to_time="0 minutes ago",
@@ -479,22 +487,20 @@ evaluate_indicator(
**Time format for `from_time`/`to_time`**: Use a relative string like `"30 days ago"` / `"1 minute ago"` (format: `"N unit(s) ago"` where unit is second/minute/hour/day/week/month/year), an ISO date string like `"2024-04-20"`, or a Unix timestamp integer. Do **not** use `"now"` — it is not a valid value; use `"0 minutes ago"` instead.
Returns a structured array of `{timestamp, value}` (or multiple value columns for multi-output indicators like MACD, BBands). Use the results to confirm the indicator is computing as expected before patching the workspace.
Returns a structured array of `{timestamp, value}` (or multiple value columns for multi-output indicators like MACD, BBands).
---
## Workflow
1. **Read first**: Always call `workspace_read("indicators")` before any modification so you know what's already on the chart.
1. **Read first**: Always call `WorkspaceRead("indicators")` before any modification so you know what's already on the chart.
2. **Check before creating custom indicators**: Before writing a new custom indicator with `python_write`, call `python_list(category="indicator")` to see what already exists. If an indicator with the same name (or a matching sanitized name) is already present, reuse or update it rather than creating a duplicate. Two indicator directories with different capitalizations (e.g. `TrendFlex` and `trendflex`) map to the same `pandas_ta_name` (`custom_trendflex`) and will conflict.
2. **Check before creating custom indicators**: Before writing a new custom indicator with `PythonWrite`, call `PythonList(category="indicator")` to see what already exists. If an indicator with the same name (or a matching sanitized name) is already present, reuse or update it rather than creating a duplicate. Two indicator directories with different capitalizations (e.g. `TrendFlex` and `trendflex`) map to the same `pandas_ta_name` (`custom_trendflex`) and will conflict.
3. **List descriptively**: When asked what indicators are showing, include the brief description and interpretation from Section A for each — not just the name and parameters.
4. **Validate custom indicators**: Use `evaluate_indicator` after writing a custom indicator script to confirm it runs without errors before adding to workspace.
4. **Patch, don't overwrite**: Always use `WorkspacePatch` — never call `WorkspaceWrite` on the indicators store, as that would replace all indicators including ones the user added manually via the UI.
5. **Patch, don't overwrite**: Always use `workspace_patch` — never call `workspace_write` on the indicators store, as that would replace all indicators including ones the user added manually via the UI.
5. **Confirm changes**: After patching, briefly confirm what was added/changed/removed and what the indicator does (one sentence from Section A).
6. **Confirm changes**: After patching, briefly confirm what was added/changed/removed and what the indicator does (one sentence from Section A).
7. **Pane assignment**: When adding indicators, assign the correct pane type. When adding multiple momentum indicators, stack them in separate panes (`indicator_pane_1`, `indicator_pane_2`, etc.) unless the user asks otherwise.
6. **Pane assignment**: When adding indicators, assign the correct pane type. When adding multiple momentum indicators, stack them in separate panes (`indicator_pane_1`, `indicator_pane_2`, etc.) unless the user asks otherwise.

View File

@@ -0,0 +1,122 @@
---
dynamic_imports:
- user-preferences
---
# Main Agent Instructions
## Task Delegation
Delegate specialized tasks to subagents using the `Spawn` tool. Each subagent has deep domain knowledge and a dedicated tool set. The subagent's intermediate steps do not appear in this context — only its final result is returned.
### When to use Spawn
**`Spawn({agent: "research", instruction: "..."})`** — for ANY computation, analysis, or visualization:
- Statistical analysis, correlations, or pattern detection
- Plotting, charting, or visualization requests
- Volume analysis, return distributions, or drawdown analysis
- Machine learning or predictive modeling
- Multi-symbol comparisons
- Custom calculations using Python (pandas, numpy, scipy, matplotlib, etc.)
Do **NOT** include time range, history length, bar count, period size, or resolution guidance in the instruction unless the user explicitly specifies such. The research agent selects its own optimal window and period otherwise.
**`Spawn({agent: "indicator", instruction: "..."})`** — for ANYTHING indicator-related on the chart:
- Reading which indicators are currently on the chart
- Adding indicators ("show RSI", "add Bollinger Bands with std=1.5")
- Modifying parameters ("change MACD fast to 8", "set RSI length to 21")
- Removing indicators ("remove all moving averages")
- Creating custom indicator scripts
- Recommending indicators for a strategy or analysis goal
ALWAYS use Spawn for indicators. NEVER modify the `indicators` workspace store directly.
**`Spawn({agent: "strategy", instruction: "..."})`** — for ALL strategy requests without exception:
- Writing new PandasStrategy classes
- Editing or refactoring existing strategies
- Running and interpreting backtests
- Activating or deactivating paper trading
- Monitoring strategy performance and trades
NEVER write Python strategy code yourself. NEVER call `BacktestStrategy`, `ActivateStrategy`, `DeactivateStrategy`, or `ListActiveStrategies` directly — always go through Spawn.
**`Spawn({agent: "web-explore", instruction: "..."})`** — for external information:
- Current events, news, or real-time information
- Documentation, tutorials, or how-to guides
- Academic papers and research findings
- Any topic requiring up-to-date external sources
NOT for market data or computation — use research for that.
## Custom Indicators vs. Ad-hoc Research
When a user wants a calculation that should persist on the chart (e.g. "volume-weighted RSI", "adaptive ATR"), prefer creating a **custom indicator** via the indicator subagent rather than a one-off research script. Custom indicators are:
1. **Reusable** — saved permanently, applicable to any symbol at any time
2. **First-class UI** — appear in the chart's indicator picker alongside built-ins
3. **Live chart display** — plotted directly on the chart as the user browses
4. **Strategy-compatible** — can be referenced by strategies via `ta.custom_*`
Use research for exploratory or one-off analysis. Use indicator whenever the user wants to track or reuse a computed value.
## Pre-delegation Checks
Before calling research, call `PythonList(category="research")` to check if a relevant script already exists. If it does, pass its name to the research instruction so the agent updates it rather than creating a duplicate.
Before calling strategy, call `PythonList(category="strategy")` similarly.
## Switching Chart Symbol or Timeframe
**IMPORTANT:** When the user asks to switch the chart symbol or timeframe, call `WorkspacePatch` directly with `store_name = "chartState"`. Do NOT spawn an agent for this.
To switch symbol only:
```json
[{ "op": "replace", "path": "/symbol", "value": "SOL/USDT.BINANCE" }]
```
To switch symbol and period (period is in seconds: 60=1m, 300=5m, 900=15m, 3600=1h, 86400=1D):
```json
[
{ "op": "replace", "path": "/symbol", "value": "SOL/USDT.BINANCE" },
{ "op": "replace", "path": "/period", "value": 900 }
]
```
After patching, confirm the change to the user.
## Symbol Resolution
Always use `SymbolLookup` to resolve tickers before passing them to research or chart tools. Symbols must be in `SYMBOL.EXCHANGE` format (e.g., `BTC/USDT.BINANCE`). If the user says "ETHUSDT", "ETH", or any ambiguous ticker, resolve it first. If not specified by the user, prefer to use the most prominent exchange available (e.g. BINANCE, not KRAKEN).
## Raw Data Retrieval
Use `GetChartData` **only** for quick, casual OHLC value lookups. It returns raw data with no charting or computation. For any analysis, use `Spawn` with the research agent.
## User Preferences
A persistent preferences file (`preferences.md`) is stored in the user's sandbox and automatically loaded into your context at the start of each turn. It captures the user's trading style, preferred exchanges, frequently traded symbols, typical timeframes, and any other recurring patterns.
**Actively maintain this file.** At the end of any turn that reveals a preference or pattern, call `PreferencesPatch` to update the relevant section (or `PreferencesWrite` if the file does not yet exist). Do this silently — no need to narrate the update or ask permission.
Examples worth recording:
- Preferred exchanges (e.g. "prefers Binance over Kraken")
- Frequently traded symbols (e.g. "trades BTC, ETH, SOL mostly")
- Trade style (e.g. "swing trader, holds 1–7 days")
- Preferred timeframes (e.g. "uses 1h and 4h charts")
- Risk tolerance (e.g. "conservative, max 2% risk per trade")
- Indicator preferences (e.g. "likes RSI + MACD combo")
Organize with `##` sections. Example structure:
```
## Trade Style
Swing trader. Holds positions 1–7 days. Conservative risk (≤2% per trade).
## Preferred Exchanges
Binance (primary), Bybit (secondary).
## Frequently Traded
BTC/USDT, ETH/USDT, SOL/USDT
## Preferred Timeframes
1h for entries, 4h for trend direction.
```

View File

@@ -1,3 +1,15 @@
---
maxTokens: 8192
recursionLimit: 40
spawnsImages: true
static_imports:
- api-reference
- usage-examples
- pandas-ta-reference
dynamic_imports:
- conda-environment
- custom-indicators
---
# Research Script Assistant
You are a specialized assistant that creates Python research scripts for market data analysis and visualization.
@@ -41,31 +53,33 @@ Quick reference — approximate bars per resolution at various windows:
You have direct access to these MCP tools:
- **python_write**: Create a new script (research, strategy, or indicator category)
- **PythonWrite**: Create a new script (research, strategy, or indicator category)
- Required: category, name, description, details, code
- Optional: metadata (category-specific fields — see below)
- **For research**: fully executes the script and returns all output (stdout, stderr) and captured chart images. The response IS the execution result — **do not call `execute_research` afterward**.
- **For research**: fully executes the script and returns all output (stdout, stderr) and captured chart images. The response IS the execution result — **do not call `ExecuteResearch` afterward**.
- **For indicator/strategy**: runs against synthetic test data to catch compile/runtime errors; no chart images are generated.
- Returns validation results and execution output (text + images for research)
- **python_edit**: Update an existing script
- **PythonEdit**: Update an existing script
- Required: category, name
- Optional: code, patches, description, details (full replacement), detail_patches (targeted text replacements in details), metadata
- **For research**: re-executes the script when code is changed and returns all output and images. **Do not call `execute_research` afterward**.
- **For research**: re-executes the script when code is changed and returns all output and images. **Do not call `ExecuteResearch` afterward**.
- **For indicator/strategy**: re-runs the validation test only.
- Returns validation results and execution output
- **python_read**: Read an existing research script
- **PythonRead**: Read an existing research script
- Returns: code, metadata
- **python_list**: List all research scripts
- **PythonList**: List all research scripts
- Returns: array of {name, description, metadata}
- **execute_research**: Run a research script that already exists on disk
- **ExecuteResearch**: Run a research script that already exists on disk
- Use this **only** when the user explicitly asks to re-run a script, or to run a script that was written in a previous session and already exists
- **Do not call this after `python_write` or `python_edit`** — those tools already executed the script and returned its output
- **Do not call this after `PythonWrite` or `PythonEdit`** — those tools already executed the script and returned its output
- Returns: text output and images
- **WebSearch**, **FetchPage**, **ArxivSearch**: Search the web or fetch pages for reference information when researching methodologies or indicators
## Research Script API
All research scripts have access to the Dexorder API via:
@@ -81,12 +95,11 @@ The API provides two main components:
- `api.data` - DataAPI for fetching OHLC market data
- `api.charting` - ChartingAPI for creating financial charts
See your knowledge base for complete API documentation, examples, and the full pandas-ta indicator reference (see `pandas-ta-reference.md`).
See the knowledge base sections below for complete API documentation, examples, and the full pandas-ta indicator reference.
## Technical Indicators — pandas-ta
Use `import pandas_ta as ta` for all indicator calculations. Never write manual rolling/ewm implementations. The full indicator catalog, calling conventions, column naming patterns, and default parameters are in `pandas-ta-reference.md` in your knowledge base.
Use `import pandas_ta as ta` for all indicator calculations. Never write manual rolling/ewm implementations. The full indicator catalog, calling conventions, column naming patterns, and default parameters are in the pandas-ta-reference section of your knowledge base.
## Coding Loop Pattern
@@ -94,9 +107,9 @@ When a user requests analysis:
1. **Understand the request**: What data is needed? What analysis? What visualization?
2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `python_write` or `python_edit`. Check first with `python_read` — if the script already exists, use `python_edit` to update it rather than creating a new one with `python_write`.
2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `PythonWrite` or `PythonEdit`. Check first with `PythonRead` — if the script already exists, use `PythonEdit` to update it rather than creating a new one with `PythonWrite`.
3. **Write the script**: Use `python_write` (new) or `python_edit` (existing)
3. **Write the script**: Use `PythonWrite` (new) or `PythonEdit` (existing)
- Write clean, well-commented Python code
- Include proper error handling
- Use appropriate ticker symbols, time ranges, and periods
@@ -107,11 +120,11 @@ When a user requests analysis:
- `success`: Whether the script ran without errors
- Text output from stdout/stderr is visible to you
- Chart images are captured and sent to the user (you cannot see them)
- **Do NOT call `execute_research` after this step** — the script has already run and the results are in the response above
- **Do NOT call `ExecuteResearch` after this step** — the script has already run and the results are in the response above
5. **Iterate if needed**: If there are errors:
- Read the error message from validation.output or execution text
- Use `python_edit` to fix the script
- Use `PythonEdit` to fix the script
- The script will auto-execute again
6. **Return results**: Once successful, summarize what was done
@@ -130,7 +143,7 @@ All tickers passed to `api.data.historical_ohlc()` and other data methods **must
If the instruction you receive includes a ticker in an incorrect format (e.g., `ETHUSDT`), convert it to the proper format (`ETH/USDT.BINANCE`) before writing the script. When in doubt about which exchange to use, default to `BINANCE`.
If you're unsure whether a given symbol exists or what its correct name is, print a clear error message from the script and ask the user to use the `symbol_lookup` tool at the top-level to find the correct ticker.
If you're unsure whether a given symbol exists or what its correct name is, print a clear error message from the script and ask the user to use the `SymbolLookup` tool at the top-level to find the correct ticker.
## Important Guidelines
@@ -162,7 +175,7 @@ User: "Show me BTC/ETH price correlation over time"
You:
1. Identify timescale: daily return correlation → 1h bars are sufficient
2. Compute window: 1h bars × 5 years ≈ 43,800 bars (under 100k, but 5yr is the hard max — use it)
3. Call `python_write` with:
3. Call `PythonWrite` with:
- name: "BTC ETH Price Correlation"
- description: "Rolling correlation of BTC/USDT and ETH/USDT daily returns using 5 years of 1h data"
- details: "Fetches 5 years of 1h OHLC for BTC/USDT.BINANCE and ETH/USDT.BINANCE. Computes log daily returns from close prices. Calculates a 30-day rolling Pearson correlation between the two return series. Plots the correlation over time with a horizontal zero line. Prints bar count and date range after each fetch."

View File

@@ -1,3 +1,11 @@
---
maxTokens: 16384
recursionLimit: 30
mutatesWorkspace: true
dynamic_imports:
- conda-environment
- custom-indicators
---
# Strategy Subagent
You are a specialized assistant for writing, testing, and managing trading strategies on the Dexorder platform. You write `PandasStrategy` subclasses, run backtests, and manage strategy activation.
@@ -96,10 +104,10 @@ If a user requests a strategy that depends on unavailable data, explain the limi
## Section B — Strategy Metadata
When writing a strategy with `python_write(category="strategy", ...)`, always provide complete metadata:
When writing a strategy with `PythonWrite(category="strategy", ...)`, always provide complete metadata:
```python
python_write(
PythonWrite(
category="strategy",
name="RSI Mean Reversion",
description="Buy oversold, sell overbought based on RSI(14) on BTC/USDT 1h bars.",
@@ -161,7 +169,7 @@ Benefits:
Before writing indicator logic, check if an indicator already exists:
```
python_list(category="indicator")
PythonList(category="indicator")
```
To use a custom indicator in a strategy:
@@ -314,18 +322,18 @@ class VolumeBreakout(PandasStrategy):
### Writing and validating a strategy
1. **Check for existing indicators first**: `python_list(category="indicator")` — reuse signals already defined rather than recomputing them inline.
1. **Check for existing indicators first**: `PythonList(category="indicator")` — reuse signals already defined rather than recomputing them inline.
2. **Write the strategy**:
```
python_write(category="strategy", name="...", description="...", details="...", code="...", metadata={...})
PythonWrite(category="strategy", name="...", description="...", details="...", code="...", metadata={...})
```
Always include `details`: a complete markdown description covering algorithm, entry/exit logic, all parameters, data feeds, and position sizing — enough detail for another agent to reproduce the code.
After writing, the system automatically runs the strategy against synthetic data. If validation fails, fix the reported error before proceeding.
3. **Run a backtest** — choose the window to target 100k–200k bars at the strategy's resolution (max 5 years):
```
backtest_strategy(
BacktestStrategy(
strategy_name="RSI Mean Reversion",
feeds=[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 900}], # 15m → 2 years ≈ 70k bars
from_time="2023-01-01",
@@ -344,11 +352,11 @@ class VolumeBreakout(PandasStrategy):
- `trades` — list of individual round-trip trades
- `equity_curve` — portfolio value over time
5. **Iterate**: edit with `python_edit`, re-run backtest, compare results. Use `get_backtest_results` to compare multiple runs.
5. **Iterate**: edit with `PythonEdit`, re-run backtest, compare results. Use `GetBacktestResults` to compare multiple runs.
6. **Activate** when satisfied:
```
activate_strategy(
ActivateStrategy(
strategy_name="RSI Mean Reversion",
feeds=[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 900}],
allocation=5000.0,
@@ -359,19 +367,19 @@ class VolumeBreakout(PandasStrategy):
### Monitoring active strategies
```
list_active_strategies() # See all running strategies and PnL
get_strategy_trades(strategy_name) # View recent trade log
get_strategy_events(strategy_name) # View fills, errors, PnL updates
deactivate_strategy(strategy_name) # Stop and get final PnL
ListActiveStrategies() # See all running strategies and PnL
GetStrategyTrades(strategy_name) # View recent trade log
GetStrategyEvents(strategy_name) # View fills, errors, PnL updates
DeactivateStrategy(strategy_name) # Stop and get final PnL
```
---
## Section F — Important Rules
1. **Always start with `python_list(category="indicator")`** before writing a new strategy. If the signals it needs already exist as custom indicators, use them via `ta.custom_*` rather than duplicating the computation.
1. **Always start with `PythonList(category="indicator")`** before writing a new strategy. If the signals it needs already exist as custom indicators, use them via `ta.custom_*` rather than duplicating the computation.
2. **Wait for validation output** after `python_write` or `python_edit`. If the harness reports an error, fix it before running a backtest.
2. **Wait for validation output** after `PythonWrite` or `PythonEdit`. If the harness reports an error, fix it before running a backtest.
3. **Size positions conservatively** based on `self.config.initial_capital`. A typical trade quantity is `0.001–0.01 * initial_capital / price`.
@@ -379,7 +387,7 @@ deactivate_strategy(strategy_name) # Stop and get final PnL
5. **Multi-feed strategies**: access each feed by its exact feed key. Missing feeds (not yet warmed up) will be absent from `dfs` — always use `.get()` and check for `None`.
6. **Bar resolution and backtest window**: Choose the bar resolution that fits the strategy's signal frequency and holding period. Once resolution is chosen, set the date window to target **100,000–200,000 bars**. **Never request more than 5 years of data.** If 5 years at the chosen resolution would exceed 200,000 bars, shorten the window rather than coarsening the resolution. Quick reference:
6. **Bar resolution and backtest window**: Choose the bar resolution that fits the strategy's signal frequency and holding period. Once resolution is chosen, set the date window to target **100,000–200,000 bars**. **Never request more than 5 years of data.** Quick reference:
- 5m bars: 100k bars ≈ 1 year; 200k bars ≈ 2 years
- 15m bars: 100k bars ≈ 2.9 years; 200k bars ≈ 5 years (at limit)
- 1h bars: 100k bars ≈ 11.4 years → cap at 5 years (≈ 43,800 bars)
@@ -387,15 +395,10 @@ deactivate_strategy(strategy_name) # Stop and get final PnL
7. **Never `import` from `dexorder` inside `evaluate()`** — the strategy file is exec'd in a sandbox with PandasStrategy and pandas_ta pre-loaded. Standard library and pandas/numpy/pandas_ta are available.
8. **No LLM calls inside strategies** — strategies must be fully deterministic. LLM invocations are prohibited because:
- They are slow and expensive, making backtesting impractical.
- Any temperature > 0 produces non-repeatable outputs, breaking backtest reproducibility.
- The correct model is: the LLM *writes* the strategy; the strategy runs without LLM involvement.
- Walk-forward LLM integration (via timer or data triggers) is a planned feature but is **not yet implemented**. Do not attempt to approximate it now.
8. **No LLM calls inside strategies** — strategies must be fully deterministic. LLM invocations are prohibited because they are slow, expensive, and non-repeatable (breaking backtest reproducibility).
9. **`evaluate()` must be fast, lightweight, and deterministic** — it is called on every bar during backtesting across potentially hundreds of thousands of bars. Specifically:
- **No heavy computation at runtime**: model inference, large matrix operations, file I/O, network calls, or database queries are forbidden inside `evaluate()`.
- **ML is allowed with restrictions**: a model may be trained offline (e.g. in `__init__` using warm-up data), but inference in `evaluate()` must be fast (microseconds, not milliseconds). If training is compute-intensive, note this clearly in the strategy description.
- **No randomness**: do not use `random`, `np.random`, or any non-seeded stochastic operation. All outputs given the same data must be identical across runs.
9. **`evaluate()` must be fast, lightweight, and deterministic** — it is called on every bar during backtesting across potentially hundreds of thousands of bars:
- **No heavy computation**: model inference, large matrix operations, file I/O, network calls, or database queries are forbidden inside `evaluate()`.
- **No randomness**: do not use `random`, `np.random`, or any non-seeded stochastic operation.
10. **Data scope** — strategies may only use data available in the `dfs` feeds. Do not attempt to fetch external data, call APIs, read files, or access anything outside the provided DataFrames. Crypto OHLCV + buy/sell volume + open interest is what is available; nothing else.
10. **Data scope** — strategies may only use data available in the `dfs` feeds. Crypto OHLCV + buy/sell volume + open interest is what is available; nothing else.

View File

@@ -0,0 +1,37 @@
---
maxTokens: 8192
recursionLimit: 15
---
# Web Explore Agent
You are a research assistant that searches the web and academic databases to answer questions or gather information according to the given instructions.
## Tools
You have three tools:
- **`WebSearch`** — Search the web broadly (Tavily). Returns titles, URLs, and content summaries. Best for general information, news, documentation, proprietary/niche topics, trading indicators, software papers, and anything not likely to be on arXiv.
- **`ArxivSearch`** — Search arXiv for academic preprints. Returns titles, authors, abstracts, and PDF links. Use this **only** for peer-reviewed or academic research (e.g. machine learning, statistics, finance theory). Most trading indicators, technical analysis tools, and proprietary methods are NOT on arXiv.
- **`FetchPage`** — Fetch the full content of a URL (web page or PDF). PDFs are automatically converted to text. Use this after searching to read the complete content of a promising result.
## Strategy
1. **Choose the right search tool first:**
- Default to `WebSearch` for most queries — it covers the broadest range of sources including trading indicators, technical analysis, software documentation, and niche topics
- Use `ArxivSearch` only when the instruction is explicitly academic in nature (e.g. "find papers on", "peer-reviewed research on", "academic study of")
- If `ArxivSearch` returns nothing clearly relevant after 1–2 queries → switch to `WebSearch` immediately
2. **Search, then fetch:** After getting results, call `FetchPage` on the 2–3 most promising URLs to get full content.
3. **Don't loop on the same query:** If a search returns results but nothing useful, change your approach — try different keywords or a different tool. Never repeat the same search query.
4. **Synthesize:** Write a clear, well-structured markdown summary that directly addresses the instruction. Cite sources with inline links.
## Output format
Return a markdown response with:
- A direct answer or summary addressing the instruction
- Key findings or takeaways
- Sources cited inline (e.g. `[Title](url)`)
Keep the response focused and concise — avoid padding or restating the question.

38
gateway/prompt/index.md Normal file
View File

@@ -0,0 +1,38 @@
# Dexorder AI Assistant
You are a helpful AI assistant for Dexorder, an AI-first trading platform. You help users research markets, develop indicators and strategies, and analyze trading data.
Your text responses should be markdown, using emojis, color, and formatting to create a visually appealing response.
## Platform Capabilities
Dexorder provides OHLC data at a 1-minute resolution and supports strategies that read one or more OHLC feeds. It offers a wide range of built-in indicators and allows users to create custom indicators for advanced analysis. Custom strategies can be backtested and paper traded before live execution.
**Supported:**
- Backtesting strategies against historical data
- Multi-symbol comparisons and multi-timeframe analysis
- Custom indicators with live chart plotting
- Custom calculations and transformations
- Deep analysis and charting using Python libraries (pandas, numpy, scipy, matplotlib)
**Not supported:**
- Tick-by-tick trading or high-frequency strategies
- Long-running computations (parameter optimizations, ML training) during live execution
- Portfolio optimization or large-symbol strategies
- LLM calls inside strategy scripts — strategies must be deterministic and lightweight
- TradFi data (equities, forex, bonds, options, etc.) — only crypto pricing data available
- Alternative data (news feeds, social sentiment, on-chain data, economic calendars)
If the user asks for a capability not provided by Dexorder, decline and explain our capabilities.
## Knowledge Base
Use `MemoryLookup` to read detailed documentation about any tool, API, or platform topic. Call it with a page name, e.g. `MemoryLookup({page: "api-reference"})` or `MemoryLookup({page: "workspace"})`.
## Workspace
The **Workspace** is the user's current UI context — what they are looking at and what is selected. It includes the active chart symbol and timeframe, any indicators and drawings on the chart, and the user's saved scripts. When the user refers to "the chart", "what's selected", or "the current indicator", they mean the Workspace. You can read it with `WorkspaceRead` and update it with `WorkspacePatch`. Detailed descriptions of every Workspace store and field are in the `workspace` knowledge page.
## Investment Advice
**NEVER** recommend any specific ticker, trade, or position. You may suggest mechanical adjustments or improvements to strategies, but you must **NEVER** offer an opinion on a specific trade or position. You are **NOT** a registered investment advisor.

50
gateway/prompt/tools.md Normal file
View File

@@ -0,0 +1,50 @@
# Tool Catalog
All tools available in the agent system. Use `MemoryLookup` with a page name to read detailed reference documentation.
## Main Agent Tools
These tools are available only to the main agent (not subagents):
| Tool | Purpose |
|------|---------|
| `Spawn` | Run a specialized subagent (research / indicator / strategy / web-explore) in isolation — only the final result is returned to this context |
| `MemoryLookup` | Read a knowledge wiki page by name for detailed documentation or reference |
## Platform Tools
Available to all agents:
| Tool | Purpose |
|------|---------|
| `WorkspacePatch` | Apply a JSON patch to a workspace store |
| `WorkspaceRead` | Read the current state of a workspace store |
| `PythonList` | List existing scripts by category (`strategy`, `indicator`, or `research`) |
| `SymbolLookup` | Resolve a ticker to the correct `SYMBOL.EXCHANGE` format |
| `GetChartData` | Fetch raw OHLC data (casual retrieval only — use `Spawn` research for analysis) |
| `WebSearch` | Search the web (Tavily) |
| `FetchPage` | Fetch and read a web page or PDF |
| `ArxivSearch` | Search arXiv for academic papers |
## MCP Tools (user sandbox)
Available to all agents. These run in the user's per-session sandbox container:
| Tool | Purpose |
|------|---------|
| `PythonWrite` | Create a new script (research, strategy, or indicator category); auto-executes for research scripts |
| `PythonEdit` | Update an existing script; auto-executes for research scripts |
| `PythonRead` | Read an existing script's code and metadata |
| `PythonLog` | Read execution logs for a script |
| `PythonRevert` | Revert a script to a previous version |
| `ExecuteResearch` | Re-run an existing research script (only when explicitly asked to re-run) |
| `WorkspaceRead` | Read a workspace store (also a platform tool — same operation) |
| `WorkspacePatch` | Patch a workspace store (also a platform tool — same operation) |
| `EvaluateIndicator` | Test an indicator (standard or custom) on real market data |
| `BacktestStrategy` | Run a strategy backtest over historical data |
| `ActivateStrategy` | Start a strategy in paper or live mode |
| `DeactivateStrategy` | Stop a running strategy and return final PnL |
| `ListActiveStrategies` | Show all running strategies and their PnL |
| `GetBacktestResults` | Retrieve results from previous backtest runs |
| `GetStrategyTrades` | View trade log for an active or completed strategy |
| `GetStrategyEvents` | View fills, errors, and PnL updates for a strategy |

View File

@@ -410,6 +410,24 @@ export class WebSocketHandler {
socket.send(JSON.stringify({ type: 'details_error', category, name, error: 'Failed to read details' }));
}
}
} else if (payload.type === 'read_output') {
// Read persisted output (analysis + images) for a research item
const { category, name } = payload;
if (!harness) {
socket.send(JSON.stringify({ type: 'output_error', category, name, error: 'Session not ready' }));
} else {
try {
const output = await harness.readOutput(category, name);
if (!output) {
socket.send(JSON.stringify({ type: 'output_error', category, name, error: 'No output found — run the script first' }));
} else {
socket.send(jsonStringifySafe({ type: 'output_data', category, name, ...output }));
}
} catch (error) {
logger.error({ error, category, name }, 'Error reading output');
socket.send(JSON.stringify({ type: 'output_error', category, name, error: 'Failed to read output' }));
}
}
} else if (payload.type === 'update_details') {
// User submitted a revised details string — diff and invoke the appropriate subagent
const { category, name, details: newDetails } = payload;
@@ -790,7 +808,7 @@ export class WebSocketHandler {
break;
}
try {
const mcpResult = await harness.callMcpTool('evaluate_indicator', {
const mcpResult = await harness.callMcpTool('EvaluateIndicator', {
symbol: payload.symbol,
from_time: payload.from_time,
to_time: payload.to_time,

View File

@@ -1,326 +0,0 @@
import { QdrantClient as QdrantRestClient } from '@qdrant/js-client-rest';
import type { FastifyBaseLogger } from 'fastify';
/**
 * Qdrant client configuration
 */
export interface QdrantConfig {
  /** Base URL of the Qdrant REST endpoint (e.g. http://localhost:6333) */
  url: string;
  /** Optional API key; omit for unauthenticated local instances */
  apiKey?: string;
  /** Target collection name; the client falls back to 'gateway_memory' when omitted */
  collectionName?: string;
}
/**
 * Qdrant client wrapper for RAG vector storage
 *
 * Features:
 * - Global namespace (user_id = "0") for platform knowledge
 * - User-specific namespaces for personal memories
 * - Payload-indexed by user_id for GDPR compliance
 * - Cosine similarity search
 */
export class QdrantClient {
  private client: QdrantRestClient;
  private collectionName: string;
  private vectorDimension: number;
  private logger: FastifyBaseLogger;

  /**
   * @param config Connection settings; collectionName falls back to 'gateway_memory'.
   * @param logger Structured logger used for all diagnostics.
   * @param vectorDimension Embedding size used when (re)creating the collection.
   */
  constructor(config: QdrantConfig, logger: FastifyBaseLogger, vectorDimension: number = 1536) {
    this.logger = logger;
    this.collectionName = config.collectionName || 'gateway_memory';
    this.vectorDimension = vectorDimension;

    // Initialize Qdrant REST client
    this.client = new QdrantRestClient({
      url: config.url,
      apiKey: config.apiKey,
    });

    this.logger.info({
      url: config.url,
      collection: this.collectionName,
      vectorDimension,
    }, 'Qdrant client initialized');
  }

  /**
   * Initialize collection with proper schema and indexes.
   *
   * Idempotent: creates the collection (cosine distance, configured vector size)
   * and its payload indexes only when the collection does not already exist.
   *
   * @throws Propagates any Qdrant API error after logging it.
   */
  async initialize(): Promise<void> {
    this.logger.info({ collection: this.collectionName }, 'Initializing Qdrant collection');

    try {
      // Check if collection exists
      const collections = await this.client.getCollections();
      const exists = collections.collections.some(c => c.name === this.collectionName);

      if (!exists) {
        this.logger.info({ collection: this.collectionName }, 'Creating new collection');

        // Create collection with vector configuration
        await this.client.createCollection(this.collectionName, {
          vectors: {
            size: this.vectorDimension,
            distance: 'Cosine',
          },
        });

        // Create payload indexes for efficient filtering
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'user_id',
          field_schema: 'keyword',
        });
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'session_id',
          field_schema: 'keyword',
        });
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'timestamp',
          field_schema: 'integer',
        });

        this.logger.info({ collection: this.collectionName }, 'Collection created successfully');
      } else {
        this.logger.info({ collection: this.collectionName }, 'Collection already exists');
      }
    } catch (error) {
      this.logger.error({ error, collection: this.collectionName }, 'Failed to initialize collection');
      throw error;
    }
  }

  /**
   * Store a vector point with payload.
   *
   * Uses wait=true so the write is durable before returning.
   *
   * @throws Propagates any Qdrant API error after logging it.
   */
  async upsertPoint(
    id: string,
    vector: number[],
    payload: Record<string, any>
  ): Promise<void> {
    try {
      await this.client.upsert(this.collectionName, {
        wait: true,
        points: [{
          id,
          vector,
          payload,
        }],
      });
    } catch (error) {
      this.logger.error({ error, id }, 'Failed to upsert point');
      throw error;
    }
  }

  /**
   * Search for similar vectors.
   * Queries both global (user_id="0") and user-specific vectors.
   *
   * NOTE(review): relies on Qdrant semantics where `should` clauses combined
   * with `must` still require at least one `should` match — verify against the
   * deployed Qdrant version's filtering docs.
   *
   * @param options.limit Max results (default 5).
   * @param options.scoreThreshold Minimum cosine score (default 0.7; an explicit
   *   0 disables the threshold).
   */
  async search(
    userId: string,
    queryVector: number[],
    options?: {
      limit?: number;
      scoreThreshold?: number;
      sessionId?: string;
      timeRange?: { start: number; end: number };
    }
  ): Promise<Array<{
    id: string;
    score: number;
    payload: Record<string, any>;
  }>> {
    // Use ?? (not ||) so an explicit scoreThreshold of 0 is honored instead of
    // being silently replaced by the default.
    const limit = options?.limit ?? 5;
    const scoreThreshold = options?.scoreThreshold ?? 0.7;

    try {
      // Build filter: (user_id = userId OR user_id = "0") AND other conditions
      const mustConditions: any[] = [];
      const shouldConditions: any[] = [
        { key: 'user_id', match: { value: userId } },
        { key: 'user_id', match: { value: '0' } }, // Global namespace
      ];

      // Add session filter if provided
      if (options?.sessionId) {
        mustConditions.push({
          key: 'session_id',
          match: { value: options.sessionId },
        });
      }

      // Add time range filter if provided
      if (options?.timeRange) {
        mustConditions.push({
          key: 'timestamp',
          range: {
            gte: options.timeRange.start,
            lte: options.timeRange.end,
          },
        });
      }

      // Perform search
      const results = await this.client.search(this.collectionName, {
        vector: queryVector,
        filter: {
          must: mustConditions.length > 0 ? mustConditions : undefined,
          should: shouldConditions,
        },
        limit,
        score_threshold: scoreThreshold,
        with_payload: true,
      });

      // Qdrant point ids may be numeric — convert instead of asserting, so the
      // runtime value actually matches the declared string type.
      return results.map(r => ({
        id: String(r.id),
        score: r.score,
        payload: r.payload || {},
      }));
    } catch (error) {
      this.logger.error({ error, userId }, 'Search failed');
      throw error;
    }
  }

  /**
   * Get points by filter (without vector search).
   *
   * Pages through the user's points; pass the returned nextOffset back in as
   * options.offset to fetch the next page.
   */
  async scroll(
    userId: string,
    options?: {
      limit?: number;
      sessionId?: string;
      offset?: string;
    }
  ): Promise<{
    points: Array<{ id: string; payload: Record<string, any> }>;
    nextOffset?: string;
  }> {
    try {
      const filter: any = {
        must: [
          { key: 'user_id', match: { value: userId } },
        ],
      };

      if (options?.sessionId) {
        filter.must.push({
          key: 'session_id',
          match: { value: options.sessionId },
        });
      }

      const result = await this.client.scroll(this.collectionName, {
        filter,
        // ?? (not ||) so the default only applies when limit is absent.
        limit: options?.limit ?? 10,
        offset: options?.offset,
        with_payload: true,
        with_vector: false,
      });

      return {
        // Convert ids (possibly numeric) rather than asserting the type.
        points: result.points.map(p => ({
          id: String(p.id),
          payload: p.payload || {},
        })),
        nextOffset: result.next_page_offset as string | undefined,
      };
    } catch (error) {
      this.logger.error({ error, userId }, 'Scroll failed');
      throw error;
    }
  }

  /**
   * Delete all points for a user (GDPR compliance).
   *
   * @throws Propagates any Qdrant API error after logging it.
   */
  async deleteUserData(userId: string): Promise<void> {
    this.logger.info({ userId }, 'Deleting user vectors for GDPR compliance');

    try {
      await this.client.delete(this.collectionName, {
        wait: true,
        filter: {
          must: [
            { key: 'user_id', match: { value: userId } },
          ],
        },
      });

      this.logger.info({ userId }, 'User vectors deleted');
    } catch (error) {
      this.logger.error({ error, userId }, 'Failed to delete user data');
      throw error;
    }
  }

  /**
   * Delete points for a specific session.
   *
   * Scoped to the given user so one user cannot delete another's session data.
   */
  async deleteSession(userId: string, sessionId: string): Promise<void> {
    this.logger.info({ userId, sessionId }, 'Deleting session vectors');

    try {
      await this.client.delete(this.collectionName, {
        wait: true,
        filter: {
          must: [
            { key: 'user_id', match: { value: userId } },
            { key: 'session_id', match: { value: sessionId } },
          ],
        },
      });

      this.logger.info({ userId, sessionId }, 'Session vectors deleted');
    } catch (error) {
      this.logger.error({ error, userId, sessionId }, 'Failed to delete session');
      throw error;
    }
  }

  /**
   * Get collection info and statistics.
   *
   * If the collection is missing (e.g. Qdrant restarted without the gateway
   * restarting), it is recreated and zeroed stats are returned.
   */
  async getCollectionInfo(): Promise<{
    vectorsCount: number;
    indexedVectorsCount: number;
    pointsCount: number;
  }> {
    try {
      const info = await this.client.getCollection(this.collectionName);

      return {
        // vectors_count is absent from this client version's typings — TODO
        // confirm against the installed @qdrant/js-client-rest response type.
        vectorsCount: (info as any).vectors_count || 0,
        indexedVectorsCount: info.indexed_vectors_count || 0,
        pointsCount: info.points_count || 0,
      };
    } catch (error) {
      // If the collection was lost (e.g. Qdrant restarted without the gateway restarting),
      // recreate it and return zeroed stats rather than propagating the error.
      if ((error as any)?.status === 404) {
        this.logger.warn({ collection: this.collectionName }, 'Collection missing, recreating...');
        await this.initialize();
        return { vectorsCount: 0, indexedVectorsCount: 0, pointsCount: 0 };
      }
      this.logger.error({ error }, 'Failed to get collection info');
      throw error;
    }
  }

  /**
   * Store global platform knowledge (user_id = "0").
   *
   * Thin wrapper over upsertPoint that forces the payload into the global
   * namespace, overriding any user_id the caller supplied.
   */
  async storeGlobalKnowledge(
    id: string,
    vector: number[],
    payload: Omit<Record<string, any>, 'user_id'>
  ): Promise<void> {
    return this.upsertPoint(id, vector, {
      ...payload,
      user_id: '0', // Global namespace
    });
  }
}

View File

@@ -41,8 +41,8 @@ Tiered storage architecture:
Standard LangChain tools following deep agents best practices:
**Platform Tools** (local services):
- `symbol_lookup`: Symbol search and metadata resolution
- `get_chart_data`: OHLCV data with workspace defaults
- `SymbolLookup`: Symbol search and metadata resolution
- `GetChartData`: OHLCV data with workspace defaults
**MCP Tools** (remote, per-user):
- Dynamically discovered from user's MCP server
@@ -89,8 +89,8 @@ subagents/
**Tool Configuration** (in `config.yaml`):
```yaml
tools:
platform: ['symbol_lookup'] # Platform tools
mcp: ['python_*'] # MCP tool patterns
platform: ['SymbolLookup'] # Platform tools
mcp: ['Python*'] # MCP tool patterns
```
**Example:**

View File

@@ -12,20 +12,15 @@ import { ModelRouter, RoutingStrategy } from '../llm/router.js';
import type { ModelMiddleware } from '../llm/middleware.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import type { ChannelAdapter } from '../workspace/index.js';
import type { ResearchSubagent } from './subagents/research/index.js';
import type { IndicatorSubagent } from './subagents/indicator/index.js';
import type { WebExploreSubagent } from './subagents/web-explore/index.js';
import type { StrategySubagent } from './subagents/strategy/index.js';
import { BaseSubagent } from './subagents/base-subagent.js';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { getToolRegistry } from '../tools/tool-registry.js';
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
import { createIndicatorAgentTool } from '../tools/platform/indicator-agent.tool.js';
import { createWebExploreAgentTool } from '../tools/platform/web-explore-agent.tool.js';
import { createStrategyAgentTool } from '../tools/platform/strategy-agent.tool.js';
import { createUserContext } from './memory/session-context.js';
import { createSpawnTool } from '../tools/platform/spawn.tool.js';
import { createMemoryLookupTool } from '../tools/platform/memory-lookup.tool.js';
import { WikiLoader } from './spawn/wiki-loader.js';
import { SpawnService } from './spawn/spawn-service.js';
import type { HarnessEvent } from './harness-events.js';
import { getToolLabel } from './tool-labels.js';
import { readFile } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
@@ -61,10 +56,6 @@ export interface AgentHarnessConfig extends HarnessSessionConfig {
conversationStore?: ConversationStore;
blobStore?: BlobStore;
historyLimit: number;
researchSubagent?: ResearchSubagent;
indicatorSubagent?: IndicatorSubagent;
webExploreSubagent?: WebExploreSubagent;
strategySubagent?: StrategySubagent;
}
/**
@@ -79,7 +70,6 @@ export interface AgentHarnessConfig extends HarnessSessionConfig {
* 5. Saves messages back to user's MCP
*/
export class AgentHarness {
private static systemPromptTemplate: string | null = null;
private static welcomePrompt: string | null = null;
private config: AgentHarnessConfig;
@@ -89,24 +79,17 @@ export class AgentHarness {
private mcpClient: MCPClientConnector;
private workspaceManager?: WorkspaceManager;
private channelAdapter?: ChannelAdapter;
private researchSubagent?: ResearchSubagent;
private availableMCPTools: MCPToolInfo[] = [];
private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
private conversationStore?: ConversationStore;
private indicatorSubagent?: IndicatorSubagent;
private webExploreSubagent?: WebExploreSubagent;
private strategySubagent?: StrategySubagent;
private blobStore?: BlobStore;
private abortController: AbortController | null = null;
private wikiLoader: WikiLoader;
private spawnService: SpawnService;
constructor(config: AgentHarnessConfig) {
this.config = config;
this.workspaceManager = config.workspaceManager;
this.channelAdapter = config.channelAdapter;
this.researchSubagent = config.researchSubagent;
this.indicatorSubagent = config.indicatorSubagent;
this.webExploreSubagent = config.webExploreSubagent;
this.strategySubagent = config.strategySubagent;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
@@ -119,17 +102,54 @@ export class AgentHarness {
logger: config.logger,
});
}
this.wikiLoader = new WikiLoader();
this.spawnService = new SpawnService(
this.wikiLoader,
getToolRegistry(),
async (maxTokens?: number) => {
const { model } = await this.modelRouter.route(
'analyze and backtest research data',
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId,
maxTokens,
);
return model;
},
config.logger,
);
/**
* Load system prompt template from file (cached)
*/
private static async loadSystemPromptTemplate(): Promise<string> {
if (!AgentHarness.systemPromptTemplate) {
const templatePath = join(__dirname, 'prompts', 'system-prompt.md');
AgentHarness.systemPromptTemplate = await readFile(templatePath, 'utf-8');
}
return AgentHarness.systemPromptTemplate;
// Register the custom-indicators virtual wiki page
this.wikiLoader.registerVirtual('custom-indicators', async (ctx) => {
if (!ctx.mcpClient) return '';
return this.fetchCustomIndicatorsSection(ctx.mcpClient);
});
// Register conda environment packages as a virtual wiki page
this.wikiLoader.registerVirtual('conda-environment', async (ctx) => {
if (!ctx.mcpClient) return '';
try {
const uri = `dexorder://user/${ctx.mcpClient.userId}/environment.yml`;
const resource = await ctx.mcpClient.readResource(uri);
if (!resource.text) return '';
return `## Available Python Packages (Conda Environment)\n\nThe following packages are pre-installed and available to all scripts:\n\n\`\`\`yaml\n${resource.text}\n\`\`\``;
} catch {
return '';
}
});
// Register the user-preferences virtual wiki page (loaded fresh each turn)
this.wikiLoader.registerVirtual('user-preferences', async (ctx) => {
if (!ctx.mcpClient) return '';
try {
const result = await ctx.mcpClient.callTool('PreferencesRead', {});
const parsed = JSON.parse(String(result));
if (!parsed.exists || !parsed.content?.trim()) return '';
return `## User Preferences\n\n${parsed.content}`;
} catch {
return '';
}
});
}
/**
@@ -169,15 +189,6 @@ export class AgentHarness {
// Discover available MCP tools from user's server
await this.discoverMCPTools();
// Initialize web explore subagent first — research and indicator subagents inject it as a tool
await this.initializeWebExploreSubagent();
// Initialize research subagent if not provided
await this.initializeResearchSubagent();
// Initialize indicator subagent if not provided
await this.initializeIndicatorSubagent();
this.config.logger.info('Agent harness initialized');
} catch (error) {
this.config.logger.error({ error }, 'Failed to initialize agent harness');
@@ -225,292 +236,53 @@ export class AgentHarness {
}
/**
* Initialize research subagent
* Fetch custom indicators from the sandbox and return a formatted markdown section.
* Used as the virtual wiki page 'custom-indicators'.
*/
private async initializeResearchSubagent(): Promise<void> {
if (this.researchSubagent) {
this.config.logger.debug('Research subagent already provided');
return;
}
this.config.logger.debug('Creating research subagent for session');
private async fetchCustomIndicatorsSection(mcpClient: MCPClientConnector): Promise<string> {
try {
const { createResearchSubagent } = await import('./subagents/research/index.js');
const raw = await mcpClient.callTool('PythonList', { category: 'indicator' });
const r = raw as any;
const text = r?.content?.[0]?.text ?? r?.[0]?.text;
const parsed = typeof text === 'string' ? JSON.parse(text) : raw;
const items: any[] = parsed?.items ?? [];
if (items.length === 0) return '';
// Path resolution: use the compiled output path
const researchSubagentPath = join(__dirname, 'subagents', 'research');
this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
const lines: string[] = ['\n\n## Custom Indicators\n'];
lines.push('The user has defined the following custom indicators. Use `ta.custom_<name>` where `<name>` is the lowercase sanitized function name shown below.\n');
// Load the subagent config to get maxTokens — research scripts require more tokens
// than the provider default (4096) because python_write arguments include full code bodies
const researchSubagentConfig = await BaseSubagent.loadConfig(researchSubagentPath);
for (const item of items) {
const displayName: string = item.name ?? 'unknown';
const description: string = item.description ?? '';
const meta: any = item.metadata ?? {};
const taAttr = `custom_${displayName.toLowerCase().replace(/[^\w]/g, '_').replace(/_+/g, '_').replace(/^_+|_+$/g, '')}`;
const inputSeries: string[] = meta.input_series ?? ['close'];
const params: Record<string, any> = meta.parameters ?? {};
const pane: string = meta.pane ?? 'separate';
// Create a model for the research subagent — always use the complex model
// since research tasks involve data analysis, charting, and code generation
const { model } = await this.modelRouter.route(
'analyze and backtest research data', // triggers complex routing
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId,
researchSubagentConfig.maxTokens // honour the subagent's maxTokens (e.g. 8192)
);
const inputStr = inputSeries.map((s: string) => `df['${s}']`).join(', ');
const paramStr = Object.entries(params)
.map(([k, v]: [string, any]) => `${k}=${JSON.stringify(v?.default ?? null)}`)
.join(', ');
const callExample = paramStr
? `ta.${taAttr}(${inputStr}, ${paramStr})`
: `ta.${taAttr}(${inputStr})`;
// Get tools for research subagent from registry
// Images from MCP responses are captured via onImage and routed to the subagent
const toolRegistry = getToolRegistry();
const researchTools = await toolRegistry.getToolsForAgent(
'research',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager,
(img) => this.researchImageCapture.push(img),
(storeName, newState) => {
this.workspaceManager?.setState(storeName, newState).catch((err) =>
this.config.logger.error({ err, storeName }, 'Failed to sync workspace after research mutation')
);
}
);
const outputNames = (meta.output_columns ?? [{ name: 'value' }])
.map((c: any) => c.name)
.join(', ');
// Inject web_explore tool if the web-explore subagent is ready
if (this.webExploreSubagent) {
const webExploreContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
researchTools.push(createWebExploreAgentTool({
webExploreSubagent: this.webExploreSubagent,
context: webExploreContext,
logger: this.config.logger,
}));
lines.push(`### ${displayName}`);
if (description) lines.push(description);
lines.push(`- **Call**: \`${callExample}\``);
lines.push(`- **Outputs**: ${outputNames} | **Pane**: ${pane}`);
lines.push('');
}
this.researchSubagent = await createResearchSubagent(
model,
this.config.logger,
researchSubagentPath,
this.mcpClient,
researchTools,
this.researchImageCapture
);
this.config.logger.info(
{
toolCount: researchTools.length,
toolNames: researchTools.map(t => t.name),
},
'Research subagent created successfully'
);
} catch (error) {
this.config.logger.error(
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
'Failed to create research subagent'
);
// Don't throw - research subagent is optional
}
}
/**
* Initialize indicator subagent
*/
private async initializeIndicatorSubagent(): Promise<void> {
if (this.indicatorSubagent) {
this.config.logger.debug('Indicator subagent already provided');
return;
}
this.config.logger.debug('Creating indicator subagent for session');
try {
const { createIndicatorSubagent } = await import('./subagents/indicator/index.js');
const { model } = await this.modelRouter.route(
'indicator management',
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId
);
const toolRegistry = getToolRegistry();
const indicatorTools = await toolRegistry.getToolsForAgent(
'indicator',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager,
undefined, // no image callback
(storeName, newState) => {
// After a workspace_patch succeeds in the container, update the gateway's
// WorkspaceManager so it pushes a WebSocket patch to the web client.
this.workspaceManager?.setState(storeName, newState).catch((err) =>
this.config.logger.error({ err, storeName }, 'Failed to sync workspace after indicator mutation')
);
}
);
// Inject web_explore tool if the web-explore subagent is ready
if (this.webExploreSubagent) {
const webExploreContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
indicatorTools.push(createWebExploreAgentTool({
webExploreSubagent: this.webExploreSubagent,
context: webExploreContext,
logger: this.config.logger,
}));
}
const indicatorSubagentPath = join(__dirname, 'subagents', 'indicator');
this.config.logger.debug({ indicatorSubagentPath }, 'Using indicator subagent path');
this.indicatorSubagent = await createIndicatorSubagent(
model,
this.config.logger,
indicatorSubagentPath,
this.mcpClient,
indicatorTools
);
this.config.logger.info(
{
toolCount: indicatorTools.length,
toolNames: indicatorTools.map(t => t.name),
},
'Indicator subagent created successfully'
);
} catch (error) {
this.config.logger.error(
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
'Failed to create indicator subagent'
);
// Don't throw — indicator subagent is optional
}
}
/**
* Initialize web explore subagent
*/
private async initializeWebExploreSubagent(): Promise<void> {
if (this.webExploreSubagent) {
this.config.logger.debug('Web explore subagent already provided');
return;
}
this.config.logger.debug('Creating web explore subagent for session');
try {
const { createWebExploreSubagent } = await import('./subagents/web-explore/index.js');
const { model } = await this.modelRouter.route(
'web research and summarization',
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId
);
const toolRegistry = getToolRegistry();
const webExploreTools = await toolRegistry.getToolsForAgent(
'web-explore',
undefined, // no MCP client needed
undefined,
undefined
);
const webExploreSubagentPath = join(__dirname, 'subagents', 'web-explore');
this.config.logger.debug({ webExploreSubagentPath }, 'Using web explore subagent path');
this.webExploreSubagent = await createWebExploreSubagent(
model,
this.config.logger,
webExploreSubagentPath,
webExploreTools
);
this.config.logger.info(
{
toolCount: webExploreTools.length,
toolNames: webExploreTools.map(t => t.name),
},
'Web explore subagent created successfully'
);
} catch (error) {
this.config.logger.error(
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
'Failed to create web explore subagent'
);
// Don't throw — web explore subagent is optional
}
}
/**
* Initialize strategy subagent
*/
private async initializeStrategySubagent(): Promise<void> {
if (this.strategySubagent) {
this.config.logger.debug('Strategy subagent already provided');
return;
}
this.config.logger.debug('Creating strategy subagent for session');
try {
const { createStrategySubagent } = await import('./subagents/strategy/index.js');
const { model } = await this.modelRouter.route(
'trading strategy writing and backtesting',
this.config.license,
RoutingStrategy.COMPLEXITY,
this.config.userId
);
const toolRegistry = getToolRegistry();
const strategyTools = await toolRegistry.getToolsForAgent(
'strategy',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager,
undefined,
(storeName, newState) => {
this.workspaceManager?.setState(storeName, newState).catch((err) =>
this.config.logger.error({ err, storeName }, 'Failed to sync workspace after strategy mutation')
);
}
);
const strategySubagentPath = join(__dirname, 'subagents', 'strategy');
this.config.logger.debug({ strategySubagentPath }, 'Using strategy subagent path');
this.strategySubagent = await createStrategySubagent(
model,
this.config.logger,
strategySubagentPath,
this.mcpClient,
strategyTools
);
this.config.logger.info(
{
toolCount: strategyTools.length,
toolNames: strategyTools.map(t => t.name),
},
'Strategy subagent created successfully'
);
} catch (error) {
this.config.logger.error(
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
'Failed to create strategy subagent'
);
// Don't throw — strategy subagent is optional
return lines.join('\n');
} catch (err) {
this.config.logger.warn({ err }, 'Failed to fetch custom indicators for wiki page');
return '';
}
}
@@ -640,7 +412,7 @@ export class AgentHarness {
}
try {
yield { type: 'tool_call', toolName: toolCall.name, label: this.getToolLabel(toolCall.name) };
yield { type: 'tool_call', toolName: toolCall.name, label: getToolLabel(toolCall.name, toolCall.args) };
// Use streamFunc when available (subagent tools) to forward intermediate events inline
let result: string;
@@ -727,7 +499,7 @@ export class AgentHarness {
/**
* Call a tool on the user's MCP server directly (bypasses the agent/LLM).
* Used by channel handlers for direct data requests (e.g. evaluate_indicator).
* Used by channel handlers for direct data requests (e.g. EvaluateIndicator).
*/
async callMcpTool(name: string, args: Record<string, unknown>): Promise<unknown> {
return this.mcpClient.callTool(name, args);
@@ -739,15 +511,15 @@ export class AgentHarness {
*/
async readDetails(category: string, name: string): Promise<string | null> {
try {
const raw = await this.mcpClient.callTool('python_read', { category, name });
const raw = await this.mcpClient.callTool('PythonRead', { category, name });
const content = (raw as any)?.content;
if (!Array.isArray(content)) return null;
for (const item of content) {
if (item.type === 'text' && item.text) {
try {
const parsed = JSON.parse(item.text);
if (parsed?.exists && parsed?.metadata?.details !== undefined) {
return parsed.metadata.details as string;
if (parsed?.exists && parsed?.details !== undefined) {
return parsed.details as string;
}
} catch { /* ignore */ }
}
@@ -758,6 +530,32 @@ export class AgentHarness {
}
}
/**
* Read persisted output files (analysis + images) for a research item.
* Returns null if the item has no output yet.
*/
async readOutput(
category: string,
name: string,
): Promise<{ analysis?: string; images?: Array<{ mimeType: string; data: string }> } | null> {
try {
const raw = await this.mcpClient.callTool('PythonReadOutput', { category, name });
const content = (raw as any)?.content;
if (!Array.isArray(content)) return null;
const result: { analysis?: string; images?: Array<{ mimeType: string; data: string }> } = {};
for (const item of content) {
if (item.type === 'text' && item.text && !item.text.startsWith('output_dir:')) {
result.analysis = item.text;
} else if (item.type === 'image' && item.data) {
(result.images ??= []).push({ mimeType: item.mimeType ?? 'image/png', data: item.data });
}
}
return result;
} catch {
return null;
}
}
/**
* Stream a details-driven code update for a category item.
*
@@ -789,47 +587,37 @@ export class AgentHarness {
// 3. Build instruction for the subagent
const instruction = buildDetailsUpdateInstruction(category, name, newDetails, diff);
// 4. Build a minimal subagent context
const context = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
// 4. Determine the agent name for this category
const agentName = category === 'indicator' ? 'indicator'
: category === 'strategy' ? 'strategy'
: category === 'research' ? 'research'
: null;
// 5. Ensure the right subagent is ready and invoke it
if (category === 'indicator') {
if (!this.indicatorSubagent) await this.initializeIndicatorSubagent();
if (!this.indicatorSubagent) {
yield { type: 'error', source: 'indicator', fatal: false };
return;
}
logger.info({ category, name }, 'Streaming indicator details update');
yield* this.indicatorSubagent.streamEvents(context, instruction, signal);
} else if (category === 'strategy') {
if (!this.strategySubagent) await this.initializeStrategySubagent();
if (!this.strategySubagent) {
yield { type: 'error', source: 'strategy', fatal: false };
return;
}
logger.info({ category, name }, 'Streaming strategy details update');
yield* this.strategySubagent.streamEvents(context, instruction, signal);
} else if (category === 'research') {
if (!this.researchSubagent) await this.initializeResearchSubagent();
if (!this.researchSubagent) {
yield { type: 'error', source: 'research', fatal: false };
return;
}
logger.info({ category, name }, 'Streaming research details update');
yield* this.researchSubagent.streamEvents(context, instruction, signal);
} else {
if (!agentName) {
yield { type: 'error', source: 'harness', fatal: false };
return;
}
// 5. Delegate to SpawnService
logger.info({ category, name, agentName }, 'Streaming details update via spawn');
const gen = this.spawnService.streamSpawn({
agentName,
instruction,
mcpClient: this.mcpClient,
availableMCPTools: this.availableMCPTools,
workspaceManager: this.workspaceManager,
signal,
});
let step = await gen.next();
while (!step.done) {
yield step.value;
step = await gen.next();
}
// Final text is the return value of the generator — emit as done event
const finalText = step.value;
if (finalText) {
yield { type: 'done', content: finalText };
}
}
@@ -876,11 +664,21 @@ export class AgentHarness {
: [];
this.config.logger.debug({ historyLength: history.length }, 'Conversation history loaded');
// Inject current workspace state fresh on every turn — not persisted to conversation history
// Inject license info and workspace state as a fresh HumanMessage on every turn
const licenseInfo = `[License]\nlicenseType: ${this.config.license.licenseType}\nfeatures: ${JSON.stringify(this.config.license.features)}`;
const workspaceContext = this.workspaceManager
? `[Workspace State]\n\`\`\`json\n${this.workspaceManager.serializeState()}\n\`\`\``
: undefined;
// Load dynamic imports from agent-main.md frontmatter (fresh per turn, after cache marker)
const agentMainPage = await this.wikiLoader.loadAgentPage('main').catch(() => null);
const dynamicContext = agentMainPage?.frontmatter.dynamic_imports?.length
? await this.wikiLoader.loadDynamicImports(
agentMainPage.frontmatter.dynamic_imports,
{ mcpClient: this.mcpClient, workspaceManager: this.workspaceManager ?? undefined }
)
: '';
// 4. Get the configured model
this.config.logger.debug('Routing to model');
const { model, middleware } = await this.modelRouter.route(
@@ -893,7 +691,7 @@ export class AgentHarness {
this.config.logger.info({ modelName: model.constructor.name }, 'Model selected');
// 5. Build LangChain messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, history, workspaceContext, message.content);
const langchainMessages = this.buildLangChainMessages(systemPrompt, history, licenseInfo, workspaceContext, dynamicContext, message.content);
this.config.logger.debug({ messageCount: langchainMessages.length }, 'LangChain messages built');
// 6. Get tools for main agent from registry
@@ -911,51 +709,19 @@ export class AgentHarness {
}
);
// Build shared subagent context
const subagentContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
// Add spawn and memory_lookup tools
tools.push(createSpawnTool({
spawnService: this.spawnService,
mcpClient: this.mcpClient,
availableMCPTools: this.availableMCPTools,
workspaceManager: this.workspaceManager,
logger: this.config.logger,
}));
if (this.researchSubagent) {
tools.push(createResearchAgentTool({
researchSubagent: this.researchSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
if (this.indicatorSubagent) {
tools.push(createIndicatorAgentTool({
indicatorSubagent: this.indicatorSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
if (this.webExploreSubagent) {
tools.push(createWebExploreAgentTool({
webExploreSubagent: this.webExploreSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
if (!this.strategySubagent) {
await this.initializeStrategySubagent();
}
if (this.strategySubagent) {
tools.push(createStrategyAgentTool({
strategySubagent: this.strategySubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
tools.push(createMemoryLookupTool({
wikiLoader: this.wikiLoader,
logger: this.config.logger,
}));
this.config.logger.info(
{ toolCount: tools.length, toolNames: tools.map(t => t.name) },
@@ -1088,50 +854,34 @@ export class AgentHarness {
private buildLangChainMessages(
systemPrompt: string,
history: BaseMessage[],
licenseInfo: string,
workspaceContext: string | undefined,
dynamicContext: string,
currentUserMessage: string
): BaseMessage[] {
// License, workspace, and dynamic context are injected as HumanMessages so they are never cached
const contextParts = [licenseInfo, workspaceContext, dynamicContext || undefined].filter(Boolean).join('\n\n');
return [
new SystemMessage(systemPrompt),
...history,
...(workspaceContext ? [new HumanMessage(workspaceContext)] : []),
...(contextParts ? [new HumanMessage(contextParts)] : []),
new HumanMessage(currentUserMessage),
];
}
/**
* Build system prompt from template
* Build system prompt from wiki knowledge base.
* Loads index.md + tools.md (tier-1 cache) and agent-main.md (tier-2 cache).
*/
private async buildSystemPrompt(): Promise<string> {
// Load template and populate with license info
const template = await AgentHarness.loadSystemPromptTemplate();
const prompt = template
.replace('{{licenseType}}', this.config.license.licenseType)
.replace('{{features}}', JSON.stringify(this.config.license.features, null, 2));
const [basePrompt, agentMain] = await Promise.all([
this.wikiLoader.getBasePrompt(),
this.wikiLoader.loadAgentPage('main').catch(() => null),
]);
return prompt;
}
/**
* Map tool names to user-friendly status labels.
*/
private getToolLabel(toolName: string): string {
const labels: Record<string, string> = {
research: 'Researching...',
indicator: 'Adjusting indicators...',
get_chart_data: 'Fetching chart data...',
symbol_lookup: 'Searching symbol...',
python_list: 'Seeing what we have...',
python_edit: 'Coding...',
python_write: 'Coding...',
python_read: 'Inspecting...',
execute_research: 'Running script...',
backtest_strategy: 'Backtesting...',
list_active_strategies: 'Checking active strategies...',
web_explore: 'Searching the web...',
strategy: 'Coding a strategy...',
};
return labels[toolName] ?? `Running ${toolName} tool...`;
const parts = [basePrompt];
if (agentMain) parts.push(agentMain.body);
return parts.join('\n\n---\n\n');
}
/**
@@ -1318,7 +1068,7 @@ function buildDetailsUpdateInstruction(
return `The user has edited the specification (details) for the ${categoryLabel} named "${name}".
Your task: update the Python implementation to match the revised specification. Use \`python_edit\` with targeted patches — make only the changes implied by the diff below. Also update the \`details\` field via the \`details\` parameter on \`python_edit\` to store the new specification text.
Your task: update the Python implementation to match the revised specification. Use \`PythonEdit\` with targeted patches — make only the changes implied by the diff below. Also update the \`details\` field via the \`details\` parameter on \`PythonEdit\` to store the new specification text.
## Revised specification
@@ -1331,7 +1081,7 @@ ${diff}
\`\`\`
Instructions:
- Read the current implementation first with \`python_read(category="${category}", name="${name}")\` to understand what exists.
- Read the current implementation first with \`PythonRead(category="${category}", name="${name}")\` to understand what exists.
- Apply only the changes described by the diff above — do not rewrite unrelated parts of the code.
- Pass \`details\` as the full revised specification text shown above.
- After editing, confirm the change was applied and validation passed.`;

View File

@@ -3,8 +3,8 @@
// Memory
export * from './memory/index.js';
// Subagents
export * from './subagents/index.js';
// Spawn infrastructure
export * from './spawn/index.js';
// Workflows
export * from './workflows/index.js';

View File

@@ -151,7 +151,7 @@ export class MCPClientConnector {
try {
this.config.logger.debug({ tool: name, args }, 'Calling MCP tool');
// Use a generous timeout: execute_research runs a subprocess with a 300s limit,
// Use a generous timeout: ExecuteResearch runs a subprocess with a 300s limit,
// so the default 60s MCP SDK timeout would fire before the script completes.
const result = await this.client!.callTool({ name, arguments: args }, undefined, { timeout: 330000 });
return result;
@@ -293,4 +293,8 @@ export class MCPClientConnector {
isConnected(): boolean {
return this.connected;
}
  /** The user ID this MCP connection is scoped to (taken from connector config). */
  get userId(): string {
    return this.config.userId;
  }
}

View File

@@ -1,356 +0,0 @@
import { readdir, readFile } from 'fs/promises';
import { join, relative } from 'path';
import { createHash } from 'crypto';
import type { FastifyBaseLogger } from 'fastify';
import { RAGRetriever } from './rag-retriever.js';
import { EmbeddingService } from './embedding-service.js';
/**
* Document metadata stored with each chunk
*/
export interface DocumentMetadata {
  /** Source document path, relative to the knowledge directory. */
  document_id: string;
  /** Zero-based position of this chunk within the document. */
  chunk_index: number;
  /** MD5 hex digest of the FULL document (not this chunk) — used for change detection. */
  content_hash: string;
  /** Epoch milliseconds when the document was (re)processed. */
  last_updated: number;
  /** Tags from YAML frontmatter plus a slug of the first `#` heading, if any. */
  tags: string[];
  /** Nearest `##`–`####` heading above this chunk, when one exists. */
  heading?: string;
  /** Duplicate of document_id (relative path) stored alongside each chunk. */
  file_path: string;
}
/**
* Document chunk with content and metadata
*/
export interface DocumentChunk {
  /** Chunk text: a header-delimited section, possibly sub-split by size. */
  content: string;
  /** Provenance and indexing metadata attached to this chunk. */
  metadata: DocumentMetadata;
}
/**
* Document loader configuration
*/
export interface DocumentLoaderConfig {
  /** Root directory scanned recursively for `.md` files. */
  knowledgeDir: string;
  /** Max chunk size in CHARACTERS (default 4000 ≈ 1000 tokens at ~4 chars/token). */
  maxChunkSize?: number; // in tokens (approximate by chars)
  /** Character overlap carried between consecutive sub-chunks (default 200). */
  chunkOverlap?: number; // overlap between chunks
}
/**
* Global knowledge document loader
*
* Loads markdown documents from a directory structure and stores them
* as global knowledge (user_id="0") in Qdrant for RAG retrieval.
*
* Features:
* - Intelligent chunking by markdown headers
* - Content hashing for change detection
* - Metadata extraction (tags, headings)
* - Automatic embedding generation
* - Incremental updates (only changed docs)
*
* Directory structure:
* gateway/knowledge/
* platform/
* trading/
* indicators/
* strategies/
*/
export class DocumentLoader {
private config: DocumentLoaderConfig;
private logger: FastifyBaseLogger;
private embeddings: EmbeddingService;
private rag: RAGRetriever;
private loadedDocs: Map<string, string> = new Map(); // path -> hash
constructor(
config: DocumentLoaderConfig,
embeddings: EmbeddingService,
rag: RAGRetriever,
logger: FastifyBaseLogger
) {
this.config = {
maxChunkSize: 4000, // ~1000 tokens
chunkOverlap: 200,
...config,
};
this.embeddings = embeddings;
this.rag = rag;
this.logger = logger;
}
/**
* Load all documents from knowledge directory
*/
async loadAll(): Promise<{ loaded: number; updated: number; skipped: number }> {
this.logger.info({ dir: this.config.knowledgeDir }, 'Loading knowledge documents');
const stats = { loaded: 0, updated: 0, skipped: 0 };
try {
const files = await this.findMarkdownFiles(this.config.knowledgeDir);
for (const filePath of files) {
const result = await this.loadDocument(filePath);
if (result === 'loaded') stats.loaded++;
else if (result === 'updated') stats.updated++;
else stats.skipped++;
}
this.logger.info(stats, 'Knowledge documents loaded');
return stats;
} catch (error) {
this.logger.error({ error }, 'Failed to load knowledge documents');
throw error;
}
}
/**
* Load a single document
*/
async loadDocument(filePath: string): Promise<'loaded' | 'updated' | 'skipped'> {
try {
// Read file content
const content = await readFile(filePath, 'utf-8');
const contentHash = this.hashContent(content);
// Check if document has changed
const relativePath = relative(this.config.knowledgeDir, filePath);
const existingHash = this.loadedDocs.get(relativePath);
if (existingHash === contentHash) {
this.logger.debug({ file: relativePath }, 'Document unchanged, skipping');
return 'skipped';
}
const isUpdate = !!existingHash;
// Parse and chunk document
const chunks = this.chunkDocument(content, relativePath);
this.logger.info(
{ file: relativePath, chunks: chunks.length, update: isUpdate },
'Processing document'
);
// Generate embeddings and store chunks
for (const chunk of chunks) {
const embedding = await this.embeddings.embed(chunk.content);
// Create unique ID for this chunk
const chunkId = `global:${chunk.metadata.document_id}:${chunk.metadata.chunk_index}`;
// Store in Qdrant as global knowledge
await this.rag.storeGlobalKnowledge(
chunkId,
chunk.content,
embedding,
{
...chunk.metadata,
type: 'knowledge_doc',
}
);
}
// Update loaded docs tracking
this.loadedDocs.set(relativePath, contentHash);
return isUpdate ? 'updated' : 'loaded';
} catch (error) {
this.logger.error({ error, file: filePath }, 'Failed to load document');
throw error;
}
}
/**
* Reload a specific document (for updates)
*/
async reloadDocument(filePath: string): Promise<void> {
this.logger.info({ file: filePath }, 'Reloading document');
await this.loadDocument(filePath);
}
/**
* Chunk document by markdown headers with smart splitting
*/
private chunkDocument(content: string, documentId: string): DocumentChunk[] {
const chunks: DocumentChunk[] = [];
const tags = this.extractTags(content);
const lastModified = Date.now();
// Split by headers (## or ###)
const sections = this.splitByHeaders(content);
let chunkIndex = 0;
for (const section of sections) {
// If section is too large, split it further
const subChunks = this.splitLargeSection(section.content);
for (const subContent of subChunks) {
if (subContent.trim().length === 0) continue;
chunks.push({
content: subContent,
metadata: {
document_id: documentId,
chunk_index: chunkIndex++,
content_hash: this.hashContent(content),
last_updated: lastModified,
tags,
heading: section.heading,
file_path: documentId,
},
});
}
}
return chunks;
}
/**
* Split document by markdown headers
*/
private splitByHeaders(content: string): Array<{ heading?: string; content: string }> {
const lines = content.split('\n');
const sections: Array<{ heading?: string; content: string }> = [];
let currentSection: string[] = [];
let currentHeading: string | undefined;
for (const line of lines) {
// Check for markdown header (##, ###, ####)
const headerMatch = line.match(/^(#{2,4})\s+(.+)$/);
if (headerMatch) {
// Save previous section
if (currentSection.length > 0) {
sections.push({
heading: currentHeading,
content: currentSection.join('\n'),
});
}
// Start new section
currentHeading = headerMatch[2].trim();
currentSection = [line];
} else {
currentSection.push(line);
}
}
// Add final section
if (currentSection.length > 0) {
sections.push({
heading: currentHeading,
content: currentSection.join('\n'),
});
}
return sections;
}
/**
* Split large sections into smaller chunks
*/
private splitLargeSection(content: string): string[] {
const maxSize = this.config.maxChunkSize!;
const overlap = this.config.chunkOverlap!;
if (content.length <= maxSize) {
return [content];
}
const chunks: string[] = [];
let start = 0;
while (start < content.length) {
const end = Math.min(start + maxSize, content.length);
let chunkEnd = end;
// Try to break at sentence boundary
if (end < content.length) {
const sentenceEnd = content.lastIndexOf('.', end);
const paragraphEnd = content.lastIndexOf('\n\n', end);
if (paragraphEnd > start + maxSize / 2) {
chunkEnd = paragraphEnd;
} else if (sentenceEnd > start + maxSize / 2) {
chunkEnd = sentenceEnd + 1;
}
}
chunks.push(content.substring(start, chunkEnd));
start = chunkEnd - overlap;
}
return chunks;
}
/**
* Extract tags from document (frontmatter or first heading)
*/
private extractTags(content: string): string[] {
const tags: string[] = [];
// Try to extract from YAML frontmatter
const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
if (frontmatterMatch) {
const frontmatter = frontmatterMatch[1];
const tagsMatch = frontmatter.match(/tags:\s*\[([^\]]+)\]/);
if (tagsMatch) {
tags.push(...tagsMatch[1].split(',').map((t) => t.trim()));
}
}
// Extract from first heading
const headingMatch = content.match(/^#\s+(.+)$/m);
if (headingMatch) {
tags.push(headingMatch[1].toLowerCase().replace(/\s+/g, '-'));
}
return tags;
}
/**
* Hash content for change detection
*/
private hashContent(content: string): string {
return createHash('md5').update(content).digest('hex');
}
/**
* Recursively find all markdown files
*/
private async findMarkdownFiles(dir: string): Promise<string[]> {
const files: string[] = [];
try {
const entries = await readdir(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = join(dir, entry.name);
if (entry.isDirectory()) {
const subFiles = await this.findMarkdownFiles(fullPath);
files.push(...subFiles);
} else if (entry.isFile() && entry.name.endsWith('.md')) {
files.push(fullPath);
}
}
} catch (error) {
this.logger.warn({ error, dir }, 'Failed to read directory');
}
return files;
}
/**
 * Get loaded document stats.
 *
 * NOTE(review): `loadedDocs` maps file path -> content hash (see
 * `hashContent`, which returns a 32-char md5 hex digest), so `totalSize`
 * here sums the lengths of the *hash strings*, not the sizes of the
 * documents themselves — confirm whether callers actually expect bytes of
 * document content; if so, document sizes need to be tracked separately.
 */
getStats(): { totalDocs: number; totalSize: number } {
  return {
    totalDocs: this.loadedDocs.size,
    totalSize: Array.from(this.loadedDocs.values()).reduce((sum, hash) => sum + hash.length, 0),
  };
}
}

View File

@@ -1,270 +0,0 @@
import type { FastifyBaseLogger } from 'fastify';
import { Ollama } from 'ollama';
/**
 * Embedding provider configuration
 */
export interface EmbeddingConfig {
  /** Backend that generates embeddings; 'none' disables embedding (zero vectors are returned). */
  provider: 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none';
  /** Model name; a provider-specific default is applied when omitted. */
  model?: string;
  /** API key for hosted providers (OpenAI / Voyage / Cohere). */
  apiKey?: string;
  /** Vector dimensionality; a provider-specific default is applied when omitted. */
  dimensions?: number;
  /** Ollama host URL; defaults to http://localhost:11434. */
  ollamaUrl?: string;
}
/**
 * Embedding service for generating vectors from text
 *
 * Supports multiple providers:
 * - Ollama (all-minilm, nomic-embed-text, mxbai-embed-large) - RECOMMENDED
 * - OpenAI (text-embedding-3-small/large)
 * - Voyage AI (voyage-2)
 * - Cohere (embed-english-v3.0)
 * - Local models (via transformers.js or Python sidecar)
 * - None (for development without embeddings)
 *
 * Used by RAGRetriever to generate embeddings for storage and search.
 *
 * For production, use Ollama with all-minilm (90MB model, runs on CPU, ~100MB RAM).
 * Ollama can run in-container or as a separate pod/sidecar.
 *
 * Error policy: embedding failures never throw — they log and return a
 * zero vector of the configured dimensionality so callers do not crash.
 */
export class EmbeddingService {
  /** Effective model name for the configured provider. */
  private readonly model: string;
  /** Vector dimensionality; also the length of zero-vector fallbacks. */
  private readonly dimensions: number;
  /** Ollama client; only initialized when provider === 'ollama'. */
  private ollama?: Ollama;

  constructor(
    private config: EmbeddingConfig,
    private logger: FastifyBaseLogger
  ) {
    // Set defaults based on provider
    switch (config.provider) {
      case 'ollama':
        this.model = config.model || 'all-minilm';
        this.dimensions = config.dimensions || 384;
        this.ollama = new Ollama({
          host: config.ollamaUrl || 'http://localhost:11434',
        });
        break;
      case 'openai':
        this.model = config.model || 'text-embedding-3-small';
        this.dimensions = config.dimensions || 1536;
        break;
      // 'anthropic' deliberately falls through: Anthropic embeddings are
      // served by partner Voyage AI.
      case 'anthropic':
      case 'voyage':
        this.model = config.model || 'voyage-2';
        this.dimensions = config.dimensions || 1024;
        break;
      case 'cohere':
        this.model = config.model || 'embed-english-v3.0';
        this.dimensions = config.dimensions || 1024;
        break;
      case 'local':
        this.model = config.model || 'all-MiniLM-L6-v2';
        this.dimensions = config.dimensions || 384;
        break;
      case 'none':
        // No embeddings configured - will return zero vectors
        this.model = 'none';
        this.dimensions = config.dimensions || 1536;
        this.logger.warn('Embedding service initialized with provider=none - RAG will not function properly');
        break;
      default:
        throw new Error(`Unknown embedding provider: ${config.provider}`);
    }
    if (config.provider !== 'none') {
      this.logger.info(
        { provider: config.provider, model: this.model, dimensions: this.dimensions },
        'Initialized embedding service'
      );
    }
  }

  /**
   * Generate embedding for a single text.
   * Never throws — returns a zero vector on provider failure.
   */
  async embed(text: string): Promise<number[]> {
    if (this.config.provider === 'none') {
      // Return zero vector when no embeddings configured
      return new Array(this.dimensions).fill(0);
    }
    this.logger.debug({ textLength: text.length, provider: this.config.provider }, 'Generating embedding');
    try {
      switch (this.config.provider) {
        case 'ollama':
          return await this.embedOllama(text);
        case 'openai':
          return await this.embedOpenAI(text);
        case 'anthropic':
        case 'voyage':
          return await this.embedVoyage(text);
        case 'cohere':
          return await this.embedCohere(text);
        case 'local':
          return await this.embedLocal(text);
        default:
          throw new Error(`Unknown provider: ${this.config.provider}`);
      }
    } catch (error) {
      this.logger.error({ error, provider: this.config.provider }, 'Failed to generate embedding');
      // Return zero vector as fallback to prevent crashes
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embeddings for multiple texts (batch).
   * Ollama handles the batch natively; other providers embed each text
   * individually (issued in parallel via Promise.all).
   */
  async embedBatch(texts: string[]): Promise<number[][]> {
    this.logger.debug({ count: texts.length, provider: this.config.provider }, 'Generating batch embeddings');
    // Ollama supports native batch operations
    if (this.config.provider === 'ollama' && this.ollama) {
      try {
        const response = await this.ollama.embed({
          model: this.model,
          input: texts,
        });
        return response.embeddings;
      } catch (error) {
        this.logger.error({ error }, 'Ollama batch embedding failed, falling back to sequential');
        // Fall through to per-text processing
      }
    }
    // Fallback: embed each text individually (requests run concurrently).
    const embeddings = await Promise.all(texts.map((text) => this.embed(text)));
    return embeddings;
  }

  /** Get embedding dimensions. */
  getDimensions(): number {
    return this.dimensions;
  }

  /** Get model name. */
  getModel(): string {
    return this.model;
  }

  /**
   * Generate embedding using Ollama.
   * Returns a zero vector if the client is missing or the call fails.
   */
  private async embedOllama(text: string): Promise<number[]> {
    if (!this.ollama) {
      this.logger.error('Ollama client not initialized');
      return new Array(this.dimensions).fill(0);
    }
    try {
      const response = await this.ollama.embed({
        model: this.model,
        input: text,
      });
      // Ollama returns single embedding for single input
      return response.embeddings[0];
    } catch (error) {
      this.logger.error({ error }, 'Ollama embedding failed, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embedding using OpenAI API.
   * Returns a zero vector when no API key is configured or the call fails.
   */
  private async embedOpenAI(text: string): Promise<number[]> {
    if (!this.config.apiKey) {
      this.logger.warn('OpenAI API key not configured, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
    try {
      const response = await fetch('https://api.openai.com/v1/embeddings', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.config.apiKey}`,
        },
        body: JSON.stringify({
          model: this.model,
          input: text,
        }),
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
      }
      const data = await response.json() as { data: Array<{ embedding: number[] }> };
      return data.data[0].embedding;
    } catch (error) {
      this.logger.error({ error }, 'OpenAI embedding failed, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embedding using Voyage AI API (Anthropic partnership)
   */
  private async embedVoyage(_text: string): Promise<number[]> {
    // TODO: Implement Voyage AI embedding when API key available
    // API endpoint: https://api.voyageai.com/v1/embeddings
    this.logger.warn('Voyage AI embedding not yet implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Generate embedding using Cohere API
   */
  private async embedCohere(_text: string): Promise<number[]> {
    // TODO: Implement Cohere embedding when API key available
    // API endpoint: https://api.cohere.ai/v1/embed
    this.logger.warn('Cohere embedding not yet implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Generate embedding using local model
   */
  private async embedLocal(_text: string): Promise<number[]> {
    // TODO: Implement local embedding (via transformers.js or Python sidecar)
    // Options:
    // 1. transformers.js (pure JS/WebAssembly) - slower but self-contained
    // 2. Python sidecar service running sentence-transformers - faster
    // 3. ONNX runtime with pre-exported models - good balance
    this.logger.warn('Local embedding not implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Calculate cosine similarity between two embeddings.
   *
   * @returns similarity in [-1, 1]; 0 when either vector has zero magnitude.
   *   BUG FIX: zero vectors (exactly what this service returns as its error
   *   fallback) previously produced NaN via a 0/0 division.
   * @throws Error when the vectors have different lengths
   */
  static cosineSimilarity(a: number[], b: number[]): number {
    if (a.length !== b.length) {
      throw new Error('Embeddings must have same dimensions');
    }
    let dotProduct = 0;
    let normA = 0;
    let normB = 0;
    for (let i = 0; i < a.length; i++) {
      dotProduct += a[i] * b[i];
      normA += a[i] * a[i];
      normB += b[i] * b[i];
    }
    const denominator = Math.sqrt(normA) * Math.sqrt(normB);
    if (denominator === 0) {
      return 0;
    }
    return dotProduct / denominator;
  }
}

View File

@@ -2,9 +2,6 @@
export { TieredCheckpointSaver } from './checkpoint-saver.js';
export { ConversationStore } from './conversation-store.js';
export { EmbeddingService } from './embedding-service.js';
export { RAGRetriever } from './rag-retriever.js';
export { DocumentLoader } from './document-loader.js';
export {
createUserContext,
touchContext,
@@ -16,5 +13,4 @@ export {
type ActiveChannel,
type ChannelCapabilities,
type WorkspaceContext,
type MemoryChunk,
} from './session-context.js';

View File

@@ -1,210 +0,0 @@
import type { FastifyBaseLogger } from 'fastify';
import { QdrantClient } from '../../clients/qdrant-client.js';
/**
 * Vector point with metadata for Qdrant
 */
export interface VectorPoint {
  /** Unique point id; messages use the form `${userId}:${sessionId}:${timestamp}`. */
  id: string;
  /** Embedding vector for the content. */
  vector: number[];
  payload: {
    /** Owning user; "0" denotes the global/platform knowledge namespace. */
    user_id: string;
    /** Session the message belongs to; "global" for platform knowledge. */
    session_id: string;
    /** Raw text this vector embeds. */
    content: string;
    role: 'user' | 'assistant' | 'system';
    /** Epoch milliseconds at storage time. */
    timestamp: number;
    // Arbitrary additional metadata supplied by the caller
    [key: string]: unknown;
  };
}
/**
 * Search result from Qdrant
 */
export interface SearchResult {
  /** Id of the matched point. */
  id: string;
  /** Similarity score; fixed at 1.0 for scroll-based (non-search) retrieval. */
  score: number;
  /** Stored payload of the matched point. */
  payload: VectorPoint['payload'];
}
/**
 * Qdrant client configuration
 */
export interface QdrantConfig {
  /** Qdrant server URL. */
  url: string;
  /** Optional API key for authenticated deployments. */
  apiKey?: string;
  /** Collection to store vectors in; the client applies its default when omitted. */
  collectionName?: string;
}
/**
 * RAG retriever using Qdrant for vector similarity search
 *
 * Features:
 * - **Global namespace** (user_id="0") for platform knowledge
 * - **User-specific namespaces** for personal memories
 * - **Queries join both** global and user memories
 * - Semantic search across conversation history
 * - Context retrieval for agent prompts
 * - User preference and pattern learning
 *
 * Architecture: Gateway-side vector store, user_id indexed for GDPR compliance
 */
export class RAGRetriever {
  private qdrant: QdrantClient;

  /**
   * @param config          Qdrant connection settings
   * @param logger          structured logger
   * @param vectorDimension dimensionality of stored vectors (must match the
   *                        embedding service; defaults to 1536)
   */
  constructor(
    config: QdrantConfig,
    private logger: FastifyBaseLogger,
    vectorDimension: number = 1536
  ) {
    this.qdrant = new QdrantClient(config, logger, vectorDimension);
  }

  /**
   * Initialize Qdrant collection with proper schema
   */
  async initialize(): Promise<void> {
    await this.qdrant.initialize();
  }

  /**
   * Store conversation message as vector.
   * The point id is `${userId}:${sessionId}:${Date.now()}`.
   */
  async storeMessage(
    userId: string,
    sessionId: string,
    role: 'user' | 'assistant' | 'system',
    content: string,
    embedding: number[],
    metadata?: Record<string, unknown>
  ): Promise<void> {
    const id = `${userId}:${sessionId}:${Date.now()}`;
    const payload = {
      user_id: userId,
      session_id: sessionId,
      content,
      role,
      timestamp: Date.now(),
      ...metadata,
    };
    this.logger.debug(
      { userId, sessionId, role, contentLength: content.length },
      'Storing message vector'
    );
    await this.qdrant.upsertPoint(id, embedding, payload);
  }

  /**
   * Store global platform knowledge (user_id = "0")
   */
  async storeGlobalKnowledge(
    id: string,
    content: string,
    embedding: number[],
    metadata?: Record<string, unknown>
  ): Promise<void> {
    this.logger.debug({ id, contentLength: content.length }, 'Storing global knowledge');
    await this.qdrant.storeGlobalKnowledge(id, embedding, {
      session_id: 'global',
      content,
      role: 'system',
      timestamp: Date.now(),
      ...metadata,
    });
  }

  /**
   * Search for relevant memories using vector similarity.
   * Queries BOTH global (user_id="0") and user-specific memories.
   *
   * @param options.limit    max results (default 5)
   * @param options.minScore similarity threshold (default 0.7; an explicit 0
   *                         disables the threshold)
   */
  async search(
    userId: string,
    queryEmbedding: number[],
    options?: {
      limit?: number;
      sessionId?: string;
      minScore?: number;
      timeRange?: { start: number; end: number };
    }
  ): Promise<SearchResult[]> {
    // BUG FIX: use `??` instead of `||` so an explicit `minScore: 0`
    // (meaning "no threshold") is honored rather than clobbered to 0.7.
    const limit = options?.limit ?? 5;
    const minScore = options?.minScore ?? 0.7;
    this.logger.debug(
      { userId, limit, sessionId: options?.sessionId },
      'Searching for relevant memories (global + user)'
    );
    // Qdrant client handles the "should" logic: user_id = userId OR user_id = "0"
    const results = await this.qdrant.search(userId, queryEmbedding, {
      limit,
      scoreThreshold: minScore,
      sessionId: options?.sessionId,
      timeRange: options?.timeRange,
    });
    return results.map(r => ({
      id: r.id,
      score: r.score,
      payload: r.payload as VectorPoint['payload'],
    }));
  }

  /**
   * Get recent conversation history for context.
   * Uses a scroll (not a similarity search), so all scores are 1.0.
   */
  async getRecentHistory(
    userId: string,
    sessionId: string,
    limit: number = 10
  ): Promise<SearchResult[]> {
    this.logger.debug({ userId, sessionId, limit }, 'Getting recent conversation history');
    const result = await this.qdrant.scroll(userId, {
      sessionId,
      limit,
    });
    return result.points.map(p => ({
      id: p.id,
      score: 1.0, // Not a search result, so score is 1.0
      payload: p.payload as VectorPoint['payload'],
    }));
  }

  /**
   * Delete all vectors for a user (GDPR compliance)
   */
  async deleteUserData(userId: string): Promise<void> {
    this.logger.info({ userId }, 'Deleting all user vectors for GDPR compliance');
    await this.qdrant.deleteUserData(userId);
  }

  /**
   * Delete all vectors for a session
   */
  async deleteSession(userId: string, sessionId: string): Promise<void> {
    this.logger.info({ userId, sessionId }, 'Deleting session vectors');
    await this.qdrant.deleteSession(userId, sessionId);
  }

  /**
   * Get collection statistics
   */
  async getStats(): Promise<{
    vectorCount: number;
    indexedCount: number;
    collectionSize: number;
  }> {
    const info = await this.qdrant.getCollectionInfo();
    return {
      vectorCount: info.vectorsCount,
      indexedCount: info.indexedVectorsCount,
      collectionSize: info.pointsCount,
    };
  }
}

View File

@@ -34,18 +34,6 @@ export interface WorkspaceContext {
preferences: Record<string, unknown>;
}
/**
* Memory chunk from RAG retrieval
*/
export interface MemoryChunk {
id: string;
content: string;
role: 'user' | 'assistant' | 'system';
timestamp: number;
relevanceScore: number;
metadata?: Record<string, unknown>;
}
/**
* Enhanced user context for agent harness
*
@@ -53,7 +41,6 @@ export interface MemoryChunk {
* - User identity and license
* - Active channel info (for multi-channel support)
* - Conversation state and history
* - RAG-retrieved relevant memories
* - Workspace state
*
* This object is passed to all agent nodes and tools.
@@ -71,9 +58,6 @@ export interface UserContext {
conversationHistory: BaseMessage[];
currentMessage?: string;
// RAG context
relevantMemories: MemoryChunk[];
// Workspace state
workspaceState: WorkspaceContext;
@@ -167,7 +151,6 @@ export function createUserContext(params: {
capabilities,
},
conversationHistory: [],
relevantMemories: [],
workspaceState: {
activeIndicators: [],
activeStrategies: [],

View File

@@ -1,219 +0,0 @@
# Dexorder AI Assistant System Prompt
You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data.
Your text responses should be markdown, using emojis, color, and formatting to create a visually appealing response.
# User Information
**User License:** {{licenseType}}
**Available Features:**
{{features}}
# Platform Capabilities
Dexorder trading platform provides OHLC data at a 1-minute resolution and supports strategies that read one or more OHLC feeds. It also offers a wide range of built-in indicators and allows users to create custom indicators for advanced analysis. Custom strategies can be backtested and paper traded before live execution.
Dexorder does not support:
* tick-by-tick trading or high-frequency strategies.
* long-running computations like parameter optimizations or training machine learning models during live execution.
* portfolio optimization or trading strategies that require a large number of symbols.
* LLM calls inside strategy scripts — strategies must be deterministic and lightweight for backtesting to be reliable and repeatable. LLMs are slow, expensive, and introduce temperature-based non-determinism that breaks backtesting. (Walk-forward LLM integration via timer/data triggers is planned but not yet available.)
* TradFi data (equities, forex, bonds, options, etc.) — only crypto pricing data is available.
* Alternative data sources such as news feeds, Twitter/social sentiment, on-chain data, or economic calendars — these are not yet available.
Dexorder does support:
* backtesting strategies against historical data.
* multi-symbol comparisons.
* multi-timeframe analysis.
* custom indicators with plotting
* custom calculations and transformations.
* deep analysis and charting using Python libraries
If the user asks for a capability not provided by Dexorder, decline and explain our capabilities.
# Important Instructions
## Switching Chart Symbol or Timeframe
**IMPORTANT: When the user asks to switch, change, or update the chart symbol or timeframe, you MUST call `workspace_patch` directly. Do NOT use web_explore, do NOT delegate to the indicator tool.**
Call `workspace_patch` with `store_name = "chartState"` and the appropriate JSON patch:
To switch symbol only:
```json
[{ "op": "replace", "path": "/symbol", "value": "SOL/USDT.BINANCE" }]
```
To switch symbol and period (period is seconds: 60=1m, 300=5m, 900=15m, 3600=1h, 86400=1D):
```json
[
{ "op": "replace", "path": "/symbol", "value": "SOL/USDT.BINANCE" },
{ "op": "replace", "path": "/period", "value": 900 }
]
```
You already know this format — do not search for it. After patching, confirm the change to the user.
## Investment Advice
**NEVER** recommend any specific ticker, trade, or position. You may suggest mechanical adjustments or improvements to strategies, but you must **NEVER** offer an opinion on a specific trade or position. You are **NOT** a registered investment advisor.
## Task Delegation
- For ANY research questions, deep analysis, statistical analysis, charting requests, or market data queries that require computation, you MUST use the 'research' tool
- For ANYTHING related to indicators on the chart — reading, adding, removing, modifying, or creating custom indicators — you MUST use the 'indicator' tool
- For ANY request about trading strategies — writing, editing, backtesting, interpreting results, activating, deactivating, or monitoring — you MUST use the 'strategy' tool; NEVER write strategy Python code yourself
- NEVER write Python code directly in your responses to the user
- NEVER show code to the user — delegate to the research, indicator, or strategy tool instead
- NEVER attempt to do analysis yourself — let the subagents handle it
## Available Tools
### indicator
**Use this tool for all indicator-related requests.**
The indicator subagent manages the chart's indicators: it reads the current indicator set, adds or removes indicators, modifies parameters, and can create custom indicator scripts.
**ALWAYS use indicator for:**
- "What indicators do I have on the chart?" → read and describe current indicators
- "Show RSI" / "Add Bollinger Bands" → add indicators to chart
- "Change MACD fast period to 8" → modify indicator parameters
- "Remove all moving averages" → remove indicators
- "Create a custom volume-weighted RSI" → write custom indicator
- Any question about what an indicator means or how it's configured
- Recommending indicators for a given strategy
**Custom indicators vs. ad-hoc research scripts:**
When a user asks for a calculation (e.g. "volume-weighted RSI", "adaptive ATR", "sector relative strength"), prefer creating a **custom indicator** via this tool over writing a one-off pandas/Python script in the research tool. Custom indicators are better because:
1. **Reusable** — saved permanently and can be applied to any symbol at any time
2. **First-class UI** — appear in the chart's Indicator picker alongside built-in indicators
3. **Live chart display** — their values are plotted directly on the chart as the user browses
4. **Watchlist & trigger support** — can be used to filter symbols (watchlists) and fire alerts/triggers (coming soon)
Use the research tool for exploratory or one-off analysis. Use the indicator tool whenever the user wants to *track* or *reuse* a computed value.
**NEVER modify workspace indicators yourself** — always delegate to the indicator tool.
### web_explore
**Use this tool to search the web or academic databases.**
The web-explore subagent searches the web (or arXiv for academic topics), fetches relevant pages, and returns a markdown summary with cited sources.
**ALWAYS use web_explore for:**
- Questions about current events, news, or real-time information
- Documentation, tutorials, or how-to guides
- Academic papers, research findings, or scientific topics
- Any topic that requires up-to-date external sources
**NOT for market data or computation** — use the research tool for analysis, and get_chart_data for OHLC values.
### research
**This is your PRIMARY tool for data analysis, computation, and charting.**
Creates and runs Python research scripts via a specialized research subagent.
The subagent autonomously writes code, executes it, handles errors, and generates charts.
**ALWAYS use research for:**
- Any plotting, charting, or visualization requests
- Price action analysis and correlations
- Statistical analysis of market data
- Volume analysis and patterns
- Machine learning or predictive modeling
- Any data-intensive computations
- Multi-symbol comparisons
- Custom calculations or transformations
- Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.)
**NOT for indicator management** — use the indicator tool for that.
**NEVER attempt to do analysis yourself in the chat.**
Let the research subagent write and execute the Python code.
Parameters:
- instruction: Natural language description of the analysis to perform (be specific!)
- name: A unique name for the research script (e.g., "BTC Weekly Analysis")
**Do NOT include any time range, history length, bar count, period size, resolution, or timestamp guidance in the instruction** — not as numbers, not as natural language ("3-6 months", "1 year", "sufficient data"), not at all. The research subagent has its own rules for selecting resolution and history window. If you add time guidance, the subagent will follow yours instead of its own (which uses much more data). Only pass time constraints if the user explicitly asked for a specific period (e.g. "last week", "show me 2023").
Example usage:
- User: "Does Friday price action correlate with Monday?"
- You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30-4:00 ET)", name="Friday-Monday Correlation"
- WRONG: "...use hourly data and at least 3-6 months..." ← never add this
### strategy
**Use this tool for ALL trading strategy requests without exception.**
The strategy subagent handles the complete strategy lifecycle: writing PandasStrategy classes, running backtests, interpreting results, and activating/deactivating paper trading.
**ALWAYS use strategy for:**
- "Create a strategy that buys when RSI < 30" → write a new strategy
- "Edit my momentum strategy to use a tighter stop" → modify existing strategy
- "Backtest my RSI strategy over the last year" → run backtest
- "How did this strategy perform on BTC?" → interpret results
- "Activate my strategy for paper trading" → start paper trading
- "What strategies are running?" → list active strategies
- "Stop my momentum strategy" → deactivate a strategy
- Any question about a strategy's PnL, trades, or performance
**NEVER call `backtest_strategy`, `activate_strategy`, `deactivate_strategy`, or `list_active_strategies` directly** — always go through the strategy tool.
**Custom indicators in strategies:**
When writing a new strategy, the strategy subagent will first check for existing custom indicators via `python_list(category="indicator")`. Prefer using custom indicators (via `ta.custom_*`) over computing signals inline — this promotes reuse and gives users better visibility into strategy components. If a needed indicator doesn't exist yet, the strategy subagent will create it first via the indicator workflow.
### backtest_strategy
*(Called internally by the strategy tool — do not call this directly.)*
Runs a saved trading strategy against historical OHLC data using the Nautilus Trader backtesting engine.
Returns structured performance metrics including trade list, Sortino/Calmar ratios, and equity curve.
### list_active_strategies
*(Called internally by the strategy tool — do not call this directly.)*
Lists all currently active (live or paper) strategies and their status.
### python_list
List existing scripts in a category ("strategy", "indicator", or "research").
Use this before calling the research tool to check whether a relevant script already exists.
If one does, pass its exact name to the research tool so the subagent updates it rather than creating a new one.
The strategy tool uses this internally to check strategy names before backtesting.
### symbol-lookup
Look up trading symbols and get metadata.
Use this when users mention tickers or need symbol information.
**Always use symbol_lookup to resolve a proper ticker before passing it to the research or get-chart-data tools.** Symbols must be in `SYMBOL.EXCHANGE` format (e.g., `BTC/USDT.BINANCE`). If the user says "ETHUSDT", "ETH", or any ambiguous ticker, resolve it first with symbol_lookup so the correct formatted ticker is passed downstream.
### get-chart-data
**IMPORTANT: This is for QUICK, CASUAL information ONLY. This tool just returns raw data - it does NOT create charts or plots.**
Use ONLY when the user wants to:
- Quickly glance at recent price data
- Get a rough sense of current market conditions
- Check basic OHLC values
- Retrieve raw data without any processing
**DO NOT use get-chart-data for:**
- Plotting, charting, or any visualization
- Statistical analysis or correlations
- Calculations or data transformations
- Multi-symbol comparisons
- Volume analysis or patterns
- Any non-trivial computation
- Technical indicators or overlays
**For anything beyond casual data retrieval, use the 'research' tool instead.**
The research tool can create proper analysis with charts, statistics, and computations.
**Time Parameters:** Both from_time and to_time accept:
- Unix timestamps as numbers (e.g., 1774126800)
- Unix timestamps as strings (e.g., "1774126800")
- Date strings (e.g., "2 days ago", "2024-01-01", "yesterday")
## Workspace Tools (MCP)
You also have access to workspace persistence tools via MCP:
- **workspace_read(store_name)**: Read a workspace store (returns JSON object)
- **workspace_write(store_name, data)**: Write/overwrite a workspace store
- **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store
These are useful for persisting user preferences, analysis results, and custom data across sessions.
For the `indicators` store specifically, always use the indicator tool rather than calling workspace tools directly.

View File

@@ -0,0 +1,4 @@
export { WikiLoader } from './wiki-loader.js';
export type { WikiFrontmatter, WikiPage, SpawnContext, VirtualPageFn } from './wiki-loader.js';
export { SpawnService } from './spawn-service.js';
export type { SpawnInput } from './spawn-service.js';

View File

@@ -0,0 +1,222 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
/** All platform tool names available to every subagent. */
const ALL_PLATFORM_TOOLS = ['SymbolLookup', 'GetChartData', 'WebSearch', 'FetchPage', 'ArxivSearch'];
import type { FastifyBaseLogger } from 'fastify';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import type { HarnessEvent, SubagentChunkEvent, SubagentThinkingEvent } from '../harness-events.js';
import { getToolLabel } from '../tool-labels.js';
import type { MCPClientConnector } from '../mcp-client.js';
import type { WorkspaceManager } from '../../workspace/workspace-manager.js';
import type { ToolRegistry } from '../../tools/tool-registry.js';
import type { MCPToolInfo } from '../../tools/mcp/mcp-tool-wrapper.js';
import { WikiLoader, type SpawnContext } from './wiki-loader.js';
/** Input for a single subagent spawn. */
export interface SpawnInput {
  /** Name of the agent wiki page to load (frontmatter + body). */
  agentName: string;
  /** Natural-language task for the subagent; becomes the HumanMessage. */
  instruction: string;
  /** Optional MCP connection used when resolving MCP tools. */
  mcpClient?: MCPClientConnector;
  /** Descriptors of MCP tools available for resolution. */
  availableMCPTools?: MCPToolInfo[];
  /** Workspace manager, enabling workspace tools and mutation sync. */
  workspaceManager?: WorkspaceManager;
  /** Abort signal; cancels the streaming agent run when triggered. */
  signal?: AbortSignal;
}
/**
* SpawnService creates isolated subagent invocations on demand.
*
* Each call to streamSpawn():
* 1. Loads the agent's wiki page (frontmatter + body)
* 2. Builds a SystemMessage from base prompt + agent body + static imports
* 3. Loads dynamic imports as a HumanMessage prefix (never cached)
* 4. Resolves tools from the frontmatter tool lists
* 5. Creates a fresh createReactAgent and streams events
*
* This replaces the old per-subagent BaseSubagent pattern with a stateless
* factory that reads configuration from markdown frontmatter.
*/
export class SpawnService {
constructor(
private readonly wikiLoader: WikiLoader,
private readonly toolRegistry: ToolRegistry,
private readonly modelFn: (maxTokens?: number) => Promise<BaseChatModel>,
private readonly logger: FastifyBaseLogger,
) {}
/**
* Stream events from a subagent invocation.
* Yields HarnessEvents (subagent_chunk, subagent_thinking, subagent_tool_call).
* Returns the final text result (or JSON with images when spawnsImages is set).
*/
async *streamSpawn(input: SpawnInput): AsyncGenerator<HarnessEvent, string> {
const { agentName, instruction, mcpClient, availableMCPTools, workspaceManager, signal } = input;
this.logger.info({ agentName, instruction: instruction.substring(0, 100) }, 'SpawnService: starting');
// Load agent wiki page
const agentPage = await this.wikiLoader.loadAgentPage(agentName);
const fm = agentPage.frontmatter;
// Build SpawnContext for virtual pages
const ctx: SpawnContext = { mcpClient, workspaceManager };
// Load base prompt (index.md + tools.md) — stable, tier-1 cacheable
const basePrompt = await this.wikiLoader.getBasePrompt();
// Load static imports (appended to agent body, tier-2 cacheable together)
const staticImports = fm.static_imports?.length
? await this.wikiLoader.loadStaticImports(fm.static_imports)
: '';
// Build the static SystemMessage (base + agent body + static imports)
const staticContent = [basePrompt, agentPage.body, staticImports]
.filter(Boolean)
.join('\n\n---\n\n');
const systemMessage = new SystemMessage(staticContent);
// Load dynamic imports (never cached, injected as a HumanMessage prefix)
const dynamicContent = fm.dynamic_imports?.length
? await this.wikiLoader.loadDynamicImports(fm.dynamic_imports, ctx)
: '';
// Build HumanMessage: dynamic context (if any) + instruction
const humanContent = dynamicContent
? `${dynamicContent}\n\n---\n\n${instruction}`
: instruction;
const humanMessage = new HumanMessage(humanContent);
// Set up image capture array (per-call, not shared mutable state)
const imageCapture: Array<{ data: string; mimeType: string }> = [];
const onImage = fm.spawnsImages
? (img: { data: string; mimeType: string }) => imageCapture.push(img)
: undefined;
const onWorkspaceMutation = workspaceManager
? (storeName: string, newState: unknown) => {
workspaceManager.setState(storeName, newState).catch((err: Error) => {
this.logger.error({ err, storeName }, 'Failed to sync workspace after spawn mutation');
});
}
: undefined;
// All subagents get all platform tools and all MCP tools.
// Per-agent tool restrictions via frontmatter are no longer used.
const tools = await this.toolRegistry.resolveTools(
ALL_PLATFORM_TOOLS,
['*'],
mcpClient,
availableMCPTools,
workspaceManager,
onImage,
onWorkspaceMutation,
);
this.logger.info(
{ agentName, toolCount: tools.length, toolNames: tools.map(t => t.name) },
'SpawnService: tools resolved'
);
// Create model (respecting per-agent maxTokens)
const model = await this.modelFn(fm.maxTokens);
// Create a fresh ReactAgent for this invocation
const agent = createReactAgent({
llm: model,
tools,
prompt: systemMessage,
});
const recursionLimit = fm.recursionLimit ?? 30;
// Emit an initial indicator so the UI shows the subagent has started
yield { type: 'subagent_tool_call', agentName, toolName: 'Thinking...', label: 'Thinking...' };
const stream = agent.stream(
{ messages: [humanMessage] },
{ streamMode: ['messages', 'updates'], recursionLimit, signal }
);
let finalText = '';
for await (const [mode, data] of await stream) {
if (signal?.aborted) break;
if (mode === 'messages') {
for (const chunk of SpawnService.extractStreamChunks(data, agentName)) {
yield chunk;
}
} else if (mode === 'updates') {
if ((data as any).agent?.messages) {
for (const msg of (data as any).agent.messages as any[]) {
if (msg.tool_calls?.length) {
for (const tc of msg.tool_calls) {
yield {
type: 'subagent_tool_call',
agentName,
toolName: tc.name,
label: getToolLabel(tc.name),
};
}
} else {
const content = SpawnService.extractFinalText(msg);
if (content) finalText = content;
}
}
}
}
}
this.logger.info(
{ agentName, textLength: finalText.length, imageCount: imageCapture.length },
'SpawnService: finished'
);
// If this agent captures images, return JSON with text + images
if (fm.spawnsImages && imageCapture.length > 0) {
return JSON.stringify({ text: finalText, images: imageCapture });
}
return finalText;
}
/**
 * Convert a LangGraph `messages`-mode stream datum into typed subagent events.
 *
 * LangGraph emits `[message_chunk, metadata]` tuples in `messages` mode. The
 * chunk content is either a plain string (ordinary text token) or an array of
 * content blocks (extended-thinking responses mixing `thinking` and `text`).
 * Empty strings/blocks produce no events.
 */
static extractStreamChunks(
  data: unknown,
  agentName: string,
): Array<SubagentChunkEvent | SubagentThinkingEvent> {
  // Unwrap the [chunk, metadata] tuple when present.
  const streamedMsg = Array.isArray(data) ? (data as unknown[])[0] : data;
  const content = (streamedMsg as any)?.content;
  if (typeof content === 'string') {
    if (!content) return [];
    return [{ type: 'subagent_chunk', agentName, content }];
  }
  if (!Array.isArray(content)) {
    return [];
  }
  const events: Array<SubagentChunkEvent | SubagentThinkingEvent> = [];
  for (const block of content as any[]) {
    if (block?.type === 'thinking' && typeof block.thinking === 'string' && block.thinking) {
      events.push({ type: 'subagent_thinking', agentName, content: block.thinking });
    } else if (block?.type === 'text' && typeof block.text === 'string' && block.text) {
      events.push({ type: 'subagent_chunk', agentName, content: block.text });
    }
  }
  return events;
}
/**
 * Pull the final assistant text out of an `updates`-mode agent message.
 *
 * Plain-string content is returned verbatim; content-block arrays are reduced
 * to the concatenation of their `text` blocks (thinking blocks are dropped).
 * Anything else yields the empty string.
 */
static extractFinalText(msg: any): string {
  const content = msg?.content;
  if (typeof content === 'string') {
    return content;
  }
  if (!Array.isArray(content)) {
    return '';
  }
  let text = '';
  for (const block of content as any[]) {
    if (block?.type === 'text' && typeof block.text === 'string') {
      text += block.text;
    }
  }
  return text;
}
}

View File

@@ -0,0 +1,204 @@
import { readFile, readdir } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import yaml from 'js-yaml';
import type { MCPClientConnector } from '../mcp-client.js';
import type { WorkspaceManager } from '../../workspace/workspace-manager.js';
// ESM has no __filename/__dirname globals; derive them from import.meta.url.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Knowledge directory: gateway/knowledge/ (3 levels up from harness/spawn/)
const KNOWLEDGE_DIR = join(__dirname, '..', '..', '..', 'knowledge');
// Prompt directory: gateway/prompt/ — base prompts and agent system prompts
// These are injected directly into context and are not surfaced via memory_lookup.
const PROMPT_DIR = join(__dirname, '..', '..', '..', 'prompt');
/** YAML frontmatter recognized at the top of a wiki/prompt markdown page. */
export interface WikiFrontmatter {
  maxTokens?: number;       // per-agent model token budget override
  recursionLimit?: number;  // per-agent LangGraph recursion limit override
  spawnsImages?: boolean;   // agent returns captured images alongside text
  static_imports?: string[];  // page names loaded once and cached
  dynamic_imports?: string[]; // page names re-generated on every spawn
}
/** A parsed markdown page: frontmatter (possibly empty) plus trimmed body. */
export interface WikiPage {
  frontmatter: WikiFrontmatter;
  body: string;
}
/** Per-spawn context handed to virtual page generators. */
export interface SpawnContext {
  mcpClient?: MCPClientConnector;
  workspaceManager?: WorkspaceManager;
}
/** Generator for a virtual page: produces markdown fresh at spawn time. */
export type VirtualPageFn = (ctx: SpawnContext) => Promise<string>;
/**
 * WikiLoader loads markdown knowledge pages from disk, caches them in memory,
 * and supports virtual pages that are generated dynamically at spawn time.
 *
 * Page resolution order: virtual registry first, then disk.
 */
export class WikiLoader {
  /** Disk pages cached by name; prompt pages use a `__prompt__` key prefix. */
  private readonly pageCache = new Map<string, WikiPage>();
  /** Registered virtual page generators, keyed by page name. */
  private readonly virtualPages = new Map<string, VirtualPageFn>();
  /** Cached concatenation of prompt/index.md + prompt/tools.md bodies. */
  private basePromptCache: string | null = null;
  /**
   * Register a virtual page that generates markdown dynamically.
   * Virtual pages are never cached — they run fresh every time they are imported.
   */
  registerVirtual(name: string, fn: VirtualPageFn): void {
    this.virtualPages.set(name, fn);
  }
  /**
   * Load and cache a wiki page from disk.
   * Looks in KNOWLEDGE_DIR only — prompt files are not surfaced here.
   * Returns null if the file does not exist (or cannot be read).
   */
  async loadPage(name: string): Promise<WikiPage | null> {
    if (this.pageCache.has(name)) {
      return this.pageCache.get(name)!;
    }
    const filePath = join(KNOWLEDGE_DIR, `${name}.md`);
    let content: string;
    try {
      content = await readFile(filePath, 'utf-8');
    } catch {
      return null;
    }
    const page = this.parsePage(content);
    this.pageCache.set(name, page);
    return page;
  }
  /**
   * Load a prompt file from PROMPT_DIR (never exposed via memory_lookup).
   * Cached under a `__prompt__`-prefixed key so prompt and knowledge pages
   * with the same name cannot collide. Returns null if the file does not exist.
   */
  private async loadPromptPage(name: string): Promise<WikiPage | null> {
    const cacheKey = `__prompt__${name}`;
    if (this.pageCache.has(cacheKey)) {
      return this.pageCache.get(cacheKey)!;
    }
    const filePath = join(PROMPT_DIR, `${name}.md`);
    let content: string;
    try {
      content = await readFile(filePath, 'utf-8');
    } catch {
      return null;
    }
    const page = this.parsePage(content);
    this.pageCache.set(cacheKey, page);
    return page;
  }
  /**
   * Load an agent-specific page from PROMPT_DIR (throws if not found).
   */
  async loadAgentPage(agentName: string): Promise<WikiPage> {
    const page = await this.loadPromptPage(`agent-${agentName}`);
    if (!page) {
      throw new Error(`Agent prompt page not found: prompt/agent-${agentName}.md`);
    }
    return page;
  }
  /**
   * Load static imports (real .md files) and concatenate their bodies.
   * Results are cached via individual page caches. Missing pages are
   * non-fatal and logged (mirrors loadDynamicImports' failure handling).
   */
  async loadStaticImports(names: string[]): Promise<string> {
    const parts: string[] = [];
    for (const name of names) {
      const page = await this.loadPage(name);
      if (page) {
        parts.push(page.body);
      } else {
        // Keep failures visible instead of silently dropping the import.
        console.warn(`[WikiLoader] Static import '${name}' not found in knowledge directory`);
      }
    }
    return parts.join('\n\n---\n\n');
  }
  /**
   * Load dynamic imports (virtual pages or disk fallback, never cached).
   * Failures are non-fatal and logged to stderr.
   */
  async loadDynamicImports(names: string[], ctx: SpawnContext): Promise<string> {
    const parts: string[] = [];
    for (const name of names) {
      const fn = this.virtualPages.get(name);
      if (fn) {
        try {
          const content = await fn(ctx);
          if (content) parts.push(content);
        } catch (err) {
          console.warn(`[WikiLoader] Virtual page '${name}' failed:`, err);
        }
      } else {
        // Fall back to disk (dynamic import, so bypass the in-memory cache)
        const filePath = join(KNOWLEDGE_DIR, `${name}.md`);
        try {
          const raw = await readFile(filePath, 'utf-8');
          const page = this.parsePage(raw);
          parts.push(page.body);
        } catch {
          console.warn(`[WikiLoader] Dynamic page '${name}' not found on disk or in registry`);
        }
      }
    }
    return parts.join('\n\n---\n\n');
  }
  /**
   * Return the base prompt text: prompt/index.md body + prompt/tools.md body, concatenated.
   * Result is cached for the lifetime of this WikiLoader instance.
   */
  async getBasePrompt(): Promise<string> {
    if (this.basePromptCache !== null) return this.basePromptCache;
    const [index, tools] = await Promise.all([
      this.loadPromptPage('index'),
      this.loadPromptPage('tools'),
    ]);
    const parts: string[] = [];
    if (index) parts.push(index.body);
    if (tools) parts.push(tools.body);
    this.basePromptCache = parts.join('\n\n');
    return this.basePromptCache;
  }
  /**
   * List all .md file names available in the knowledge directory (without extension).
   * Only regular files are returned — a directory whose name happens to end in
   * `.md` is not a loadable page and is excluded.
   */
  async listPages(): Promise<string[]> {
    try {
      const entries = await readdir(KNOWLEDGE_DIR, { withFileTypes: true });
      return entries
        .filter(e => e.isFile() && e.name.endsWith('.md'))
        .map(e => e.name.slice(0, -3));
    } catch {
      return [];
    }
  }
  /**
   * Parse a markdown file with optional YAML frontmatter.
   * Frontmatter is a leading `---` ... `---` fence; absent or empty
   * frontmatter yields `{}` and the whole (trimmed) content as body.
   */
  private parsePage(content: string): WikiPage {
    const fmMatch = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n([\s\S]*)$/);
    if (fmMatch) {
      const frontmatter = (yaml.load(fmMatch[1]) as WikiFrontmatter) ?? {};
      const body = fmMatch[2].trim();
      return { frontmatter, body };
    }
    return { frontmatter: {}, body: content.trim() };
  }
}

View File

@@ -1,273 +0,0 @@
# Subagents
Specialized agents with dedicated knowledge bases and system prompts.
## What are Subagents?
Subagents are focused AI agents designed for specific tasks. Unlike general-purpose agents, each subagent has:
- **Specialized knowledge**: Multi-file memory directory with domain-specific info
- **Custom system prompt**: Tailored instructions for the task
- **Model override**: Can use different models than the main agent
- **Capability tags**: Declare what they can do
## Directory Structure
```
subagents/
├── base-subagent.ts # Base class
├── {subagent-name}/
│ ├── config.yaml # Configuration
│ ├── system-prompt.md # System instructions
│ ├── memory/ # Knowledge base (multi-file)
│ │ ├── file1.md
│ │ ├── file2.md
│ │ └── file3.md
│ └── index.ts # Implementation
└── README.md # This file
```
## Creating a New Subagent
### 1. Create Directory Structure
```bash
mkdir -p subagents/my-subagent/memory
```
### 2. Create config.yaml
```yaml
name: my-subagent
description: What it does
# Model override (optional)
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 4096
# Memory files to load
memoryFiles:
- guidelines.md
- examples.md
- best-practices.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities
capabilities:
- capability1
- capability2
```
### 3. Write system-prompt.md
```markdown
# My Subagent System Prompt
You are an expert in [domain].
## Your Role
[What the subagent does]
## Approach
1. [Step 1]
2. [Step 2]
## Output Format
[How to structure responses]
```
### 4. Create Memory Files
Split knowledge into logical files:
```markdown
<!-- memory/guidelines.md -->
# Guidelines
## What to Check
- Thing 1
- Thing 2
## What to Avoid
- Anti-pattern 1
- Anti-pattern 2
```
### 5. Implement Subagent
```typescript
// index.ts
import { BaseSubagent, SubagentConfig, SubagentContext } from '../base-subagent.js';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { FastifyBaseLogger } from 'fastify';
export class MySubagent extends BaseSubagent {
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger) {
super(config, model, logger);
}
async execute(context: SubagentContext, input: string): Promise<string> {
this.logger.info({ subagent: this.getName() }, 'Executing subagent');
const messages = this.buildMessages(context, input);
const response = await this.model.invoke(messages);
return response.content as string;
}
}
// Factory function
export async function createMySubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string
): Promise<MySubagent> {
const { readFile } = await import('fs/promises');
const { join } = await import('path');
const yaml = await import('js-yaml');
const configPath = join(basePath, 'config.yaml');
const configContent = await readFile(configPath, 'utf-8');
const config = yaml.load(configContent) as SubagentConfig;
const subagent = new MySubagent(config, model, logger);
await subagent.initialize(basePath);
return subagent;
}
```
### 6. Export from index.ts
```typescript
// subagents/index.ts
export { MySubagent, createMySubagent } from './my-subagent/index.js';
```
## Using Subagents
### Direct Usage
```typescript
import { createMySubagent } from './harness/subagents';
const subagent = await createMySubagent(model, logger, basePath);
const result = await subagent.execute({ userContext }, 'input text');
```
### In Workflows
```typescript
const analyzeNode = async (state) => {
const result = await mySubagent.execute(
{ userContext: state.userContext },
state.input
);
return { analysis: result };
};
```
### With Routing
Add to `config/subagent-routing.yaml`:
```yaml
subagents:
my-subagent:
enabled: true
path: src/harness/subagents/my-subagent
triggers:
keywords:
- "keyword1"
- "keyword2"
patterns:
- "pattern.*regex"
priority: medium
timeout: 30000
```
## Multi-File Memory Benefits
### Why Split Memory?
1. **Organization**: Easier to maintain separate concerns
2. **Versioning**: Update specific files without touching others
3. **Collaboration**: Multiple people can work on different files
4. **Context Management**: LLM sees structured knowledge
### Example Split
For a code reviewer:
- `review-guidelines.md`: What to check
- `common-patterns.md`: Good/bad examples
- `best-practices.md`: Industry standards
All files are loaded and concatenated at initialization.
## Best Practices
### Memory Files
- **Be Specific**: Include concrete examples, not just theory
- **Use Markdown**: Tables, lists, code blocks for clarity
- **Keep Focused**: Each file should have a clear purpose
- **Update Regularly**: Improve based on real usage
### System Prompts
- **Define Role Clearly**: "You are a [specific role]"
- **Specify Output Format**: Show examples of expected output
- **Set Constraints**: What to do, what not to do
- **Give Context**: Why this subagent exists
### Configuration
- **Model Selection**: Use faster models for simple tasks
- **Temperature**: Lower (0.2-0.3) for precise work, higher (0.7-0.9) for creative
- **Capabilities**: Tag accurately for routing
## Available Subagents
### code-reviewer
Reviews trading strategy code for bugs, performance, and best practices.
**Capabilities:**
- `static_analysis`
- `performance_review`
- `security_audit`
- `code_quality`
**Memory:**
- Review guidelines
- Common patterns
- Best practices
### risk-analyzer (TODO)
Analyzes trading risk and exposure.
### market-analyst (TODO)
Provides market analysis and insights.
## Troubleshooting
### Memory Files Not Loading
- Check file paths in config.yaml
- Ensure files exist in memory/ directory
- Check file permissions
### Subagent Not Being Routed
- Verify triggers in subagent-routing.yaml
- Check priority (higher priority matches first)
- Ensure enabled: true
### Model Errors
- Verify API keys in environment
- Check model override is valid
- Ensure token limits not exceeded

View File

@@ -1,447 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js';
import type { MCPClientConnector } from '../mcp-client.js';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { readFile } from 'fs/promises';
import { join } from 'path';
import type { HarnessEvent, SubagentChunkEvent, SubagentThinkingEvent } from '../harness-events.js';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import yaml from 'js-yaml';
/**
 * Subagent configuration (loaded from config.yaml)
 */
export interface SubagentConfig {
  name: string; // Subagent identifier, used in logs and error messages
  model?: string; // Override default model
  temperature?: number; // Sampling temperature override
  maxTokens?: number; // Max output tokens override
  memoryFiles: string[]; // Memory files to load from memory/ directory
  capabilities: string[]; // Capability tags this subagent declares
  systemPromptFile?: string; // Path to system-prompt.md
  tools?: {
    platform?: string[]; // Platform tool names
    mcp?: string[]; // MCP tool patterns/names
  };
}
/**
 * Subagent execution context
 */
export interface SubagentContext {
  // Identity/session info for the requesting user (userId is read for logging).
  userContext: UserContext;
  // Optional prior messages, inserted between the system prompt and the new input.
  conversationHistory?: BaseMessage[];
}
/**
 * Base subagent class
 *
 * Subagents are specialized agents with:
 * - Dedicated system prompts
 * - Multi-file memory (guidelines, patterns, best practices)
 * - Optional model override
 * - Specific capabilities
 *
 * Structure:
 * subagents/
 *   research/
 *     config.yaml
 *     system-prompt.md
 *     index.ts
 *
 * Lifecycle: construct → initialize(basePath) (loads prompt + memory files)
 * → execute()/streamEvents() per request.
 */
export abstract class BaseSubagent {
  protected logger: FastifyBaseLogger;
  protected model: BaseChatModel;
  protected config: SubagentConfig;
  // Loaded from config.systemPromptFile by initialize(); optional.
  protected systemPrompt?: string;
  // One entry per successfully loaded memory file, prefixed with its filename heading.
  protected memoryContext: string[] = [];
  protected mcpClient?: MCPClientConnector;
  protected tools: DynamicStructuredTool[] = [];
  /**
   * @param config    Parsed config.yaml for this subagent
   * @param model     Chat model to drive the agent loop
   * @param logger    Structured logger
   * @param mcpClient Optional MCP connector (required unless requiresMCPClient() is overridden)
   * @param tools     Tools exposed to the LangGraph agent (defaults to none)
   */
  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger,
    mcpClient?: MCPClientConnector,
    tools?: DynamicStructuredTool[]
  ) {
    this.config = config;
    this.model = model;
    this.logger = logger;
    this.mcpClient = mcpClient;
    this.tools = tools || [];
  }
  /** Per-subagent recursion limit for the LangGraph agent loop */
  protected abstract getRecursionLimit(): number;
  /** Fallback text returned when the agent produces no output */
  protected abstract getFallbackText(): string;
  /** Whether an MCP client is required; defaults true. Override to false for tool-only subagents. */
  protected requiresMCPClient(): boolean {
    return true;
  }
  /**
   * Build the system message and final human message for agent invocation.
   * Subclasses may override to augment the system message (e.g. injecting dynamic context).
   */
  protected async buildSystemMessage(
    context: SubagentContext,
    instruction: string
  ): Promise<{ systemMessage: SystemMessage; humanMessage: BaseMessage }> {
    const msgs = this.buildMessages(context, instruction);
    return {
      systemMessage: msgs[0] as SystemMessage,
      humanMessage: msgs[msgs.length - 1],
    };
  }
  /**
   * Shared execute body. Subclasses that need pre/post hooks (e.g. image capture)
   * override execute() and call this method internally.
   *
   * @throws Error when an MCP client is required but absent.
   * @returns The last AI message's text, or getFallbackText() when none exists.
   */
  protected async executeAgent(context: SubagentContext, instruction: string): Promise<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        instruction: instruction.substring(0, 200),
        toolCount: this.tools.length,
        toolNames: this.tools.map(t => t.name),
      },
      `${this.config.name} subagent starting`
    );
    if (this.requiresMCPClient() && !this.hasMCPClient()) {
      throw new Error(`MCP client not available for ${this.config.name} subagent`);
    }
    if (this.requiresMCPClient() && this.tools.length === 0) {
      this.logger.warn(`${this.config.name} subagent has no tools`);
    }
    const { systemMessage, humanMessage } = await this.buildSystemMessage(context, instruction);
    const agent = createReactAgent({
      llm: this.model,
      tools: this.tools,
      prompt: systemMessage,
    });
    const result = await agent.invoke(
      { messages: [humanMessage] },
      { recursionLimit: this.getRecursionLimit() }
    );
    const allMessages: any[] = result.messages ?? [];
    this.logger.info(
      { messageCount: allMessages.length },
      `${this.config.name} subagent graph completed`
    );
    // Walk backwards for the last AI message; checks both the class name and
    // the _getType() tag to tolerate different LangChain message representations.
    const lastAI = [...allMessages].reverse().find(
      (m: any) => m.constructor?.name === 'AIMessage' || m._getType?.() === 'ai'
    );
    const finalText = lastAI
      ? (typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content))
      : this.getFallbackText();
    this.logger.info({ textLength: finalText.length }, `${this.config.name} subagent finished`);
    return finalText;
  }
  /**
   * Execute subagent. Delegates to executeAgent by default;
   * subclasses with side-effects (image capture, etc.) override this.
   */
  async execute(context: SubagentContext, instruction: string): Promise<string> {
    return this.executeAgent(context, instruction);
  }
  /**
   * Shared streamEvents loop. Subclasses that need pre/post hooks override streamEvents()
   * and delegate here via `yield* this.streamEventsCore(...)`.
   *
   * Yields chunk/thinking/tool-call events while streaming; the generator's
   * return value is the final text of the run.
   */
  protected async *streamEventsCore(
    context: SubagentContext,
    instruction: string,
    signal?: AbortSignal,
  ): AsyncGenerator<HarnessEvent, string> {
    this.logger.info({ subagent: this.getName() }, 'streamEvents starting');
    if (this.requiresMCPClient() && !this.hasMCPClient()) {
      throw new Error(`MCP client not available for ${this.config.name} subagent`);
    }
    const { systemMessage, humanMessage } = await this.buildSystemMessage(context, instruction);
    const agent = createReactAgent({
      llm: this.model,
      tools: this.tools,
      prompt: systemMessage,
    });
    const stream = agent.stream(
      { messages: [humanMessage] },
      { streamMode: ['messages', 'updates'], recursionLimit: this.getRecursionLimit(), signal }
    );
    let finalText = '';
    // Multi-mode streaming yields [mode, data] tuples: 'messages' carries
    // token-level chunks, 'updates' carries whole node state updates.
    for await (const [mode, data] of await stream) {
      if (signal?.aborted) break;
      if (mode === 'messages') {
        for (const chunk of BaseSubagent.extractStreamChunks(data, this.config.name)) {
          yield chunk;
        }
      } else if (mode === 'updates') {
        if ((data as any).agent?.messages) {
          for (const msg of (data as any).agent.messages as any[]) {
            if (msg.tool_calls?.length) {
              for (const tc of msg.tool_calls) {
                yield { type: 'subagent_tool_call', agentName: this.config.name, toolName: tc.name, label: tc.name };
              }
            } else {
              // Non-tool-call messages carry text; the last one wins as the final answer.
              const content = BaseSubagent.extractFinalText(msg);
              if (content) finalText = content;
            }
          }
        }
      }
    }
    this.logger.info({ textLength: finalText.length }, 'streamEvents finished');
    return finalText;
  }
  /**
   * Stream typed HarnessEvents during execution. Delegates to streamEventsCore by default.
   * Subclasses with pre/post logic override this and use `yield* this.streamEventsCore(...)`.
   */
  async *streamEvents(
    context: SubagentContext,
    input: string,
    signal?: AbortSignal,
  ): AsyncGenerator<HarnessEvent, string> {
    return yield* this.streamEventsCore(context, input, signal);
  }
  /**
   * Stream execution (optional, default to non-streaming):
   * runs execute() to completion and yields the whole result once.
   */
  async *stream(
    context: SubagentContext,
    input: string
  ): AsyncGenerator<string> {
    const result = await this.execute(context, input);
    yield result;
  }
  /**
   * Extract subagent_chunk / subagent_thinking events from a LangGraph `messages` stream datum.
   *
   * LangGraph emits `[message_chunk, metadata]` tuples in `messages` mode. The message content
   * can be a plain string (normal text token) or an array of content blocks (extended thinking
   * responses with `{type:"thinking", thinking:"..."}` and `{type:"text", text:"..."}`).
   */
  static extractStreamChunks(
    data: unknown,
    agentName: string,
  ): Array<SubagentChunkEvent | SubagentThinkingEvent> {
    const msg = Array.isArray(data) ? (data as unknown[])[0] : data;
    const content = (msg as any)?.content;
    if (typeof content === 'string') {
      return content ? [{ type: 'subagent_chunk', agentName, content }] : [];
    }
    if (Array.isArray(content)) {
      const chunks: Array<SubagentChunkEvent | SubagentThinkingEvent> = [];
      for (const block of content as any[]) {
        if (block?.type === 'thinking' && typeof block.thinking === 'string' && block.thinking) {
          chunks.push({ type: 'subagent_thinking', agentName, content: block.thinking });
        } else if (block?.type === 'text' && typeof block.text === 'string' && block.text) {
          chunks.push({ type: 'subagent_chunk', agentName, content: block.text });
        }
      }
      return chunks;
    }
    return [];
  }
  /**
   * Extract the final text from an `updates`-mode agent message.
   * Handles both plain string content and array content blocks (extended thinking).
   */
  static extractFinalText(msg: any): string {
    if (typeof msg?.content === 'string') return msg.content;
    if (Array.isArray(msg?.content)) {
      return (msg.content as any[])
        .filter((b: any) => b?.type === 'text' && typeof b.text === 'string')
        .map((b: any) => b.text as string)
        .join('');
    }
    return '';
  }
  /**
   * Initialize subagent: load system prompt and memory files.
   * Missing files are logged by loadFile and skipped (non-fatal).
   */
  async initialize(basePath: string): Promise<void> {
    this.logger.info({ subagent: this.config.name }, 'Initializing subagent');
    // Load system prompt
    if (this.config.systemPromptFile) {
      const promptPath = join(basePath, this.config.systemPromptFile);
      this.systemPrompt = await this.loadFile(promptPath);
    }
    // Load memory files
    for (const memoryFile of this.config.memoryFiles) {
      const memoryPath = join(basePath, 'memory', memoryFile);
      const content = await this.loadFile(memoryPath);
      if (content) {
        this.memoryContext.push(`# ${memoryFile}\n\n${content}`);
      }
    }
    this.logger.info(
      {
        subagent: this.config.name,
        memoryFiles: this.config.memoryFiles.length,
        systemPromptLoaded: !!this.systemPrompt,
      },
      'Subagent initialized'
    );
  }
  /**
   * Load config.yaml from basePath and parse it.
   * NOTE(review): the yaml.load result is cast, not validated — assumes the
   * file matches SubagentConfig.
   */
  static async loadConfig(basePath: string): Promise<SubagentConfig> {
    const configContent = await readFile(join(basePath, 'config.yaml'), 'utf-8');
    return yaml.load(configContent) as SubagentConfig;
  }
  /**
   * Build messages with system prompt and memory context.
   * Order: system (prompt + knowledge base) → optional history → current input.
   */
  protected buildMessages(
    context: SubagentContext,
    currentInput: string
  ): BaseMessage[] {
    const messages: BaseMessage[] = [];
    // System prompt with memory context
    let systemContent = this.systemPrompt || `You are ${this.config.name}.`;
    if (this.memoryContext.length > 0) {
      systemContent += '\n\n# Knowledge Base\n\n';
      systemContent += this.memoryContext.join('\n\n---\n\n');
    }
    messages.push(new SystemMessage(systemContent));
    // Add conversation history if provided
    if (context.conversationHistory && context.conversationHistory.length > 0) {
      messages.push(...context.conversationHistory);
    }
    // Add current input
    messages.push(new HumanMessage(currentInput));
    return messages;
  }
  /**
   * Load file content; returns undefined (and logs a warning) on any read error.
   */
  private async loadFile(path: string): Promise<string | undefined> {
    try {
      const content = await readFile(path, 'utf-8');
      return content;
    } catch (error) {
      this.logger.warn({ error, path }, 'Failed to load file');
      return undefined;
    }
  }
  /**
   * Get subagent name
   */
  getName(): string {
    return this.config.name;
  }
  /**
   * Get subagent capabilities
   */
  getCapabilities(): string[] {
    return this.config.capabilities;
  }
  /**
   * Check if subagent has a specific capability
   */
  hasCapability(capability: string): boolean {
    return this.config.capabilities.includes(capability);
  }
  /**
   * Call a tool on the user's MCP server
   *
   * @param name Tool name
   * @param args Tool arguments
   * @returns Tool result
   * @throws Error if MCP client not available or tool call fails
   */
  protected async callMCPTool(name: string, args: Record<string, unknown>): Promise<unknown> {
    if (!this.mcpClient) {
      throw new Error('MCP client not available for this subagent');
    }
    try {
      this.logger.debug({ tool: name, args }, 'Calling MCP tool from subagent');
      const result = await this.mcpClient.callTool(name, args);
      return result;
    } catch (error) {
      this.logger.error({ error, tool: name }, 'MCP tool call failed');
      throw error;
    }
  }
  /**
   * Check if MCP client is available
   */
  protected hasMCPClient(): boolean {
    return this.mcpClient !== undefined;
  }
  /**
   * Get tools available to this subagent
   */
  getTools(): DynamicStructuredTool[] {
    return this.tools;
  }
  /**
   * Set tools for this subagent (used during initialization)
   */
  setTools(tools: DynamicStructuredTool[]): void {
    this.tools = tools;
    this.logger.debug(
      {
        subagent: this.config.name,
        toolCount: tools.length,
        toolNames: tools.map(t => t.name),
      },
      'Tools set for subagent'
    );
  }
}

View File

@@ -1,18 +0,0 @@
// Subagents exports: base class/types plus each concrete subagent's public surface.
export {
  BaseSubagent,
  type SubagentConfig,
  type SubagentContext,
} from './base-subagent.js';
// Research subagent (Python research scripting / charting).
export {
  ResearchSubagent,
  createResearchSubagent,
  type ResearchResult,
} from './research/index.js';
// Strategy subagent.
export {
  StrategySubagent,
  createStrategySubagent,
} from './strategy/index.js';

View File

@@ -1,30 +0,0 @@
# Indicator Subagent Configuration
name: indicator
description: Manages TradingView indicators in the workspace and creates custom indicator scripts
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# No memory files — all indicator knowledge is inline in the system prompt
memoryFiles: []
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- indicator_management
- workspace_manipulation
- custom_indicators
# Tools available to this subagent
tools:
platform: []
mcp:
- workspace_read # Read current indicators store
- workspace_patch # Add/update/remove indicators (no workspace_write — patch only)
- category_* # Write/edit/read/list custom indicator scripts
- evaluate_indicator # Evaluate any indicator against real OHLC data

View File

@@ -1,34 +0,0 @@
import { BaseSubagent } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../mcp-client.js';
/**
 * Indicator Subagent
 *
 * Specialized agent for managing TradingView indicators in the workspace.
 * Uses workspace_read/patch MCP tools to:
 * - Read, add, modify, and remove indicators from the indicators store
 * - Create custom indicator scripts via python_* tools
 * - Validate indicators using the evaluate_indicator tool
 */
export class IndicatorSubagent extends BaseSubagent {
  /** Cap on the LangGraph agent-loop recursion for indicator work. */
  protected getRecursionLimit() { return 25; }
  /** Returned when the agent loop yields no final AI message. */
  protected getFallbackText() { return 'Indicator update completed.'; }
}
/**
* Factory function to create and initialize IndicatorSubagent
*/
export async function createIndicatorSubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string,
mcpClient?: MCPClientConnector,
tools?: any[]
): Promise<IndicatorSubagent> {
const config = await BaseSubagent.loadConfig(basePath);
const subagent = new IndicatorSubagent(config, model, logger, mcpClient, tools);
await subagent.initialize(basePath);
return subagent;
}

View File

@@ -1,2 +0,0 @@
# Auto-generated at build time by bin/build
api-source/

View File

@@ -1,32 +0,0 @@
# Research Subagent Configuration
name: research
description: Creates and runs Python research scripts for market analysis, charting, and statistical analysis
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# Memory files to load from memory/ directory
memoryFiles:
- api-reference.md
- usage-examples.md
- pandas-ta-reference.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- research_scripting
- data_analysis
- charting
- statistical_analysis
# Tools available to this subagent
tools:
platform: [] # No platform tools needed (works at script level)
mcp:
- category_* # All category_ tools (write, edit, read, list)
- execute_research # Script execution tool

View File

@@ -1,255 +0,0 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../mcp-client.js';
import type { HarnessEvent } from '../../harness-events.js';
/**
 * Result from research subagent execution
 */
export interface ResearchResult {
  // Final text produced by the research agent.
  text: string;
  // Charts captured during script execution (pass-through only; not loaded
  // into the LLM context).
  images: Array<{
    data: string; // image payload — presumably base64-encoded; TODO confirm at call site
    mimeType: string; // e.g. image/png — NOTE(review): format not shown here, verify
  }>;
}
/**
* Research Subagent
*
* Specialized agent for creating and running Python research scripts.
* Uses python_* MCP tools to:
* - Create/edit research scripts with DataAPI and ChartingAPI
* - Execute scripts and capture matplotlib charts
* - Iterate on errors with autonomous coding loop
*
* The subagent has direct access to MCP tools and handles the full
* coding loop without requiring skill-level orchestration.
*
* Images from script execution are extracted and returned separately
* but are NOT loaded into the LLM context (pass-through only).
*/
export class ResearchSubagent extends BaseSubagent {
  // Images captured during the most recent execution (base64 payload + MIME type).
  private lastImages: Array<{data: string; mimeType: string}> = [];
  // Shared with the MCP tool wrappers — populated as tools run, cleared per execution
  private imageCapture: Array<{data: string; mimeType: string}> = [];

  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger,
    mcpClient?: MCPClientConnector,
    tools?: any[]
  ) {
    super(config, model, logger, mcpClient, tools);
  }

  // Research tasks often need many tool round-trips (write → run → plot → revise).
  protected getRecursionLimit() { return 40; }
  // Fallback text used when the model produces no final message.
  protected getFallbackText() { return 'Research completed.'; }

  /**
   * Replace the image-capture array with one shared by the MCP tool wrappers.
   * The reference is kept stable afterwards: execute()/streamEvents() clear it
   * in place (`length = 0`) so wrappers holding the same array keep working.
   */
  setImageCapture(capture: Array<{data: string; mimeType: string}>): void {
    this.imageCapture = capture;
  }

  /**
   * Fetch custom indicators from the sandbox and return a formatted system prompt section.
   * Returns empty string if there are no custom indicators or the call fails.
   */
  private async fetchCustomIndicatorsSection(): Promise<string> {
    try {
      const raw = await this.callMCPTool('python_list', { category: 'indicator' });
      // The MCP result is unwrapped from either {content:[{text}]} or a bare
      // [{text}] array; when no JSON text payload exists, fall back to `raw`.
      const r = raw as any;
      const text = r?.content?.[0]?.text ?? r?.[0]?.text;
      const parsed = typeof text === 'string' ? JSON.parse(text) : raw;
      const items: any[] = parsed?.items ?? [];
      if (items.length === 0) return '';
      const lines: string[] = ['\n\n## Custom Indicators\n'];
      lines.push('The user has defined the following custom indicators. Use `ta.custom_<name>` where `<name>` is the lowercase sanitized function name shown below.\n');
      for (const item of items) {
        const displayName: string = item.name ?? 'unknown';
        const description: string = item.description ?? '';
        const meta: any = item.metadata ?? {};
        // Derive the ta attribute name: sanitize display name to lowercase + underscores
        const taAttr = `custom_${displayName.toLowerCase().replace(/[^\w]/g, '_').replace(/_+/g, '_').replace(/^_+|_+$/g, '')}`;
        const inputSeries: string[] = meta.input_series ?? ['close'];
        const params: Record<string, any> = meta.parameters ?? {};
        const pane: string = meta.pane ?? 'separate';
        // Build an example call, e.g. ta.custom_foo(df['close'], length=14),
        // using each parameter's declared default value.
        const inputStr = inputSeries.map((s: string) => `df['${s}']`).join(', ');
        const paramStr = Object.entries(params)
          .map(([k, v]: [string, any]) => `${k}=${JSON.stringify(v?.default ?? null)}`)
          .join(', ');
        const callExample = paramStr
          ? `ta.${taAttr}(${inputStr}, ${paramStr})`
          : `ta.${taAttr}(${inputStr})`;
        const outputNames = (meta.output_columns ?? [{ name: 'value' }])
          .map((c: any) => c.name)
          .join(', ');
        lines.push(`### ${displayName}`);
        if (description) lines.push(description);
        lines.push(`- **Call**: \`${callExample}\``);
        lines.push(`- **Outputs**: ${outputNames} | **Pane**: ${pane}`);
        lines.push('');
      }
      return lines.join('\n');
    } catch (err) {
      // Best effort: a missing section only degrades the prompt, never the run.
      this.logger.warn({ err }, 'Failed to fetch custom indicators for prompt injection');
      return '';
    }
  }

  /**
   * Augment system message with custom indicators section.
   * Delegates to the base implementation, then appends the custom-indicator
   * markdown (when any indicators exist) to the system prompt text.
   */
  protected async buildSystemMessage(
    context: SubagentContext,
    instruction: string
  ): Promise<{ systemMessage: SystemMessage; humanMessage: BaseMessage }> {
    const { systemMessage, humanMessage } = await super.buildSystemMessage(context, instruction);
    const customIndicatorsSection = await this.fetchCustomIndicatorsSection();
    if (customIndicatorsSection) {
      // Content may be a plain string or structured parts; flatten to text either way.
      const base = typeof systemMessage.content === 'string'
        ? systemMessage.content
        : JSON.stringify(systemMessage.content);
      return { systemMessage: new SystemMessage(base + customIndicatorsSection), humanMessage };
    }
    return { systemMessage, humanMessage };
  }

  /**
   * Execute research request using LangGraph's createReactAgent.
   * Wraps executeAgent to manage image capture state.
   *
   * @returns the final text produced by the agent run
   */
  async execute(context: SubagentContext, instruction: string): Promise<string> {
    // Clear previous images (in-place so tool wrappers keep the same array reference)
    this.imageCapture.length = 0;
    this.lastImages = [];
    const finalText = await this.executeAgent(context, instruction);
    // Images were captured in real-time by the MCP tool wrappers into this.imageCapture
    this.lastImages = [...this.imageCapture];
    this.logger.info(
      { textLength: finalText.length, imageCount: this.lastImages.length },
      'Research subagent finished'
    );
    return finalText;
  }

  /**
   * Execute with full result including images
   * This is the method that ResearchSkill should use
   */
  async executeWithImages(context: SubagentContext, instruction: string): Promise<ResearchResult> {
    const text = await this.execute(context, instruction);
    return {
      text,
      images: this.lastImages,
    };
  }

  /**
   * Get images from last execution
   */
  getLastImages(): Array<{data: string; mimeType: string}> {
    return this.lastImages;
  }

  /**
   * Stream typed HarnessEvents using LangGraph's agent.stream().
   * Emits subagent_tool_call when tools fire, subagent_chunk for the final AI response.
   * Returns the final text string as the generator return value.
   */
  async *streamEvents(context: SubagentContext, instruction: string, signal?: AbortSignal): AsyncGenerator<HarnessEvent, string> {
    this.logger.info({ subagent: this.getName() }, 'streamEvents starting');
    if (!this.hasMCPClient()) {
      throw new Error('MCP client not available for research subagent');
    }
    // Reset capture state in place (wrappers share the imageCapture reference).
    this.imageCapture.length = 0;
    this.lastImages = [];
    // Emit immediately so the UI shows the subagent has started — LLM generation
    // can take minutes with non-streaming models and nothing else reaches the UI until
    // the first `updates` event fires (after the LLM finishes its first response).
    yield { type: 'subagent_tool_call', agentName: this.config.name, toolName: 'Thinking...', label: 'Thinking...' };
    const finalText = yield* this.streamEventsCore(context, instruction, signal);
    this.lastImages = [...this.imageCapture];
    if (!finalText) {
      this.logger.warn(
        { imageCount: this.lastImages.length },
        'Research subagent: model returned empty output'
      );
    } else {
      this.logger.info(
        { textLength: finalText.length, imageCount: this.lastImages.length },
        'streamEvents finished'
      );
    }
    return finalText;
  }

  /**
   * Stream research execution (raw model streaming, no agent loop)
   *
   * NOTE(review): this path calls buildMessages() directly, so the custom
   * indicators section appended by buildSystemMessage() is presumably not
   * included here — confirm that is intended.
   */
  async *stream(context: SubagentContext, instruction: string): AsyncGenerator<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
      },
      'Streaming research request'
    );
    if (!this.hasMCPClient()) {
      throw new Error('MCP client not available for research subagent');
    }
    // Clear previous images
    this.lastImages = [];
    const messages = this.buildMessages(context, instruction);
    const stream = await this.model.stream(messages);
    for await (const chunk of stream) {
      // Only plain-text chunks are forwarded; structured content is skipped.
      if (typeof chunk.content === 'string') {
        yield chunk.content;
      }
    }
  }
}
/**
* Factory function to create and initialize ResearchSubagent
*/
export async function createResearchSubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string,
mcpClient?: MCPClientConnector,
tools?: any[],
imageCapture?: Array<{data: string; mimeType: string}>
): Promise<ResearchSubagent> {
const config = await BaseSubagent.loadConfig(basePath);
const subagent = new ResearchSubagent(config, model, logger, mcpClient, tools);
if (imageCapture !== undefined) {
subagent.setImageCapture(imageCapture);
}
await subagent.initialize(basePath);
return subagent;
}

View File

@@ -1,37 +0,0 @@
name: strategy
description: Writes and manages PandasStrategy classes, runs backtests, and manages strategy activation
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 16384
# Memory files loaded from memory/ directory
memoryFiles: []
# System prompt
systemPromptFile: system-prompt.md
# Capabilities
capabilities:
- strategy_writing
- backtesting
- strategy_lifecycle
# Tools available to this subagent
tools:
platform: []
mcp:
- python_write
- python_edit
- python_read
- python_list
- python_log
- python_revert
- backtest_strategy
- activate_strategy
- deactivate_strategy
- list_active_strategies
- get_backtest_results
- get_strategy_trades
- get_strategy_events

View File

@@ -1,31 +0,0 @@
import { BaseSubagent } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import type { MCPClientConnector } from '../../mcp-client.js';
/**
 * Strategy Subagent
 *
 * Focused on authoring PandasStrategy classes, executing backtests, and
 * handling strategy activation/deactivation.
 */
export class StrategySubagent extends BaseSubagent {
  /** Allow up to 30 agent-loop steps for strategy work. */
  protected getRecursionLimit(): number {
    return 30;
  }

  /** Returned when the model produces no final message. */
  protected getFallbackText(): string {
    return 'Strategy task completed.';
  }
}
/**
* Factory function to create and initialize StrategySubagent
*/
export async function createStrategySubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string,
mcpClient?: MCPClientConnector,
tools?: any[]
): Promise<StrategySubagent> {
const config = await BaseSubagent.loadConfig(basePath);
const subagent = new StrategySubagent(config, model, logger, mcpClient, tools);
await subagent.initialize(basePath);
return subagent;
}

View File

@@ -1,30 +0,0 @@
# Web Explore Subagent Configuration
name: web-explore
description: Searches the web and academic papers, fetches content, and returns a textual summary
# Model configuration
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 8192
# No memory files needed
memoryFiles: []
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- web_search
- page_fetch
- academic_search
- content_summarization
# Tools available to this subagent (all platform tools, no MCP needed)
tools:
platform:
- web_search
- fetch_page
- arxiv_search
mcp: []

View File

@@ -1,42 +0,0 @@
import { BaseSubagent, type SubagentConfig } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
/**
 * Web Explore Subagent
 *
 * Takes a research instruction, queries the web (DuckDuckGo) or arXiv for
 * academic topics, fetches promising page/PDF content, and returns a
 * markdown summary with cited sources.
 *
 * Runs entirely on platform tools — no MCP client is required.
 */
export class WebExploreSubagent extends BaseSubagent {
  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger,
    tools?: any[]
  ) {
    // Pass `undefined` for the MCP client: this subagent never uses one.
    super(config, model, logger, undefined, tools);
  }

  /** Web exploration is kept short: at most 15 agent-loop steps. */
  protected getRecursionLimit(): number {
    return 15;
  }

  /** Returned when the model produces no final message. */
  protected getFallbackText(): string {
    return 'No results found.';
  }

  /** Platform tools only — never demand an MCP connection. */
  protected requiresMCPClient(): boolean {
    return false;
  }
}
/**
* Factory function to create and initialize WebExploreSubagent
*/
export async function createWebExploreSubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string,
tools?: any[]
): Promise<WebExploreSubagent> {
const config = await BaseSubagent.loadConfig(basePath);
const subagent = new WebExploreSubagent(config, model, logger, tools);
await subagent.initialize(basePath);
return subagent;
}

View File

@@ -1,33 +0,0 @@
# Web Explore Agent
You are a research assistant that searches the web and academic databases to answer questions or gather information according to the given instructions.
## Tools
You have three tools:
- **`web_search`** — Search the web broadly (Tavily). Returns titles, URLs, and content summaries. Best for general information, news, documentation, proprietary/niche topics, trading indicators, software papers, and anything not likely to be on arXiv.
- **`arxiv_search`** — Search arXiv for academic preprints. Returns titles, authors, abstracts, and PDF links. Use this **only** for peer-reviewed or academic research (e.g. machine learning, statistics, finance theory). Most trading indicators, technical analysis tools, and proprietary methods are NOT on arXiv.
- **`fetch_page`** — Fetch the full content of a URL (web page or PDF). PDFs are automatically converted to text. Use this after searching to read the complete content of a promising result.
## Strategy
1. **Choose the right search tool first:**
- Default to `web_search` for most queries — it covers the broadest range of sources including trading indicators, technical analysis, software documentation, and niche topics
- Use `arxiv_search` only when the instruction is explicitly academic in nature (e.g. "find papers on", "peer-reviewed research on", "academic study of")
   - If `arxiv_search` returns nothing clearly relevant after 1–2 queries → switch to `web_search` immediately
2. **Search, then fetch:** After getting results, call `fetch_page` on the 2–3 most promising URLs to get full content.
3. **Don't loop on the same query:** If a search returns results but nothing useful, change your approach — try different keywords or a different tool. Never repeat the same search query.
4. **Synthesize:** Write a clear, well-structured markdown summary that directly addresses the instruction. Cite sources with inline links.
## Output format
Return a markdown response with:
- A direct answer or summary addressing the instruction
- Key findings or takeaways
- Sources cited inline (e.g. `[Title](url)`)
Keep the response focused and concise — avoid padding or restating the question.

View File

@@ -0,0 +1,43 @@
/**
 * Human-friendly status labels for tool invocations, shown in the UI while a
 * tool is executing.
 *
 * Shared by the main agent (agent-harness) and subagents (spawn-service).
 */
const TOOL_LABELS: Record<string, string> = {
  MemoryLookup: 'Checking docs...',
  memory_lookup: 'Checking docs...',
  GetChartData: 'Fetching chart data...',
  SymbolLookup: 'Searching symbol...',
  WebSearch: 'Searching the web...',
  FetchPage: 'Fetching page...',
  ArxivSearch: 'Searching papers...',
  PythonList: 'Seeing what we have...',
  PythonEdit: 'Coding...',
  PythonWrite: 'Coding...',
  PythonRead: 'Inspecting...',
  ExecuteResearch: 'Running script...',
  BacktestStrategy: 'Backtesting...',
  ListActiveStrategies: 'Checking active strategies...',
  ActivateStrategy: 'Activating strategy...',
};

/** Spawn-tool labels, keyed by the value of its `agent` argument. */
const SPAWN_AGENT_LABELS: Record<string, string> = {
  indicator: 'Adjusting indicators...',
  research: 'Running analysis...',
  strategy: 'Working on strategy...',
  'web-explore': 'Searching the web...',
};

/**
 * Resolve the status label for a tool call.
 *
 * The Spawn tool is special-cased: its label comes from the `agent` argument
 * rather than the tool name. Unknown tools fall back to `Running <name>...`.
 */
export function getToolLabel(toolName: string, args?: Record<string, unknown>): string {
  if (toolName === 'Spawn') {
    const agent = args?.agent;
    if (agent) {
      return SPAWN_AGENT_LABELS[agent as string] ?? 'Working on it...';
    }
  }
  return TOOL_LABELS[toolName] ?? `Running ${toolName}...`;
}

View File

@@ -41,15 +41,7 @@ import {
EventRouter,
DeliveryService,
} from './events/index.js';
import { QdrantClient } from './clients/qdrant-client.js';
import { EmbeddingService, RAGRetriever, DocumentLoader } from './harness/memory/index.js';
import { initializeToolRegistry } from './tools/tool-registry.js';
import { join } from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Load configuration from YAML files
function loadConfig() {
@@ -137,13 +129,6 @@ function loadConfig() {
// Conversation history limit: number of prior turns loaded as LLM context and flushed to Iceberg
conversationHistoryLimit: configData.agent?.conversation_history_limit || parseInt(process.env.CONVERSATION_HISTORY_LIMIT || '20'),
// Qdrant configuration (for RAG)
qdrant: {
url: configData.qdrant?.url || process.env.QDRANT_URL || 'http://localhost:6333',
apiKey: secretsData.qdrant?.api_key || process.env.QDRANT_API_KEY,
collectionName: configData.qdrant?.collection || process.env.QDRANT_COLLECTION || 'gateway_memory',
},
// Iceberg configuration (for durable storage)
iceberg: {
catalogUri: configData.iceberg?.catalog_uri || process.env.ICEBERG_CATALOG_URI || 'http://iceberg-catalog:8181',
@@ -162,14 +147,6 @@ function loadConfig() {
notificationEndpoint: configData.relay?.notification_endpoint || process.env.RELAY_NOTIFICATION_ENDPOINT || 'tcp://relay:5558',
},
// Embedding configuration (for RAG)
embedding: {
provider: (configData.embedding?.provider || process.env.EMBEDDING_PROVIDER || 'ollama') as 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none',
model: configData.embedding?.model || process.env.EMBEDDING_MODEL,
apiKey: secretsData.embedding?.api_key || process.env.EMBEDDING_API_KEY || secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
ollamaUrl: configData.embedding?.ollama_url || process.env.OLLAMA_URL || 'http://localhost:11434',
},
// Kubernetes configuration
kubernetes: {
namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'sandbox',
@@ -265,9 +242,6 @@ const redis = new Redis(config.redisUrl, {
lazyConnect: true,
});
// Initialize Qdrant client (for RAG)
const qdrantClient = new QdrantClient(config.qdrant, app.log);
// Initialize Iceberg client (for durable storage)
// const icebergClient = new IcebergClient(config.iceberg, app.log);
@@ -294,10 +268,8 @@ const zmqRelayClient = new ZMQRelayClient({
app.log.info({
redis: config.redisUrl,
qdrant: config.qdrant.url,
iceberg: config.iceberg.catalogUri,
relay: config.relay.requestEndpoint,
embeddingProvider: config.embedding.provider,
}, 'Harness storage clients configured');
// Initialize Kubernetes client and container manager
@@ -456,77 +428,9 @@ app.get('/health', async () => {
processedEvents: eventRouter.getProcessedEventCount(),
};
// Add RAG stats if available
if (app.hasDecorator('ragRetriever')) {
try {
const ragStats = await (app as any).ragRetriever.getStats();
health.rag = {
vectorCount: ragStats.vectorCount,
indexedCount: ragStats.indexedCount,
};
} catch (error) {
// Ignore errors in health check
}
}
return health;
});
// Admin endpoints
app.post('/admin/reload-knowledge', async (_request, reply) => {
if (!app.hasDecorator('documentLoader')) {
return reply.code(503).send({
error: 'Document loader not initialized',
});
}
try {
app.log.info('Manual knowledge reload requested');
const stats = await (app as any).documentLoader.loadAll();
return {
success: true,
stats,
timestamp: new Date().toISOString(),
};
} catch (error: any) {
app.log.error({ error }, 'Failed to reload knowledge');
return reply.code(500).send({
error: 'Failed to reload knowledge',
message: error.message,
});
}
});
app.get('/admin/knowledge-stats', async (_request, reply) => {
if (!app.hasDecorator('documentLoader')) {
return reply.code(503).send({
error: 'Document loader not initialized',
});
}
try {
const loaderStats = (app as any).documentLoader.getStats();
const ragStats = await (app as any).ragRetriever.getStats();
return {
loader: loaderStats,
rag: {
vectorCount: ragStats.vectorCount,
indexedCount: ragStats.indexedCount,
collectionSize: ragStats.collectionSize,
},
timestamp: new Date().toISOString(),
};
} catch (error: any) {
app.log.error({ error }, 'Failed to get knowledge stats');
return reply.code(500).send({
error: 'Failed to get knowledge stats',
message: error.message,
});
}
});
// Graceful shutdown
const shutdown = async () => {
app.log.info('Shutting down gracefully...');
@@ -578,15 +482,6 @@ try {
app.log.warn({ error }, 'ZMQ Relay connection failed - historical data will not be available');
}
// Initialize Qdrant collection
app.log.debug('Initializing Qdrant...');
try {
await qdrantClient.initialize();
app.log.info('Qdrant collection initialized');
} catch (error) {
app.log.warn({ error }, 'Qdrant initialization failed - RAG will not be available');
}
// Initialize tool registry
app.log.debug('Initializing tool registry...');
try {
@@ -602,42 +497,8 @@ try {
// Main agent: platform tools + user's general MCP tools
toolRegistry.registerAgentTools({
agentName: 'main',
platformTools: ['symbol_lookup', 'get_chart_data'],
mcpTools: ['python_list', 'python_delete', 'backtest_strategy', 'list_active_strategies'],
});
// Research subagent: only MCP tools for script creation/execution
toolRegistry.registerAgentTools({
agentName: 'research',
platformTools: [], // No platform tools (works at script level)
mcpTools: ['python_*', 'execute_research'],
});
// Indicator subagent: workspace patch + category tools + evaluate_indicator
toolRegistry.registerAgentTools({
agentName: 'indicator',
platformTools: [],
mcpTools: ['workspace_read', 'workspace_patch', 'python_*', 'evaluate_indicator'],
});
// Web explore subagent: platform search/fetch tools only (no MCP needed)
toolRegistry.registerAgentTools({
agentName: 'web-explore',
platformTools: ['web_search', 'fetch_page', 'arxiv_search'],
mcpTools: [],
});
// Strategy subagent: all strategy-related MCP tools
toolRegistry.registerAgentTools({
agentName: 'strategy',
platformTools: [],
mcpTools: [
'python_write', 'python_edit', 'python_read', 'python_list',
'python_log', 'python_revert', 'python_delete',
'backtest_strategy', 'activate_strategy', 'deactivate_strategy',
'list_active_strategies', 'get_backtest_results',
'get_strategy_trades', 'get_strategy_events',
],
platformTools: ['SymbolLookup', 'GetChartData'],
mcpTools: ['PythonList', 'PythonDelete', 'BacktestStrategy', 'ListActiveStrategies'],
});
app.log.info(
@@ -655,37 +516,6 @@ try {
// Non-fatal - continue without tools
}
// Initialize RAG system and load global knowledge
app.log.debug('Initializing RAG system...');
try {
// Initialize embedding service
const embeddingService = new EmbeddingService(config.embedding, app.log);
const vectorDimension = embeddingService.getDimensions();
// Initialize RAG retriever
const ragRetriever = new RAGRetriever(config.qdrant, app.log, vectorDimension);
await ragRetriever.initialize();
// Initialize document loader
const knowledgeDir = join(__dirname, '..', 'knowledge');
const documentLoader = new DocumentLoader(
{ knowledgeDir },
embeddingService,
ragRetriever,
app.log
);
// Load all knowledge documents
const loadStats = await documentLoader.loadAll();
app.log.info(loadStats, 'Global knowledge loaded into RAG');
// Store references for admin endpoints
app.decorate('documentLoader', documentLoader);
app.decorate('ragRetriever', ragRetriever);
} catch (error) {
app.log.warn({ error }, 'Failed to load global knowledge - RAG will use existing data');
}
// Start event system
app.log.debug('Starting event subscriber...');
await eventSubscriber.start();
@@ -705,7 +535,6 @@ try {
host: config.host,
eventRouterBind: config.eventRouterBind,
redis: config.redisUrl,
qdrant: config.qdrant.url,
},
'Gateway server started'
);

View File

@@ -20,31 +20,6 @@ export interface MCPToolInfo {
};
}
/**
* Strip the `details` field from all entries in a `_types` workspace store before
* syncing to clients. `details` is a long markdown blob intended for agent consumption
* only and should not be included in the compact workspace state sent to the web client.
*/
function filterTypeStoreState(storeName: string, state: unknown): unknown {
if (!storeName.endsWith('_types') || typeof state !== 'object' || state === null) {
return state;
}
const typed = state as Record<string, unknown>;
if (typeof typed['types'] !== 'object' || typed['types'] === null) {
return state;
}
const filteredTypes: Record<string, unknown> = {};
for (const [key, entry] of Object.entries(typed['types'] as Record<string, unknown>)) {
if (typeof entry === 'object' && entry !== null) {
const { details: _details, ...rest } = entry as Record<string, unknown>;
filteredTypes[key] = rest;
} else {
filteredTypes[key] = entry;
}
}
return { ...typed, types: filteredTypes };
}
/**
* Create a LangChain tool from an MCP tool definition
*/
@@ -68,7 +43,7 @@ export function createMCPToolWrapper(
logger.info({ tool: toolInfo.name }, 'MCP tool call completed');
// Fire workspace mutation callback when workspace_patch or workspace_write succeeds.
// Fire workspace mutation callback when WorkspacePatch or WorkspaceWrite succeeds.
// The sandbox returns {"success": true, "data": <newState>} as a text content item.
if (onWorkspaceMutation) {
const content = (result as any)?.content;
@@ -77,19 +52,19 @@ export function createMCPToolWrapper(
if (item.type === 'text' && item.text) {
try {
const parsed = JSON.parse(item.text);
// workspace_patch / workspace_write: {"success": true, "data": <state>}
// WorkspacePatch / WorkspaceWrite: {"success": true, "data": <state>}
if (
(toolInfo.name === 'workspace_patch' || toolInfo.name === 'workspace_write') &&
(toolInfo.name === 'WorkspacePatch' || toolInfo.name === 'WorkspaceWrite') &&
parsed?.success && parsed?.data !== undefined
) {
const storeName = (input as any).store_name as string;
onWorkspaceMutation(storeName, filterTypeStoreState(storeName, parsed.data));
onWorkspaceMutation(storeName, parsed.data);
}
// python_write / python_edit / python_delete / python_revert:
// PythonWrite / PythonEdit / PythonDelete / PythonRevert:
// {"_workspace_sync": {"store": <name>, "data": <state>}}
if (parsed?._workspace_sync?.store && parsed._workspace_sync.data !== undefined) {
const storeName = parsed._workspace_sync.store as string;
onWorkspaceMutation(storeName, filterTypeStoreState(storeName, parsed._workspace_sync.data));
onWorkspaceMutation(storeName, parsed._workspace_sync.data);
}
} catch { /* ignore parse errors */ }
}

View File

@@ -17,14 +17,14 @@ export function createArxivSearchTool(config: ArxivSearchToolConfig): DynamicStr
const { logger } = config;
return new DynamicStructuredTool({
name: 'arxiv_search',
description: 'Search arXiv for academic papers. Returns titles, authors, abstracts, and PDF links. Use this for scientific or technical research queries instead of web_search.',
name: 'ArxivSearch',
description: 'Search arXiv for academic papers. Returns titles, authors, abstracts, and PDF links. Use this for scientific or technical research queries instead of WebSearch.',
schema: z.object({
query: z.string().describe('The research query'),
max_results: z.number().optional().default(5).describe('Maximum number of papers to return (default: 5)'),
}),
func: async ({ query, max_results }) => {
logger.debug({ query, max_results }, 'Executing arxiv_search tool');
logger.debug({ query, max_results }, 'Executing ArxivSearch tool');
try {
const { ArxivRetriever } = await import('@langchain/community/retrievers/arxiv');
@@ -57,7 +57,7 @@ export function createArxivSearchTool(config: ArxivSearchToolConfig): DynamicStr
return JSON.stringify({ query, results });
} catch (error) {
logger.error({ error, query }, 'arxiv_search tool failed');
logger.error({ error, query }, 'ArxivSearch tool failed');
return JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
}
},

View File

@@ -21,13 +21,13 @@ export function createFetchPageTool(config: FetchPageToolConfig): DynamicStructu
const { logger } = config;
return new DynamicStructuredTool({
name: 'fetch_page',
description: 'Fetch a web page or PDF and return its text content. PDFs are automatically converted to markdown. Use this after web_search or arxiv_search to read the full content of a result.',
name: 'FetchPage',
description: 'Fetch a web page or PDF and return its text content. PDFs are automatically converted to markdown. Use this after WebSearch or ArxivSearch to read the full content of a result.',
schema: z.object({
url: z.string().url().describe('The URL to fetch'),
}),
func: async ({ url }) => {
logger.debug({ url }, 'Executing fetch_page tool');
logger.debug({ url }, 'Executing FetchPage tool');
try {
const response = await fetch(url, {
@@ -72,7 +72,7 @@ export function createFetchPageTool(config: FetchPageToolConfig): DynamicStructu
return JSON.stringify({ url, content: output, truncated });
} catch (error) {
logger.error({ error, url }, 'fetch_page tool failed');
logger.error({ error, url }, 'FetchPage tool failed');
return JSON.stringify({ error: error instanceof Error ? error.message : String(error), url });
}
},

View File

@@ -23,7 +23,7 @@ export function createGetChartDataTool(config: GetChartDataToolConfig): DynamicS
const { ohlcService, workspaceManager, logger } = config;
return new DynamicStructuredTool({
name: 'get_chart_data',
name: 'GetChartData',
description: `Fetch OHLCV+ data for current chart or any ticker/timeframe. All parameters are optional and default to workspace chart state.
**IMPORTANT: Use this tool ONLY for quick, casual data viewing. For any analysis, plotting, statistics, or deep research, use the 'research' tool instead.**
@@ -50,7 +50,7 @@ Parameters:
// Enforce hard cap — never return more than MAX_BARS bars
const effectiveCountback = countback !== undefined ? Math.min(countback, MAX_BARS) : MAX_BARS;
logger.debug({ ticker, period, from_time, to_time, countback: effectiveCountback, columns }, 'Executing get_chart_data tool');
logger.debug({ ticker, period, from_time, to_time, countback: effectiveCountback, columns }, 'Executing GetChartData tool');
try {
// Get workspace chart state

View File

@@ -9,3 +9,13 @@ export {
createGetChartDataTool,
type GetChartDataToolConfig,
} from './get-chart-data.tool.js';
export {
createSpawnTool,
type SpawnToolConfig,
} from './spawn.tool.js';
export {
createMemoryLookupTool,
type MemoryLookupToolConfig,
} from './memory-lookup.tool.js';

View File

@@ -1,67 +0,0 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { IndicatorSubagent } from '../../harness/subagents/indicator/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
import type { HarnessEvent } from '../../harness/harness-events.js';
/** Dependencies required to construct the indicator delegation tool. */
export interface IndicatorAgentToolConfig {
  // Subagent that all `indicator` tool calls are delegated to.
  indicatorSubagent: IndicatorSubagent;
  // Per-conversation context forwarded on every delegation.
  context: SubagentContext;
  logger: FastifyBaseLogger;
}
/**
 * Creates a LangChain tool that delegates to the indicator subagent.
 * Mirrors the pattern of research-agent.tool.ts.
 *
 * The returned tool carries an extra `streamFunc` generator so callers can
 * stream HarnessEvents from the subagent instead of waiting for `func`.
 */
export function createIndicatorAgentTool(config: IndicatorAgentToolConfig): DynamicStructuredTool & { streamFunc: (args: { instruction: string }) => AsyncGenerator<HarnessEvent, string> } {
  const { indicatorSubagent, context, logger } = config;

  // Manually pump the child generator: forward every yielded HarnessEvent and
  // capture the generator's return value (the final text) from the `done` step.
  async function* streamFunc({ instruction }: { instruction: string }, signal?: AbortSignal): AsyncGenerator<HarnessEvent, string> {
    logger.info({ instruction: instruction.substring(0, 100) }, 'Streaming indicator subagent');
    const gen = indicatorSubagent.streamEvents(context, instruction, signal);
    let step: IteratorResult<HarnessEvent, string>;
    while (!(step = await gen.next()).done) {
      yield step.value;
    }
    return step.value;
  }

  const tool = new DynamicStructuredTool({
    name: 'indicator',
    description: `Delegate to the indicator subagent for all indicator-related tasks on the chart.
Use this tool for:
- Reading which indicators are currently on the chart and explaining what they show
- Adding indicators to the chart ("show RSI", "add Bollinger Bands with std=1.5")
- Modifying indicator parameters ("change MACD fast to 8", "set RSI length to 21")
- Removing indicators ("remove all moving averages", "clear the volume indicators")
- Toggling indicator visibility
- Creating custom indicators using Python scripts
- Recommending indicators for a given strategy or analysis goal
ALWAYS use this tool for any request about the chart's indicators.
NEVER modify the indicators workspace store directly.
NEVER use this tool to switch the chart symbol or timeframe — that is done via workspace_patch on chartState.`,
    schema: z.object({
      instruction: z.string().describe(
        'The indicator task to perform. Be specific about which indicators, parameters, ' +
        'and what changes are needed. Include relevant context like the current symbol ' +
        'if the user mentioned it.'
      ),
    }),
    // Non-streaming path: run the subagent to completion and return its text.
    func: async ({ instruction }: { instruction: string }): Promise<string> => {
      logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to indicator subagent');
      try {
        return await indicatorSubagent.execute(context, instruction);
      } catch (error) {
        // Log with context, then rethrow so the agent loop sees the failure.
        logger.error({ error, errorMessage: (error as Error)?.message }, 'Indicator subagent failed');
        throw error;
      }
    },
  });

  // Attach streamFunc alongside the standard tool interface.
  return Object.assign(tool, { streamFunc });
}

View File

@@ -0,0 +1,52 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { WikiLoader } from '../../harness/spawn/wiki-loader.js';
/** Dependencies required to construct the MemoryLookup tool. */
export interface MemoryLookupToolConfig {
  // Source of knowledge wiki pages (provides listPages/loadPage).
  wikiLoader: WikiLoader;
  logger: FastifyBaseLogger;
}
/**
* Creates the `memory_lookup` tool for the main agent.
*
* Allows the agent to read a specific knowledge wiki page on demand,
* or list available pages by passing "index".
*/
/**
 * Creates the `MemoryLookup` tool for the main agent.
 *
 * Allows the agent to read a specific knowledge wiki page on demand,
 * or list available pages by passing "index". Unknown page names return
 * the available-pages listing instead of a bare error.
 */
export function createMemoryLookupTool(config: MemoryLookupToolConfig): DynamicStructuredTool {
  const { wikiLoader, logger } = config;

  // Shared formatter for the page listing (used by both the "index" request
  // and the not-found fallback).
  const formatPageList = (pages: string[]): string =>
    pages.map(p => `- ${p}`).join('\n');

  return new DynamicStructuredTool({
    name: 'MemoryLookup',
    description: `Read a knowledge wiki page by name to get detailed reference information.
Pass "index" to list all available pages.
Example pages:
- "api-reference" — DataAPI and ChartingAPI reference for research scripts
- "usage-examples" — Example research scripts
- "pandas-ta-reference" — Full pandas-ta indicator catalog`,
    schema: z.object({
      page: z.string().describe(
        'Wiki page name to read (without .md extension). Pass "index" to list all pages.'
      ),
    }),
    func: async ({ page }: { page: string }): Promise<string> => {
      // Log under the tool's PascalCase name, consistent with the renamed tools.
      logger.info({ page }, 'MemoryLookup: reading page');
      if (page === 'index') {
        const pages = await wikiLoader.listPages();
        return `Available wiki pages:\n${formatPageList(pages)}`;
      }
      const wikiPage = await wikiLoader.loadPage(page);
      if (!wikiPage) {
        // Be helpful on a miss: show which pages actually exist.
        const pages = await wikiLoader.listPages();
        return `Page "${page}" not found.\n\nAvailable pages:\n${formatPageList(pages)}`;
      }
      return wikiPage.body;
    },
  });
}

View File

@@ -1,65 +0,0 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { ResearchSubagent } from '../../harness/subagents/research/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
import type { HarnessEvent } from '../../harness/harness-events.js';
/** Dependencies required to construct the research subagent tool. */
export interface ResearchAgentToolConfig {
  /** Subagent that writes and executes Python research scripts. */
  researchSubagent: ResearchSubagent;
  /** Execution context passed through to the subagent on every call. */
  context: SubagentContext;
  /** Logger used to record delegation and failures. */
  logger: FastifyBaseLogger;
}
/**
 * Creates a LangChain tool that delegates to the research subagent.
 *
 * This is the standard LangChain pattern for exposing a subagent as a tool
 * to a parent agent. The returned tool also carries a `streamFunc` so the
 * harness can forward intermediate subagent events to the client while the
 * tool runs.
 *
 * Fix: the declared `streamFunc` type now includes the optional
 * `signal?: AbortSignal` parameter that the implementation already accepted —
 * previously typed callers could not propagate cancellation, unlike the
 * sibling strategy/web-explore tools.
 */
export function createResearchAgentTool(config: ResearchAgentToolConfig): DynamicStructuredTool & { streamFunc: (args: { name: string; instruction: string }, signal?: AbortSignal) => AsyncGenerator<HarnessEvent, string> } {
  const { researchSubagent, context, logger } = config;

  // Prefix the instruction with the script name so the subagent revises the
  // same script across calls instead of creating a new one each time.
  const prompt = (name: string, instruction: string) => `Research script name: "${name}"\n\n${instruction}`;

  async function* streamFunc({ name, instruction }: { name: string; instruction: string }, signal?: AbortSignal): AsyncGenerator<HarnessEvent, string> {
    logger.info({ name, instruction: instruction.substring(0, 100) }, 'Streaming research subagent');
    const gen = researchSubagent.streamEvents(context, prompt(name, instruction), signal);
    let step: IteratorResult<HarnessEvent, string>;
    // Forward every intermediate event; the generator's return value is the
    // subagent's final answer.
    while (!(step = await gen.next()).done) {
      yield step.value;
    }
    const finalText = step.value;
    // Charts rendered during the run are captured separately by the subagent.
    const images = researchSubagent.getLastImages();
    return JSON.stringify({ text: finalText, images });
  }

  const tool = new DynamicStructuredTool({
    name: 'research',
    description: `Delegate to the research subagent for data analysis, charting, statistics, and Python script execution.
Use this tool for:
- Plotting charts with technical indicators (EMA, RSI, MACD, Bollinger Bands, etc.)
- Statistical analysis of price data
- Custom research scripts using the DataAPI and ChartingAPI
- Any task requiring code execution or matplotlib charts
The research subagent will write and execute Python scripts, capture output and charts, and return results.`,
    schema: z.object({
      name: z.string().describe('The name of the research script to create or update (e.g. "btc_ema_analysis"). Use the same name across calls to revise the same script rather than creating a new one.'),
      instruction: z.string().describe('The research task or analysis to perform. Be specific about what data, indicators, timeframes, and output you want.'),
    }),
    func: async ({ name, instruction }: { name: string; instruction: string }): Promise<string> => {
      logger.info({ name, instruction: instruction.substring(0, 100) }, 'Delegating to research subagent');
      try {
        const result = await researchSubagent.executeWithImages(context, prompt(name, instruction));
        return JSON.stringify({ text: result.text, images: result.images });
      } catch (error) {
        logger.error({ error, errorMessage: (error as Error)?.message }, 'Research subagent failed');
        throw error;
      }
    },
  });
  return Object.assign(tool, { streamFunc });
}

View File

@@ -0,0 +1,111 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { SpawnService } from '../../harness/spawn/spawn-service.js';
import type { MCPClientConnector } from '../../harness/mcp-client.js';
import type { WorkspaceManager } from '../../workspace/workspace-manager.js';
import type { MCPToolInfo } from '../mcp/mcp-tool-wrapper.js';
import type { HarnessEvent } from '../../harness/harness-events.js';
/** Dependencies required to construct the `Spawn` tool. */
export interface SpawnToolConfig {
  /** Service that runs an isolated subagent and streams its events. */
  spawnService: SpawnService;
  /** MCP connector forwarded to the spawned subagent's tool set. */
  mcpClient: MCPClientConnector;
  /** Catalog of MCP tools the subagent may be granted access to. */
  availableMCPTools: MCPToolInfo[];
  /** Optional session workspace forwarded to workspace-aware tools. */
  workspaceManager?: WorkspaceManager;
  /** Logger used to record spawns and failures. */
  logger: FastifyBaseLogger;
}
/**
 * Creates the `spawn` tool for the main agent.
 *
 * The tool accepts an agent name and instruction and delegates to SpawnService,
 * which runs an isolated subagent and returns only its final result.
 *
 * Implements the `streamFunc` protocol so the harness can forward intermediate
 * subagent events (subagent_chunk, subagent_thinking, subagent_tool_call) to
 * the WebSocket client during tool execution.
 *
 * Fix: on abort, the previous implementation broke out of the loop with
 * `step.done === false` and then returned `step.value as string` — i.e. the
 * last *yielded* HarnessEvent object asserted to string. It now returns an
 * empty string after closing the inner generator.
 */
export function createSpawnTool(config: SpawnToolConfig): DynamicStructuredTool & {
  streamFunc: (args: { agent: string; instruction: string }, signal?: AbortSignal) => AsyncGenerator<HarnessEvent, string>;
} {
  const { spawnService, mcpClient, availableMCPTools, workspaceManager, logger } = config;

  async function* streamFunc(
    { agent, instruction }: { agent: string; instruction: string },
    signal?: AbortSignal,
  ): AsyncGenerator<HarnessEvent, string> {
    logger.info({ agent, instruction: instruction.substring(0, 100) }, 'spawn: streaming subagent');
    const gen = spawnService.streamSpawn({
      agentName: agent,
      instruction,
      mcpClient,
      availableMCPTools,
      workspaceManager,
      signal,
    });
    let step = await gen.next();
    while (!step.done) {
      if (signal?.aborted) {
        // Close the inner generator (running its cleanup) and report an
        // empty result rather than the last yielded event object.
        await gen.return('');
        return '';
      }
      yield step.value;
      step = await gen.next();
    }
    // `step.done === true` here, so `step.value` is the generator's string
    // return value (the subagent's final answer).
    return step.value ?? '';
  }

  const tool = new DynamicStructuredTool({
    name: 'Spawn',
    description: `Delegate a specialized task to a subagent and return its result.
Available agents:
- **research** — statistical analysis, data visualization, Python scripting, charting
- **indicator** — manage chart indicators: add, remove, modify, create custom indicators
- **strategy** — write PandasStrategy trading strategies, run backtests, activate paper trading
- **web-explore** — search the web, fetch pages, find academic papers
The subagent runs in isolation. Only its final answer is returned — intermediate steps do not appear in this context.
Examples:
- spawn({ agent: "research", instruction: "Plot BTC/USDT RSI over the last 2 years" })
- spawn({ agent: "indicator", instruction: "Add Bollinger Bands with std=1.5 to the chart" })
- spawn({ agent: "strategy", instruction: "Write an RSI mean-reversion strategy and backtest it" })
- spawn({ agent: "web-explore", instruction: "Find documentation on the Donchian Channel indicator" })`,
    schema: z.object({
      agent: z.enum(['research', 'indicator', 'strategy', 'web-explore']).describe(
        'The specialized subagent to invoke'
      ),
      instruction: z.string().describe(
        'Detailed instruction for the subagent. Include all relevant context from the conversation.'
      ),
    }),
    func: async ({ agent, instruction }: { agent: string; instruction: string }): Promise<string> => {
      logger.info({ agent, instruction: instruction.substring(0, 100) }, 'spawn: invoking subagent');
      try {
        const gen = spawnService.streamSpawn({
          agentName: agent,
          instruction,
          mcpClient,
          availableMCPTools,
          workspaceManager,
        });
        // Drain intermediate events; only the generator's return value
        // (the subagent's final answer) is surfaced to the caller.
        let step = await gen.next();
        while (!step.done) {
          step = await gen.next();
        }
        return step.value ?? '';
      } catch (error) {
        logger.error({ error, agent }, 'spawn: subagent failed');
        throw error;
      }
    },
  });
  return Object.assign(tool, { streamFunc });
}

View File

@@ -1,66 +0,0 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { StrategySubagent } from '../../harness/subagents/strategy/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
import type { HarnessEvent } from '../../harness/harness-events.js';
/** Dependencies required to construct the strategy subagent tool. */
export interface StrategyAgentToolConfig {
  /** Subagent that writes, backtests, and activates trading strategies. */
  strategySubagent: StrategySubagent;
  /** Execution context passed through to the subagent on every call. */
  context: SubagentContext;
  /** Logger used to record delegation and failures. */
  logger: FastifyBaseLogger;
}
/**
 * Creates a LangChain tool that delegates to the strategy subagent.
 * Mirrors the pattern of indicator-agent.tool.ts: the returned tool carries a
 * `streamFunc` so the harness can forward intermediate subagent events while
 * the tool executes.
 */
export function createStrategyAgentTool(config: StrategyAgentToolConfig): DynamicStructuredTool & { streamFunc: (args: { instruction: string }, signal?: AbortSignal) => AsyncGenerator<HarnessEvent, string> } {
  const { strategySubagent, context, logger } = config;

  async function* streamFunc({ instruction }: { instruction: string }, signal?: AbortSignal): AsyncGenerator<HarnessEvent, string> {
    logger.info({ instruction: instruction.substring(0, 100) }, 'Streaming strategy subagent');
    const events = strategySubagent.streamEvents(context, instruction, signal);
    // Forward each intermediate event; when the generator completes, its
    // return value is the subagent's final answer.
    let next = await events.next();
    while (!next.done) {
      yield next.value;
      next = await events.next();
    }
    return next.value;
  }

  const tool = new DynamicStructuredTool({
    name: 'strategy',
    description: `Delegate to the strategy subagent for all trading strategy tasks.
Use this tool for:
- Writing new PandasStrategy classes ("create a strategy that...")
- Editing or improving existing strategies
- Running backtests on a strategy
- Interpreting backtest results (Sharpe ratio, drawdown, trade list)
- Activating or deactivating strategies for paper trading
- Monitoring running strategy PnL and trade logs
- Checking which strategies already exist
ALWAYS use this tool for any request about trading strategies, backtesting, or strategy activation.
NEVER write strategy Python code or call backtest_strategy directly — delegate here instead.`,
    schema: z.object({
      instruction: z.string().describe(
        'The strategy task to perform. Be specific: include the strategy name, ' +
        'desired signals (e.g. RSI < 30 = buy), timeframe, and symbol if known. ' +
        'For backtest requests include the date range and starting capital.'
      ),
    }),
    func: async ({ instruction }: { instruction: string }): Promise<string> => {
      logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to strategy subagent');
      try {
        const answer = await strategySubagent.execute(context, instruction);
        return answer;
      } catch (error) {
        logger.error({ error, errorMessage: (error as Error)?.message }, 'Strategy subagent failed');
        throw error;
      }
    },
  });
  return Object.assign(tool, { streamFunc });
}

View File

@@ -21,7 +21,7 @@ export function createSymbolLookupTool(config: SymbolLookupToolConfig): DynamicS
const { symbolIndexService, logger } = config;
return new DynamicStructuredTool({
name: 'symbol_lookup',
name: 'SymbolLookup',
description: `Search for market symbols or resolve symbol metadata. Use 'search' mode to find symbols matching a query, or 'resolve' mode to get detailed metadata for a specific symbol.
Parameters:
@@ -34,7 +34,7 @@ Parameters:
limit: z.number().optional().default(30).describe('Maximum number of search results (search mode only, default: 30)'),
}),
func: async ({ mode, query, limit }) => {
logger.debug({ mode, query, limit }, 'Executing symbol_lookup tool');
logger.debug({ mode, query, limit }, 'Executing SymbolLookup tool');
try {
if (mode === 'search') {

View File

@@ -1,67 +0,0 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { FastifyBaseLogger } from 'fastify';
import type { WebExploreSubagent } from '../../harness/subagents/web-explore/index.js';
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
import type { HarnessEvent } from '../../harness/harness-events.js';
/** Dependencies required to construct the web-explore subagent tool. */
export interface WebExploreAgentToolConfig {
  /** Subagent that searches the web or arXiv and summarizes findings. */
  webExploreSubagent: WebExploreSubagent;
  /** Execution context passed through to the subagent on every call. */
  context: SubagentContext;
  /** Logger used to record delegation and failures. */
  logger: FastifyBaseLogger;
}
/**
 * Creates a LangChain tool that delegates to the web-explore subagent.
 * The subagent decides whether to use web search or arXiv based on the
 * instruction. The returned tool carries a `streamFunc` so the harness can
 * forward intermediate subagent events while the tool executes.
 */
export function createWebExploreAgentTool(config: WebExploreAgentToolConfig): DynamicStructuredTool & { streamFunc: (args: { instruction: string }, signal?: AbortSignal) => AsyncGenerator<HarnessEvent, string> } {
  const { webExploreSubagent, context, logger } = config;

  async function* streamFunc({ instruction }: { instruction: string }, signal?: AbortSignal): AsyncGenerator<HarnessEvent, string> {
    logger.info({ instruction: instruction.substring(0, 100) }, 'Streaming web-explore subagent');
    const events = webExploreSubagent.streamEvents(context, instruction, signal);
    for (;;) {
      const next = await events.next();
      // Completion carries the subagent's final answer as the return value.
      if (next.done) {
        return next.value;
      }
      yield next.value;
    }
  }

  const tool = new DynamicStructuredTool({
    name: 'web_explore',
    description: `Search the EXTERNAL web or academic databases and return a summarized answer.
Use this tool ONLY for external, public information:
- Current events, news, or real-time information
- External documentation, tutorials, or how-to guides for third-party libraries/tools
- Academic papers, research findings, or scientific topics
- Any topic requiring external sources
NEVER use this tool for:
- Questions about the Dexorder platform itself (workspace tools, chartState, indicators, strategies)
- Internal API usage (workspace_patch, workspace_read, etc.) — consult the system prompt instead
- Anything that can be answered from the context already available
The subagent will search the web (or arXiv for academic queries), fetch relevant content, and return a markdown summary with cited sources.`,
    schema: z.object({
      instruction: z.string().describe(
        'What to search for and summarize. Be specific — include the topic, what aspects matter, ' +
        'and any context that helps narrow the search (e.g. "recent papers on momentum factor in equities" ' +
        'or "how to configure rate limiting in Fastify").'
      ),
    }),
    func: async ({ instruction }: { instruction: string }): Promise<string> => {
      logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to web-explore subagent');
      try {
        return await webExploreSubagent.execute(context, instruction);
      } catch (error) {
        logger.error({ error, errorMessage: (error as Error)?.message }, 'Web explore subagent failed');
        throw error;
      }
    },
  });
  return Object.assign(tool, { streamFunc });
}

View File

@@ -18,14 +18,14 @@ export function createWebSearchTool(config: WebSearchToolConfig): DynamicStructu
const { apiKey, logger } = config;
return new DynamicStructuredTool({
name: 'web_search',
description: 'Search the web. Returns titles, URLs, and content summaries. Use this for general web searches. For academic/scientific papers, prefer arxiv_search instead.',
name: 'WebSearch',
description: 'Search the web. Returns titles, URLs, and content summaries. Use this for general web searches. For academic/scientific papers, prefer ArxivSearch instead.',
schema: z.object({
query: z.string().describe('The search query'),
max_results: z.number().optional().default(8).describe('Maximum number of results to return (default: 8)'),
}),
func: async ({ query, max_results }) => {
logger.debug({ query, max_results }, 'Executing web_search tool');
logger.debug({ query, max_results }, 'Executing WebSearch tool');
try {
const response = await fetch('https://api.tavily.com/search', {
@@ -57,7 +57,7 @@ export function createWebSearchTool(config: WebSearchToolConfig): DynamicStructu
return JSON.stringify({ query, results: items });
} catch (error) {
logger.error({ error, query, errorMessage: error instanceof Error ? error.message : String(error) }, 'web_search tool failed');
logger.error({ error, query, errorMessage: error instanceof Error ? error.message : String(error) }, 'WebSearch tool failed');
return JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
}
},

View File

@@ -22,7 +22,7 @@ export interface AgentToolConfig {
/** Platform tool names to include */
platformTools: string[];
/** MCP tool patterns/names to include (supports wildcards like 'python_*') */
/** MCP tool patterns/names to include (supports wildcards like 'Python*') */
mcpTools: string[];
}
@@ -152,7 +152,7 @@ export class ToolRegistry {
let tool: DynamicStructuredTool | null = null;
switch (toolName) {
case 'symbol_lookup': {
case 'SymbolLookup': {
const symbolIndexService = this.resolveService(this.platformServices.symbolIndexService);
if (symbolIndexService) {
tool = createSymbolLookupTool({
@@ -160,12 +160,12 @@ export class ToolRegistry {
logger: this.logger,
});
} else {
this.logger.warn('SymbolIndexService not available for symbol_lookup tool');
this.logger.warn('SymbolIndexService not available for SymbolLookup tool');
}
break;
}
case 'get_chart_data': {
case 'GetChartData': {
const ohlcService = this.resolveService(this.platformServices.ohlcService);
// Use session workspace manager if provided, otherwise try global
const workspaceManager = sessionWorkspaceManager ||
@@ -179,27 +179,27 @@ export class ToolRegistry {
} else {
this.logger.warn(
{ hasOHLC: !!ohlcService, hasWorkspace: !!workspaceManager },
'OHLCService or WorkspaceManager not available for get_chart_data tool'
'OHLCService or WorkspaceManager not available for GetChartData tool'
);
}
break;
}
case 'web_search': {
case 'WebSearch': {
if (this.platformServices.tavilyApiKey) {
tool = createWebSearchTool({ apiKey: this.platformServices.tavilyApiKey, logger: this.logger });
} else {
this.logger.warn('TAVILY_API_KEY not configured — web_search tool unavailable');
this.logger.warn('TAVILY_API_KEY not configured — WebSearch tool unavailable');
}
break;
}
case 'fetch_page': {
case 'FetchPage': {
tool = createFetchPageTool({ logger: this.logger });
break;
}
case 'arxiv_search': {
case 'ArxivSearch': {
tool = createArxivSearchTool({ logger: this.logger });
break;
}
@@ -226,7 +226,7 @@ export class ToolRegistry {
/**
* Filter MCP tools based on patterns/names
* Supports wildcards like 'python_*' or exact names like 'execute_research'
* Supports wildcards like 'Python*' or exact names like 'ExecuteResearch'
*/
private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] {
if (patterns.length === 0) {
@@ -245,7 +245,7 @@ export class ToolRegistry {
/**
* Check if a tool name matches a pattern
* Supports wildcards: 'python_*' matches 'python_write', 'python_read', etc.
* Supports wildcards: 'Python*' matches 'PythonWrite', 'PythonRead', etc.
*/
private matchesPattern(toolName: string, pattern: string): boolean {
if (pattern === toolName) {
@@ -264,6 +264,40 @@ export class ToolRegistry {
return false;
}
/**
 * Resolve tools directly from explicit platform tool names and MCP patterns,
 * without requiring a pre-registered agent config.
 * Used by SpawnService to build tool lists from wiki frontmatter at spawn time.
 *
 * @param platformTools Exact platform tool names to instantiate; unknown names are logged and skipped.
 * @param mcpPatterns MCP tool names or wildcard patterns (e.g. 'Python*') to match against the catalog.
 * @param mcpClient Connector used to invoke MCP tools; when absent, MCP tools are omitted entirely.
 * @param availableMCPTools Catalog of MCP tools to filter with `mcpPatterns`.
 * @param workspaceManager Optional session workspace forwarded to workspace-aware platform tools.
 * @param onImage Callback forwarded to MCP tool wrappers for images they produce.
 * @param onWorkspaceMutation Callback forwarded to MCP tool wrappers for workspace store mutations.
 * @returns The instantiated tools (platform tools first, then matched MCP tools).
 */
async resolveTools(
platformTools: string[],
mcpPatterns: string[],
mcpClient?: MCPClientConnector,
availableMCPTools?: MCPToolInfo[],
workspaceManager?: WorkspaceManager,
onImage?: (image: { data: string; mimeType: string }) => void,
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
): Promise<DynamicStructuredTool[]> {
const tools: DynamicStructuredTool[] = [];
// Platform tools are resolved one by one; a missing tool is non-fatal.
for (const toolName of platformTools) {
const tool = await this.getPlatformTool(toolName, workspaceManager);
if (tool) {
tools.push(tool);
} else {
this.logger.warn({ tool: toolName }, 'resolveTools: platform tool not found');
}
}
// MCP tools require a client, a non-empty catalog, and at least one pattern.
if (mcpClient && availableMCPTools && availableMCPTools.length > 0 && mcpPatterns.length > 0) {
const filteredMCPTools = this.filterMCPTools(availableMCPTools, mcpPatterns);
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage, onWorkspaceMutation);
tools.push(...mcpToolInstances);
}
return tools;
}
/**
* Get all registered agent names
*/

View File

@@ -8,9 +8,9 @@
* Container-side storage: /data/workspace/{store_name}.json
*
* MCP Tools used:
* - workspace_read(store_name) -> dict
* - workspace_write(store_name, data) -> None
* - workspace_patch(store_name, patch) -> dict (new state)
* - WorkspaceRead(store_name) -> dict
* - WorkspaceWrite(store_name, data) -> None
* - WorkspacePatch(store_name, patch) -> dict (new state)
*/
import type { FastifyBaseLogger } from 'fastify';
@@ -82,7 +82,7 @@ export class ContainerSync {
try {
this.logger.debug({ store: storeName }, 'Loading store from container');
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_read', {
const result = this.parseMcpResult(await this.mcpClient.callTool('WorkspaceRead', {
store_name: storeName,
})) as { exists: boolean; data?: unknown; error?: string };
@@ -118,7 +118,7 @@ export class ContainerSync {
try {
this.logger.debug({ store: storeName }, 'Saving store to container');
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_write', {
const result = this.parseMcpResult(await this.mcpClient.callTool('WorkspaceWrite', {
store_name: storeName,
data: state,
})) as { success: boolean; error?: string };
@@ -150,7 +150,7 @@ export class ContainerSync {
try {
this.logger.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container');
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_patch', {
const result = this.parseMcpResult(await this.mcpClient.callTool('WorkspacePatch', {
store_name: storeName,
patch,
})) as { success: boolean; data?: unknown; error?: string };

View File

@@ -1,6 +1,6 @@
# Research Script API Usage
Research scripts executed via the `execute_research` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
Research scripts executed via the `ExecuteResearch` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
## Accessing the API

View File

@@ -28,7 +28,7 @@ async def activate_strategy(
Activate a strategy for live or paper forward trading.
Args:
strategy_name: Display name as saved via python_write("strategy", ...)
strategy_name: Display name as saved via PythonWrite("strategy", ...)
feeds: List of feed dicts: [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
allocation: Capital allocated in quote currency (e.g. 5000.0 USDT)
paper: True = paper/simulated fills (default); False = live (not yet implemented)

View File

@@ -30,7 +30,7 @@ async def backtest_strategy(
Load a saved strategy, fetch OHLC+ data for each feed, and run a backtest.
Args:
strategy_name: Display name as saved via python_write("strategy", ...)
strategy_name: Display name as saved via PythonWrite("strategy", ...)
feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
from_time: Backtest start (Unix timestamp or date string)
to_time: Backtest end (Unix timestamp or date string)

View File

@@ -139,7 +139,7 @@ async def evaluate_indicator(
"error": (
f"Custom indicator '{pandas_ta_name}' not found after registering "
"custom indicators. Make sure the indicator was created with "
"python_write(category='indicator', name='...') and that its "
"PythonWrite(category='indicator', name='...') and that its "
"implementation.py defines a function matching the sanitized name."
)
}))]

View File

@@ -18,6 +18,7 @@ After write/edit operations, a category-specific test harness runs to validate
the code and capture errors/output for agent feedback.
"""
import base64
import json
import logging
import re
@@ -62,7 +63,6 @@ class BaseMetadata:
"""Base metadata for all categories."""
name: str # Display name (can have special chars)
description: str # LLM-generated description
details: str = "" # Full markdown description with enough detail to reproduce the code
conda_packages: list[str] = None # Additional conda packages required
def __post_init__(self):
@@ -165,7 +165,12 @@ class IndicatorMetadata(BaseMetadata):
@dataclass
class ResearchMetadata(BaseMetadata):
"""Metadata for research scripts."""
pass
output: dict = None # Output files: {"analysis": "analysis.md", "images": ["img1.png", ...]}
def __post_init__(self):
super().__post_init__()
if self.output is None:
self.output = {}
# Metadata class registry
@@ -546,11 +551,19 @@ class CategoryFileManager:
except Exception as e:
return {"success": False, "error": f"Failed to write implementation: {e}"}
# Build metadata
# Write details.md (stored separately from metadata)
details_path = item_dir / "details.md"
try:
details_path.write_text(details or "")
log.info(f"Wrote details: {details_path}")
except Exception as e:
return {"success": False, "error": f"Failed to write details: {e}"}
# Build metadata (details stored separately in details.md)
meta_dict = metadata or {}
meta_dict["name"] = name
meta_dict["description"] = description
meta_dict["details"] = details
meta_dict.pop("details", None) # ensure details not stored in metadata
# For indicators, store the canonical pandas_ta_name so the reverse
# mapping (ta_name → directory) is reliable regardless of name casing.
@@ -583,7 +596,7 @@ class CategoryFileManager:
if validation["success"]:
if cat == Category.RESEARCH:
log.info(f"Auto-executing research script: {name}")
result["execution"] = await self.execute_research(name)
result["execution"] = await self.execute_research(name, commit=False)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test: {name}")
result["execution"] = await self._execute_indicator(item_dir)
@@ -652,6 +665,18 @@ class CategoryFileManager:
except Exception as e:
return {"success": False, "error": f"Failed to read existing metadata: {e}"}
# Load existing details from details.md; migrate from metadata.json if needed
details_path = item_dir / "details.md"
existing_details = ""
if details_path.exists():
existing_details = details_path.read_text()
elif existing_meta.get("details"):
existing_details = existing_meta.pop("details")
try:
details_path.write_text(existing_details)
except Exception:
pass # migration failure is non-fatal
# Apply string-replacement patches if provided
if patches is not None:
if not impl_path.exists():
@@ -682,7 +707,7 @@ class CategoryFileManager:
# Apply text-replacement patches to details field if provided
if detail_patches is not None:
current_details = existing_meta.get("details", "")
current_details = existing_details
for i, patch in enumerate(detail_patches):
old = patch.get("old_string", "")
new = patch.get("new_string", "")
@@ -693,14 +718,22 @@ class CategoryFileManager:
current_details = current_details.replace(old, new, 1)
details = current_details
# Update metadata
# Write details.md if details was updated
if details is not None:
try:
details_path.write_text(details)
log.info(f"Updated details.md: {details_path}")
except Exception as e:
return {"success": False, "error": f"Failed to write details: {e}"}
# Update metadata (details always stored in details.md, never in metadata)
updated_meta = existing_meta.copy()
updated_meta.pop("details", None)
if description is not None:
updated_meta["description"] = description
if details is not None:
updated_meta["details"] = details
if metadata:
updated_meta.update(metadata)
meta_updates = {k: v for k, v in metadata.items() if k != "details"}
updated_meta.update(meta_updates)
# Validate and write metadata
try:
@@ -730,7 +763,7 @@ class CategoryFileManager:
if code is not None and result["success"]:
if cat == Category.RESEARCH:
log.info(f"Auto-executing research script after edit: {name}")
result["execution"] = await self.execute_research(name)
result["execution"] = await self.execute_research(name, commit=False)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test after edit: {name}")
result["execution"] = await self._execute_indicator(item_dir)
@@ -778,9 +811,24 @@ class CategoryFileManager:
if meta_path.exists():
metadata = json.loads(meta_path.read_text())
# Read details from details.md; migrate from metadata.json if needed
details_path = item_dir / "details.md"
details = ""
if details_path.exists():
details = details_path.read_text()
elif metadata.get("details"):
details = metadata.pop("details")
try:
details_path.write_text(details)
meta_path.write_text(json.dumps(metadata, indent=2))
log.info(f"Migrated details to details.md for {item_dir.name}")
except Exception:
pass # migration failure is non-fatal
return {
"exists": True,
"code": code,
"details": details,
"metadata": metadata,
}
except Exception as e:
@@ -972,12 +1020,14 @@ class CategoryFileManager:
"images": data["images"],
}
async def execute_research(self, name: str) -> dict[str, Any]:
async def execute_research(self, name: str, commit: bool = True) -> dict[str, Any]:
"""
Execute a research script and return structured content with images.
Args:
name: Display name of the research script
commit: Whether to commit output files to git (default True; set False
when called from write()/edit() which commit everything together)
Returns:
dict with:
@@ -1040,10 +1090,139 @@ class CategoryFileManager:
log.error(f"execute_research '{name}': script failed with no output")
return {"error": "Research script execution failed"}
# Persist output to output/ subdir
if content:
output_dir = item_dir / "output"
output_dir.mkdir(exist_ok=True)
output_meta: dict[str, Any] = {}
if data.get("stdout"):
analysis_path = output_dir / "analysis.md"
try:
analysis_path.write_text(data["stdout"])
output_meta["analysis"] = "analysis.md"
except Exception as e:
log.warning(f"execute_research '{name}': failed to write analysis.md: {e}")
image_files = []
for i, img in enumerate(data.get("images", []), 1):
img_filename = f"img{i}.png"
img_path = output_dir / img_filename
try:
img_path.write_bytes(base64.b64decode(img["data"]))
image_files.append(img_filename)
except Exception as e:
log.warning(f"execute_research '{name}': failed to write {img_filename}: {e}")
if image_files:
output_meta["images"] = image_files
# Update metadata.json with output section
meta_path = item_dir / "metadata.json"
if meta_path.exists():
try:
meta = json.loads(meta_path.read_text())
meta["output"] = output_meta
meta_path.write_text(json.dumps(meta, indent=2))
except Exception as e:
log.warning(f"execute_research '{name}': failed to update metadata output: {e}")
# Commit output files
if commit:
try:
await self.git.commit_async(f"output(research): {name}")
except Exception as e:
log.warning(f"execute_research '{name}': git commit failed: {e}")
log.info(f"execute_research '{name}': returning {len(content)} content items")
return {"content": content}
def read_output(
    self,
    category: str,
    name: str,
    files: Optional[list[str]] = None
) -> dict[str, Any]:
    """
    Read previously generated output files for a category item.

    Args:
        category: Category name (must be a valid Category value).
        name: Display name of the item.
        files: Specific filenames under output/ to return (e.g.
            ["analysis.md", "img1.png"]). If omitted, returns the files
            listed in the item's metadata "output" section, falling back to
            every file in the output directory.

    Returns:
        dict with:
        - content: list of TextContent and ImageContent objects (MCP format)
        - files_returned: list of filenames actually returned
        - output_dir: str path to the output directory
        - error: str (on failure)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {"error": f"Invalid category '{category}'"}

    item_dir = get_category_path(self.src_dir, cat, name)
    if not item_dir.exists():
        return {"error": f"Item '{name}' not found in '{category}'"}

    output_dir = item_dir / "output"
    if not output_dir.exists():
        return {"error": f"No output directory for '{name}' — run the script first"}

    # Determine which files to return: explicit list > metadata manifest > all files.
    if files is None:
        meta_path = item_dir / "metadata.json"
        if meta_path.exists():
            try:
                meta = json.loads(meta_path.read_text())
                output_meta = meta.get("output") or {}
                files = []
                if output_meta.get("analysis"):
                    files.append(output_meta["analysis"])
                files.extend(output_meta.get("images") or [])
            except Exception:
                files = []
        if not files:
            # Fallback: return all files in output dir
            files = [p.name for p in sorted(output_dir.iterdir()) if p.is_file()]

    if not files:
        return {"error": f"No output files found for '{name}'"}

    from mcp.types import TextContent, ImageContent

    content = []
    files_returned = []
    for filename in files:
        file_path = output_dir / filename
        if not file_path.exists():
            log.warning(f"Output file not found: {file_path}")
            continue
        suffix = file_path.suffix.lower()
        if suffix in ('.md', '.txt'):
            content.append(TextContent(type="text", text=file_path.read_text()))
            files_returned.append(filename)
        elif suffix in ('.png', '.jpg', '.jpeg'):
            data = base64.b64encode(file_path.read_bytes()).decode()
            mime = "image/png" if suffix == '.png' else "image/jpeg"
            content.append(ImageContent(type="image", data=data, mimeType=mime))
            files_returned.append(filename)
        else:
            # Fix: the previous message was an f-string with no placeholder and
            # always printed the literal "(unknown)"; include the actual path.
            log.warning(f"Unsupported output file type: {file_path}")

    return {
        "content": content,
        "files_returned": files_returned,
        "output_dir": str(output_dir),
    }
async def delete(self, category: str, name: str) -> dict[str, Any]:
"""
Delete a category script directory and commit the removal to git.

View File

@@ -7,9 +7,9 @@ in the user container. These stores sync with the gateway and web client.
Storage location: {DATA_DIR}/workspace/{store_name}.json
Available tools:
- workspace_read(store_name) -> dict
- workspace_write(store_name, data) -> None
- workspace_patch(store_name, patch) -> dict
- WorkspaceRead(store_name) -> dict
- WorkspaceWrite(store_name, data) -> None
- WorkspacePatch(store_name, patch) -> dict
Future: Path-based triggers for container-side reactions to state changes.
"""
@@ -322,14 +322,14 @@ def register_workspace_tools(server):
@server.call_tool()
async def handle_tool_call(name: str, arguments: dict) -> Any:
"""Handle workspace tool calls."""
if name == "workspace_read":
if name == "WorkspaceRead":
return store.read(arguments.get("store_name", ""))
elif name == "workspace_write":
elif name == "WorkspaceWrite":
return store.write(
arguments.get("store_name", ""),
arguments.get("data")
)
elif name == "workspace_patch":
elif name == "WorkspacePatch":
return store.patch(
arguments.get("store_name", ""),
arguments.get("patch", [])
@@ -342,7 +342,7 @@ def register_workspace_tools(server):
"""List available workspace tools."""
return [
{
"name": "workspace_read",
"name": "WorkspaceRead",
"description": "Read a workspace store from persistent storage",
"inputSchema": {
"type": "object",
@@ -356,7 +356,7 @@ def register_workspace_tools(server):
}
},
{
"name": "workspace_write",
"name": "WorkspaceWrite",
"description": "Write a workspace store to persistent storage",
"inputSchema": {
"type": "object",
@@ -373,7 +373,7 @@ def register_workspace_tools(server):
}
},
{
"name": "workspace_patch",
"name": "WorkspacePatch",
"description": "Apply JSON patch operations to a workspace store",
"inputSchema": {
"type": "object",

View File

@@ -158,7 +158,7 @@ def _remove_indicator_instances(workspace_store, pandas_ta_name: str) -> None:
def _workspace_sync_content(workspace_store, category: str) -> "TextContent | None":
"""
Return a TextContent item carrying the current {category}_types workspace state so the
gateway can sync it to connected web clients without a separate workspace_patch call.
gateway can sync it to connected web clients without a separate WorkspacePatch call.
The gateway detects items of the form {"_workspace_sync": {"store": ..., "data": ...}}.
"""
store = _type_store_name(category)
@@ -304,7 +304,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
@server.list_resources()
async def list_resources():
"""List available resources"""
return [
resources = [
{
"uri": f"dexorder://user/{config.user_id}/hello",
"name": "Hello World",
@@ -312,6 +312,14 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"mimeType": "text/plain",
}
]
if _get_env_yml() is not None:
resources.append({
"uri": f"dexorder://user/{config.user_id}/environment.yml",
"name": "Conda Environment",
"description": "Base conda environment packages available in all scripts",
"mimeType": "text/yaml",
})
return resources
@server.read_resource()
async def read_resource(uri: str):
@@ -332,6 +340,15 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"mimeType": "text/plain",
"text": f"Hello from Dexorder user container!\nUser ID: {config.user_id}\n",
}
elif uri == f"dexorder://user/{config.user_id}/environment.yml":
env_yml = _get_env_yml()
if env_yml is None:
raise ValueError("environment.yml not found")
return {
"uri": uri,
"mimeType": "text/yaml",
"text": env_yml.read_text(encoding="utf-8"),
}
else:
raise ValueError(f"Unknown resource: {uri}")
@@ -340,7 +357,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"""List available tools including workspace and category tools"""
return [
Tool(
name="workspace_read",
name="WorkspaceRead",
description="Read a workspace store from persistent storage",
inputSchema={
"type": "object",
@@ -354,7 +371,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="workspace_write",
name="WorkspaceWrite",
description="Write a workspace store to persistent storage",
inputSchema={
"type": "object",
@@ -371,7 +388,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="workspace_patch",
name="WorkspacePatch",
description="Apply JSON patch operations to a workspace store",
inputSchema={
"type": "object",
@@ -398,7 +415,47 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_write",
name="PreferencesRead",
description="Read the user preferences markdown file. Returns the full content of preferences.md from the user's sandbox data directory.",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="PreferencesWrite",
description="Write (fully replace) the user preferences markdown file. Use this to create or overwrite preferences.md with new content.",
inputSchema={
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "Full markdown content for the preferences file"
}
},
"required": ["content"]
}
),
Tool(
name="PreferencesPatch",
description="Surgically update a section of the user preferences markdown file by finding and replacing text. Fails if old_str is not found.",
inputSchema={
"type": "object",
"properties": {
"old_str": {
"type": "string",
"description": "Exact text to find in the preferences file"
},
"new_str": {
"type": "string",
"description": "Replacement text"
}
},
"required": ["old_str", "new_str"]
}
),
Tool(
name="PythonWrite",
description="Write a new strategy, indicator, or research script with validation",
inputSchema={
"type": "object",
@@ -422,7 +479,8 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"Full markdown description of the code with sufficient detail that another coding agent "
"could functionally reproduce the implementation from this field alone. "
"Include: purpose, algorithm, all parameters and their semantics, data feed usage, "
"formulas, edge cases, and any non-obvious implementation choices (required)."
"formulas, edge cases, and any non-obvious implementation choices (required). "
"Stored as a separate details.md file alongside the implementation."
)
},
"code": {
@@ -446,7 +504,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_edit",
name="PythonEdit",
description=(
"Edit an existing category script. "
"Use 'patches' for targeted string replacements (preferred for small changes), "
@@ -525,8 +583,8 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_read",
description="Read a category script and its metadata",
name="PythonRead",
description="Read a category script, its metadata, and details. Returns: code, details (markdown), and metadata.",
inputSchema={
"type": "object",
"properties": {
@@ -544,7 +602,38 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_list",
name="PythonReadOutput",
description=(
"Read persisted output files from a previous research script execution. "
"Returns TextContent for .md/.txt files and ImageContent for images. "
"Output is saved automatically when ExecuteResearch or PythonWrite/PythonEdit runs a research script."
),
inputSchema={
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category of the script"
},
"name": {
"type": "string",
"description": "Display name of the item"
},
"files": {
"type": "array",
"items": {"type": "string"},
"description": (
"Specific filenames under output/ to return (e.g. [\"analysis.md\", \"img1.png\"]). "
"If omitted, returns all output files listed in metadata."
)
}
},
"required": ["category", "name"]
}
),
Tool(
name="PythonList",
description="List all items in a category with names and descriptions",
inputSchema={
"type": "object",
@@ -559,7 +648,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_log",
name="PythonLog",
description="Show git commit history for category items. Filter by category and/or name to see history for a specific item.",
inputSchema={
"type": "object",
@@ -583,7 +672,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_revert",
name="PythonRevert",
description="Restore a category item to a previous git revision. Creates a new commit — non-destructive.",
inputSchema={
"type": "object",
@@ -606,7 +695,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="python_delete",
name="PythonDelete",
description="Delete a category script permanently. Commits removal to git history and removes any conda packages that are no longer needed.",
inputSchema={
"type": "object",
@@ -625,7 +714,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="conda_sync",
name="CondaSync",
description="Sync conda packages: scan all metadata, remove unused packages (excluding base environment)",
inputSchema={
"type": "object",
@@ -634,7 +723,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="conda_install",
name="CondaInstall",
description="Install conda packages on-demand",
inputSchema={
"type": "object",
@@ -649,7 +738,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="execute_research",
name="ExecuteResearch",
description="Execute a research script and return results with matplotlib images",
inputSchema={
"type": "object",
@@ -663,7 +752,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="evaluate_indicator",
name="EvaluateIndicator",
description=(
"Evaluate a pandas-ta indicator against real OHLC data and return a structured "
"array of timestamped values. Use this to validate that an indicator computes "
@@ -701,7 +790,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="backtest_strategy",
name="BacktestStrategy",
description=(
"Run a saved trading strategy against historical OHLC data using Nautilus Trader "
"BacktestEngine. Returns performance metrics (total return, Sharpe ratio, "
@@ -714,7 +803,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"properties": {
"strategy_name": {
"type": "string",
"description": "Display name of the strategy as saved via python_write"
"description": "Display name of the strategy as saved via PythonWrite"
},
"feeds": {
"type": "array",
@@ -757,7 +846,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="activate_strategy",
name="ActivateStrategy",
description=(
"Activate a strategy for paper or live forward trading with a capital allocation. "
"paper=true (default): simulated fills on live data — no API keys required. "
@@ -769,7 +858,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"properties": {
"strategy_name": {
"type": "string",
"description": "Display name of the strategy as saved via python_write"
"description": "Display name of the strategy as saved via PythonWrite"
},
"feeds": {
"type": "array",
@@ -798,7 +887,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="deactivate_strategy",
name="DeactivateStrategy",
description="Stop an active strategy and return its final P&L summary.",
inputSchema={
"type": "object",
@@ -812,7 +901,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="list_active_strategies",
name="ListActiveStrategies",
description="List all currently active (live or paper) strategies and their status.",
inputSchema={
"type": "object",
@@ -821,7 +910,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="get_backtest_results",
name="GetBacktestResults",
description=(
"Retrieve stored backtest results for a strategy. "
"Returns the most recent backtest runs with summary stats, "
@@ -844,7 +933,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="get_strategy_trades",
name="GetStrategyTrades",
description=(
"Retrieve the trade log for a strategy (live/paper or backtest). "
"Returns individual round-trip trades with entry/exit prices and PnL."
@@ -866,7 +955,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
),
Tool(
name="get_strategy_events",
name="GetStrategyEvents",
description=(
"Retrieve the event log for a strategy "
"(PnL updates, fills, errors, status changes)."
@@ -905,19 +994,38 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
raise
async def _handle_tool_call_inner(name: str, arguments: dict):
if name == "workspace_read":
if name == "WorkspaceRead":
return workspace_store.read(arguments.get("store_name", ""))
elif name == "workspace_write":
elif name == "WorkspaceWrite":
return workspace_store.write(
arguments.get("store_name", ""),
arguments.get("data")
)
elif name == "workspace_patch":
elif name == "WorkspacePatch":
return workspace_store.patch(
arguments.get("store_name", ""),
arguments.get("patch", [])
)
elif name == "python_write":
elif name == "PreferencesRead":
prefs_path = DATA_DIR / "preferences.md"
if not prefs_path.exists():
return {"content": "", "exists": False}
content = prefs_path.read_text(encoding="utf-8")
return {"content": content, "exists": True}
elif name == "PreferencesWrite":
prefs_path = DATA_DIR / "preferences.md"
prefs_path.write_text(arguments.get("content", ""), encoding="utf-8")
return {"success": True}
elif name == "PreferencesPatch":
prefs_path = DATA_DIR / "preferences.md"
old_str = arguments.get("old_str", "")
new_str = arguments.get("new_str", "")
content = prefs_path.read_text(encoding="utf-8") if prefs_path.exists() else ""
if old_str not in content:
return {"success": False, "error": "old_str not found in preferences file"}
prefs_path.write_text(content.replace(old_str, new_str, 1), encoding="utf-8")
return {"success": True}
elif name == "PythonWrite":
result = await category_manager.write(
category=arguments.get("category", ""),
name=arguments.get("name", ""),
@@ -941,9 +1049,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
exec_content = result["execution"].get("content", [])
content.extend(exec_content)
image_count = sum(1 for item in exec_content if item.type == "image")
logging.info(f"python_write '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
logging.info(f"PythonWrite '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
else:
logging.info(f"python_write '{arguments.get('name')}': no execution result (category={arguments.get('category')})")
logging.info(f"PythonWrite '{arguments.get('name')}': no execution result (category={arguments.get('category')})")
if result.get("success"):
_upsert_type(workspace_store, category_manager, arguments.get("category", ""), arguments.get("name", ""))
await cleanup_extra_packages_async(get_data_dir(), _get_env_yml())
@@ -951,7 +1059,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
if sync:
content.append(sync)
return content
elif name == "python_edit":
elif name == "PythonEdit":
result = await category_manager.edit(
category=arguments.get("category", ""),
name=arguments.get("name", ""),
@@ -977,9 +1085,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
exec_content = result["execution"].get("content", [])
content.extend(exec_content)
image_count = sum(1 for item in exec_content if item.type == "image")
logging.info(f"python_edit '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
logging.info(f"PythonEdit '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
else:
logging.info(f"python_edit '{arguments.get('name')}': no execution result")
logging.info(f"PythonEdit '{arguments.get('name')}': no execution result")
if result.get("success"):
_upsert_type(workspace_store, category_manager, arguments.get("category", ""), arguments.get("name", ""))
await cleanup_extra_packages_async(get_data_dir(), _get_env_yml())
@@ -987,16 +1095,30 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
if sync:
content.append(sync)
return content
elif name == "python_read":
elif name == "PythonRead":
return category_manager.read(
category=arguments.get("category", ""),
name=arguments.get("name", "")
)
elif name == "python_list":
elif name == "PythonReadOutput":
result = category_manager.read_output(
category=arguments.get("category", ""),
name=arguments.get("name", ""),
files=arguments.get("files"),
)
if "error" in result:
return [TextContent(type="text", text=f"Error: {result['error']}")]
content = result.get("content", [])
summary = TextContent(
type="text",
text=f"output_dir: {result.get('output_dir', '')}\nfiles_returned: {result.get('files_returned', [])}"
)
return [summary] + content
elif name == "PythonList":
return category_manager.list_items(
category=arguments.get("category", "")
)
elif name == "python_log":
elif name == "PythonLog":
result = await category_manager.git_log(
category=arguments.get("category"),
name=arguments.get("name"),
@@ -1006,7 +1128,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
for c in result.get("commits", []):
lines.append(f"{c['short_hash']} {c['date'][:10]} {c['message']}")
return [TextContent(type="text", text="\n".join(lines))]
elif name == "python_revert":
elif name == "PythonRevert":
result = await category_manager.git_revert(
revision=arguments.get("revision", ""),
category=arguments.get("category", ""),
@@ -1027,7 +1149,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
content_out.append(sync)
return content_out
return [TextContent(type="text", text="\n".join(meta_parts))]
elif name == "python_delete":
elif name == "PythonDelete":
result = await category_manager.delete(
category=arguments.get("category", ""),
name=arguments.get("name", "")
@@ -1047,23 +1169,23 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
if sync:
content_out.append(sync)
return content_out
elif name == "conda_sync":
elif name == "CondaSync":
return await sync_packages_async(
data_dir=get_data_dir(),
environment_yml=_get_env_yml()
)
elif name == "conda_install":
elif name == "CondaInstall":
return await install_packages_async(arguments.get("packages", []))
elif name == "execute_research":
elif name == "ExecuteResearch":
result = await category_manager.execute_research(name=arguments.get("name", ""))
if "error" in result:
logging.error(f"execute_research '{arguments.get('name')}': {result['error']}")
logging.error(f"ExecuteResearch '{arguments.get('name')}': {result['error']}")
return [TextContent(type="text", text=f"Error: {result['error']}")]
content = result.get("content", [TextContent(type="text", text="No output")])
image_count = sum(1 for item in content if item.type == "image")
logging.info(f"execute_research '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
logging.info(f"ExecuteResearch '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
return content
elif name == "evaluate_indicator":
elif name == "EvaluateIndicator":
return await evaluate_indicator(
symbol=arguments.get("symbol", ""),
from_time=arguments.get("from_time"),
@@ -1072,7 +1194,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
pandas_ta_name=arguments.get("pandas_ta_name", ""),
parameters=arguments.get("parameters") or {},
)
elif name == "backtest_strategy":
elif name == "BacktestStrategy":
result = await backtest_strategy(
strategy_name=arguments.get("strategy_name", ""),
feeds=arguments.get("feeds", []),
@@ -1101,20 +1223,20 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
except Exception as _e:
logging.debug("Failed to persist backtest results: %s", _e)
return result
elif name == "activate_strategy":
elif name == "ActivateStrategy":
return await activate_strategy(
strategy_name=arguments.get("strategy_name", ""),
feeds=arguments.get("feeds", []),
allocation=float(arguments.get("allocation", 0.0)),
paper=bool(arguments.get("paper", True)),
)
elif name == "deactivate_strategy":
elif name == "DeactivateStrategy":
return await deactivate_strategy(
strategy_name=arguments.get("strategy_name", ""),
)
elif name == "list_active_strategies":
elif name == "ListActiveStrategies":
return await list_active_strategies()
elif name == "get_backtest_results":
elif name == "GetBacktestResults":
from dexorder.strategy.db import get_strategy_db
db = get_strategy_db(get_data_dir())
results = await db.get_backtests(
@@ -1122,7 +1244,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
limit=int(arguments.get("limit", 5)),
)
return [TextContent(type="text", text=json.dumps({"backtest_runs": results}))]
elif name == "get_strategy_trades":
elif name == "GetStrategyTrades":
from dexorder.strategy.db import get_strategy_db
db = get_strategy_db(get_data_dir())
trades = await db.get_trades(
@@ -1130,7 +1252,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
limit=int(arguments.get("limit", 100)),
)
return [TextContent(type="text", text=json.dumps({"trades": trades}))]
elif name == "get_strategy_events":
elif name == "GetStrategyEvents":
from dexorder.strategy.db import get_strategy_db
db = get_strategy_db(get_data_dir())
events = await db.get_events(

View File

@@ -1,5 +1,5 @@
<script setup lang="ts">
import { ref, onMounted, onBeforeUnmount, watch } from 'vue'
import { ref, computed, onMounted, onBeforeUnmount, watch } from 'vue'
import ChartView from './components/ChartView.vue'
import ChatPanel from './components/ChatPanel.vue'
import LoginScreen from './components/LoginScreen.vue'
@@ -20,44 +20,48 @@ const authError = ref<string>()
const isMobile = ref(false)
let stateSyncCleanup: (() => void) | null = null
// Horizontal split: chart width in pixels (initialized on mount)
// Horizontal split: chat width in pixels (fixed, anchored to right edge)
const CHART_MIN_PX = 300
const CHAT_MIN_PX = 240
const CHART_DEFAULT_RATIO = 0.62
const chartWidth = ref(0)
const CHAT_DEFAULT_RATIO = 0.38
const chatWidth = ref(0)
const windowWidth = ref(0)
let hDragStartX = 0
let hDragStartWidth = 0
function initChartWidth() {
chartWidth.value = Math.round(window.innerWidth * CHART_DEFAULT_RATIO)
const chartVisible = computed(() => windowWidth.value - chatWidth.value - 4 >= CHART_MIN_PX)
function initChatWidth() {
windowWidth.value = window.innerWidth
chatWidth.value = Math.round(window.innerWidth * CHAT_DEFAULT_RATIO)
}
function startHDrag(e: PointerEvent) {
e.preventDefault()
;(e.target as HTMLElement).setPointerCapture(e.pointerId)
hDragStartX = e.clientX
hDragStartWidth = chartWidth.value
hDragStartWidth = chatWidth.value
}
function onHDragMove(e: PointerEvent) {
if (!e.buttons) return
const delta = e.clientX - hDragStartX
const maxWidth = window.innerWidth - CHAT_MIN_PX - 4
chartWidth.value = Math.max(CHART_MIN_PX, Math.min(maxWidth, hDragStartWidth + delta))
// dragging right shrinks chat; dragging left grows chat
const delta = hDragStartX - e.clientX
const maxChatWidth = windowWidth.value - CHART_MIN_PX - 4
chatWidth.value = Math.max(CHAT_MIN_PX, Math.min(Math.max(maxChatWidth, CHAT_MIN_PX), hDragStartWidth + delta))
}
// Clamp chartWidth so chart + chat always fit within the window
function clampChartWidth() {
const maxWidth = window.innerWidth - CHAT_MIN_PX - 4
if (maxWidth >= CHART_MIN_PX) {
chartWidth.value = Math.max(CHART_MIN_PX, Math.min(maxWidth, chartWidth.value))
}
// Clamp chatWidth so chat stays within valid range on resize
function clampChatWidth() {
windowWidth.value = window.innerWidth
const maxChatWidth = windowWidth.value - CHART_MIN_PX - 4
chatWidth.value = Math.max(CHAT_MIN_PX, Math.min(Math.max(maxChatWidth, CHAT_MIN_PX), chatWidth.value))
}
// Check screen width for mobile layout
const checkMobile = () => {
isMobile.value = window.innerWidth < 768
if (!isMobile.value) clampChartWidth()
if (!isMobile.value) clampChatWidth()
}
const chartStore = useChartStore()
@@ -74,7 +78,7 @@ watch(isMobile, (mobile) => {
// Check if user is already authenticated on page load
onMounted(async () => {
initChartWidth()
initChatWidth()
checkMobile()
window.addEventListener('resize', checkMobile)
@@ -149,11 +153,11 @@ onBeforeUnmount(() => {
/>
<div v-else-if="!isMobile" class="desktop-layout">
<div class="top-area">
<div class="chart-panel" :style="{ width: chartWidth + 'px' }">
<div v-if="chartVisible" class="chart-panel">
<ChartView />
</div>
<div class="h-grabber" @pointerdown="startHDrag" @pointermove="onHDragMove" />
<div class="chat-panel">
<div v-if="chartVisible" class="h-grabber" @pointerdown="startHDrag" @pointermove="onHDragMove" />
<div class="chat-panel" :style="{ width: chatWidth + 'px' }">
<ChatPanel />
</div>
</div>
@@ -189,7 +193,8 @@ onBeforeUnmount(() => {
}
.chart-panel {
flex-shrink: 0;
flex: 1;
min-width: 0;
height: 100%;
overflow: hidden;
display: flex;
@@ -209,8 +214,7 @@ onBeforeUnmount(() => {
}
.chat-panel {
flex: 1;
min-width: 240px;
flex-shrink: 0;
height: 100%;
overflow: hidden;
display: flex;

View File

@@ -1,6 +1,7 @@
<script setup lang="ts">
import { ref } from 'vue'
import DetailsEditDialog from './DetailsEditDialog.vue'
import ResearchViewDialog from './ResearchViewDialog.vue'
const props = defineProps<{
category: 'indicator' | 'strategy' | 'research'
@@ -10,11 +11,19 @@ const props = defineProps<{
const dialogVisible = ref(false)
const editingName = ref('')
const viewDialogVisible = ref(false)
const viewingName = ref('')
function openEdit(name: string) {
editingName.value = name
dialogVisible.value = true
}
function openView(name: string) {
viewingName.value = name
viewDialogVisible.value = true
}
function onUpdated(_payload: { category: string; name: string; success: boolean; error?: string }) {
// Hook for handling the details_updated response — add logic here as needed
}
@@ -26,10 +35,18 @@ function onUpdated(_payload: { category: string; name: string; success: boolean;
<div v-for="row in rows" :key="row.id" class="item-row">
<span class="item-name">{{ row.display_name }}</span>
<span class="item-desc">{{ row.description ?? '' }}</span>
<button v-if="category === 'research'" class="view-btn" @click="openView(row.display_name)">View</button>
<button class="edit-btn" @click="openEdit(row.display_name)">Edit</button>
</div>
</div>
<ResearchViewDialog
v-if="category === 'research'"
v-model:visible="viewDialogVisible"
:category="category"
:name="viewingName"
/>
<DetailsEditDialog
v-model:visible="dialogVisible"
:category="category"
@@ -88,6 +105,23 @@ function onUpdated(_payload: { category: string; name: string; success: boolean;
mask-image: linear-gradient(to right, black calc(100% - 48px), transparent 100%);
}
.view-btn {
flex-shrink: 0;
background: none;
border: 1px solid #3d3d3d;
color: #888;
cursor: pointer;
font-size: 11px;
padding: 2px 8px;
border-radius: 3px;
line-height: 18px;
}
.view-btn:hover {
border-color: #4a9eca;
color: #4a9eca;
}
.edit-btn {
flex-shrink: 0;
background: none;

View File

@@ -131,6 +131,93 @@ function close() {
<i class="pi pi-exclamation-triangle" /> {{ loadError }}
</div>
<div class="editor-wrap">
<div v-if="editor" class="toolbar">
<div class="toolbar-group">
<button
class="tb-btn"
:class="{ active: editor.isActive('bold') }"
title="Bold"
@click="editor.chain().focus().toggleBold().run()"
><b>B</b></button>
<button
class="tb-btn"
:class="{ active: editor.isActive('italic') }"
title="Italic"
@click="editor.chain().focus().toggleItalic().run()"
><i>I</i></button>
<button
class="tb-btn"
:class="{ active: editor.isActive('strike') }"
title="Strikethrough"
@click="editor.chain().focus().toggleStrike().run()"
><s>S</s></button>
</div>
<div class="toolbar-sep" />
<div class="toolbar-group">
<button
class="tb-btn"
:class="{ active: editor.isActive('heading', { level: 1 }) }"
title="Heading 1"
@click="editor.chain().focus().toggleHeading({ level: 1 }).run()"
>H1</button>
<button
class="tb-btn"
:class="{ active: editor.isActive('heading', { level: 2 }) }"
title="Heading 2"
@click="editor.chain().focus().toggleHeading({ level: 2 }).run()"
>H2</button>
<button
class="tb-btn"
:class="{ active: editor.isActive('heading', { level: 3 }) }"
title="Heading 3"
@click="editor.chain().focus().toggleHeading({ level: 3 }).run()"
>H3</button>
</div>
<div class="toolbar-sep" />
<div class="toolbar-group">
<button
class="tb-btn"
:class="{ active: editor.isActive('bulletList') }"
title="Bullet list"
@click="editor.chain().focus().toggleBulletList().run()"
><i class="pi pi-list" /></button>
<button
class="tb-btn"
:class="{ active: editor.isActive('orderedList') }"
title="Ordered list"
@click="editor.chain().focus().toggleOrderedList().run()"
><i class="pi pi-sort-amount-down" /></button>
</div>
<div class="toolbar-sep" />
<div class="toolbar-group">
<button
class="tb-btn"
:class="{ active: editor.isActive('code') }"
title="Inline code"
@click="editor.chain().focus().toggleCode().run()"
><code style="font-size:11px">`c`</code></button>
<button
class="tb-btn"
:class="{ active: editor.isActive('codeBlock') }"
title="Code block"
@click="editor.chain().focus().toggleCodeBlock().run()"
><i class="pi pi-code" /></button>
<button
class="tb-btn"
:class="{ active: editor.isActive('blockquote') }"
title="Blockquote"
@click="editor.chain().focus().toggleBlockquote().run()"
>&ldquo;&rdquo;</button>
</div>
<div class="toolbar-sep" />
<div class="toolbar-group">
<button
class="tb-btn"
title="Horizontal rule"
@click="editor.chain().focus().setHorizontalRule().run()"
>&mdash;</button>
</div>
</div>
<EditorContent :editor="editor" class="tiptap-editor" />
</div>
</template>
@@ -206,6 +293,59 @@ function close() {
overflow: auto;
background: #0d0d0d;
min-height: 300px;
display: flex;
flex-direction: column;
}
.toolbar {
display: flex;
align-items: center;
gap: 2px;
padding: 5px 8px;
border-bottom: 1px solid #2e2e2e;
background: #111;
flex-shrink: 0;
flex-wrap: wrap;
}
.toolbar-group {
display: flex;
align-items: center;
gap: 1px;
}
.toolbar-sep {
width: 1px;
height: 16px;
background: #2e2e2e;
margin: 0 4px;
}
.tb-btn {
background: none;
border: none;
color: #8a8a8a;
cursor: pointer;
padding: 3px 7px;
border-radius: 3px;
font-size: 12px;
font-family: inherit;
line-height: 1;
min-width: 26px;
display: flex;
align-items: center;
justify-content: center;
transition: background 0.1s, color 0.1s;
}
.tb-btn:hover {
background: #1e1e1e;
color: #dbdbdb;
}
.tb-btn.active {
background: #1e3a3f;
color: #89d4e0;
}
.confirm-overlay {
@@ -245,6 +385,11 @@ function close() {
<style>
/* Global (unscoped) so TipTap's .ProseMirror gets styled */
.details-dialog .tiptap-editor {
flex: 1;
overflow: auto;
}
.details-dialog .tiptap-editor .ProseMirror {
padding: 12px 14px;
outline: none;

View File

@@ -0,0 +1,294 @@
<script setup lang="ts">
import { ref, watch, onUnmounted } from 'vue'
import Dialog from 'primevue/dialog'
import { useEditor, EditorContent } from '@tiptap/vue-3'
import StarterKit from '@tiptap/starter-kit'
import { Markdown } from 'tiptap-markdown'
import { wsManager, type WebSocketMessage } from '../composables/useWebSocket'
const props = defineProps<{
visible: boolean
category: string
name: string
}>()
const emit = defineEmits<{
'update:visible': [value: boolean]
}>()
// Lifecycle of an output fetch: idle (closed) → loading → ready | error | empty.
type LoadState = 'idle' | 'loading' | 'ready' | 'error' | 'empty'
const loadState = ref<LoadState>('idle')
// Human-readable message shown when loadState === 'error'.
const loadError = ref('')
// Base64-encoded images returned by the gateway for this item's output.
const images = ref<Array<{ mimeType: string; data: string }>>([])
// Data URI of the image currently enlarged in the lightbox; null when closed.
const lightboxSrc = ref<string | null>(null)
// Keep editable:true so setContent works reliably; read-only appearance is via CSS
const editor = useEditor({
extensions: [
StarterKit,
Markdown.configure({ html: false, transformPastedText: false }),
],
content: '',
})
watch(() => props.visible, (v) => {
if (v) {
loadState.value = 'loading'
loadError.value = ''
images.value = []
lightboxSrc.value = null
editor.value?.commands.setContent('')
wsManager.send({ type: 'read_output', category: props.category, name: props.name })
} else {
loadState.value = 'idle'
images.value = []
lightboxSrc.value = null
editor.value?.commands.setContent('')
}
})
const messageHandler = (msg: WebSocketMessage) => {
if (msg.category !== props.category || msg.name !== props.name) return
if (msg.type === 'output_data') {
const hasAnalysis = !!msg.analysis
const hasImages = Array.isArray(msg.images) && msg.images.length > 0
if (!hasAnalysis && !hasImages) {
loadState.value = 'empty'
return
}
if (hasAnalysis) {
editor.value?.commands.setContent(msg.analysis ?? '')
}
images.value = hasImages ? msg.images : []
loadState.value = 'ready'
} else if (msg.type === 'output_error') {
loadError.value = msg.error ?? 'Failed to load output'
loadState.value = 'error'
}
}
wsManager.addHandler(messageHandler)
onUnmounted(() => wsManager.removeHandler(messageHandler))
function openLightbox(src: string) {
lightboxSrc.value = src
}
function closeLightbox() {
lightboxSrc.value = null
}
</script>
<template>
  <Dialog
    :visible="visible"
    :header="`Output — ${name}`"
    :modal="true"
    :closable="true"
    :style="{ width: '800px', maxWidth: '95vw' }"
    class="research-view-dialog"
    @update:visible="emit('update:visible', false)"
  >
    <div class="dialog-body">
      <!-- Loading / error / empty placeholders, one at a time -->
      <div v-if="loadState === 'loading'" class="state-msg">
        <i class="pi pi-spin pi-spinner" /> Loading output
      </div>
      <div v-else-if="loadState === 'error'" class="state-msg error">
        <i class="pi pi-exclamation-triangle" /> {{ loadError }}
      </div>
      <div v-else-if="loadState === 'empty'" class="state-msg muted">
        No output yet — run the script first.
      </div>
      <!-- Loaded output: chart images first, then the markdown analysis -->
      <div v-else-if="loadState === 'ready'" class="content-scroll">
        <img
          v-for="(img, idx) in images"
          :key="idx"
          :src="`data:${img.mimeType};base64,${img.data}`"
          class="output-image"
          :alt="`Chart ${idx + 1}`"
          @click="openLightbox(`data:${img.mimeType};base64,${img.data}`)"
        />
        <EditorContent v-if="editor" :editor="editor" class="tiptap-viewer" />
      </div>
    </div>
    <!-- Lightbox overlay -->
    <Teleport to="body">
      <div v-if="lightboxSrc" class="lightbox-overlay" @click="closeLightbox">
        <img :src="lightboxSrc" class="lightbox-img" alt="Full size" @click.stop />
        <button class="lightbox-close" @click="closeLightbox">
          <i class="pi pi-times" />
        </button>
      </div>
    </Teleport>
  </Dialog>
</template>
<style scoped>
/* Dialog content column; min-height keeps the placeholder area stable. */
.dialog-body {
  min-height: 200px;
  display: flex;
  flex-direction: column;
}
/* Centered loading / error / empty message. */
.state-msg {
  flex: 1;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 8px;
  color: #8a8a8a;
  font-size: 13px;
  min-height: 200px;
}
.state-msg.error { color: #e06c6c; }
.state-msg.muted { color: #555; }
/* Scrollable stack: images followed by the markdown viewer. */
.content-scroll {
  overflow-y: auto;
  max-height: 65vh;
  display: flex;
  flex-direction: column;
  gap: 16px;
  padding-bottom: 4px;
}
/* Inline chart image; zoom-in cursor hints that click opens the lightbox. */
.output-image {
  max-width: 100%;
  border-radius: 4px;
  border: 1px solid #2e2e2e;
  display: block;
  cursor: zoom-in;
  transition: border-color 0.15s;
}
.output-image:hover {
  border-color: #4a9eca;
}
/* Full-screen dimmed backdrop; clicking it (outside the image) closes. */
.lightbox-overlay {
  position: fixed;
  inset: 0;
  background: rgba(0, 0, 0, 0.88);
  display: flex;
  align-items: center;
  justify-content: center;
  z-index: 9999;
  cursor: zoom-out;
}
.lightbox-img {
  max-width: 92vw;
  max-height: 92vh;
  border-radius: 4px;
  box-shadow: 0 8px 40px rgba(0, 0, 0, 0.6);
  cursor: default;
}
/* Round close button pinned to the top-right of the overlay. */
.lightbox-close {
  position: absolute;
  top: 16px;
  right: 20px;
  background: rgba(30, 30, 30, 0.9);
  border: 1px solid #3d3d3d;
  color: #aaa;
  border-radius: 50%;
  width: 32px;
  height: 32px;
  display: flex;
  align-items: center;
  justify-content: center;
  cursor: pointer;
  font-size: 13px;
}
.lightbox-close:hover {
  color: #fff;
  border-color: #666;
}
</style>
<style>
/* Global so TipTap's .ProseMirror gets styled */
.research-view-dialog .tiptap-viewer .ProseMirror {
  padding: 12px 14px;
  outline: none;
  font-size: 13px;
  line-height: 1.6;
  color: #dbdbdb;
  font-family: 'JetBrains Mono', 'Fira Code', 'Cascadia Code', monospace;
  /* Read-only appearance without disabling setContent */
  caret-color: transparent;
  cursor: default;
}
/* Shared heading treatment; per-level sizes overridden just below. */
.research-view-dialog .tiptap-viewer .ProseMirror h1,
.research-view-dialog .tiptap-viewer .ProseMirror h2,
.research-view-dialog .tiptap-viewer .ProseMirror h3 {
  color: #eaeaea;
  margin: 0.8em 0 0.4em;
  font-size: 1.1em;
  font-weight: 700;
  letter-spacing: 0.01em;
}
.research-view-dialog .tiptap-viewer .ProseMirror h1 { font-size: 1.3em; }
.research-view-dialog .tiptap-viewer .ProseMirror h2 { font-size: 1.15em; }
.research-view-dialog .tiptap-viewer .ProseMirror p { margin: 0 0 0.6em; }
.research-view-dialog .tiptap-viewer .ProseMirror strong {
  color: #eaeaea;
  font-weight: 700;
}
.research-view-dialog .tiptap-viewer .ProseMirror em {
  color: #bbb;
  font-style: italic;
}
/* Inline code chip. */
.research-view-dialog .tiptap-viewer .ProseMirror code {
  background: #1a1a1a;
  border-radius: 3px;
  padding: 1px 5px;
  font-size: 0.9em;
  color: #89d4e0;
}
/* Fenced code block; inner code resets the inline-chip styling. */
.research-view-dialog .tiptap-viewer .ProseMirror pre {
  background: #141414;
  border: 1px solid #2e2e2e;
  border-radius: 4px;
  padding: 10px 14px;
  overflow-x: auto;
  margin: 0.6em 0;
}
.research-view-dialog .tiptap-viewer .ProseMirror pre code {
  background: none;
  padding: 0;
  color: #dbdbdb;
}
.research-view-dialog .tiptap-viewer .ProseMirror ul,
.research-view-dialog .tiptap-viewer .ProseMirror ol {
  padding-left: 1.4em;
  margin: 0.4em 0;
}
.research-view-dialog .tiptap-viewer .ProseMirror blockquote {
  border-left: 3px solid #3d3d3d;
  margin: 0.6em 0;
  padding-left: 12px;
  color: #8a8a8a;
}
.research-view-dialog .tiptap-viewer .ProseMirror hr {
  border: none;
  border-top: 1px solid #2e2e2e;
  margin: 1em 0;
}
</style>

View File

@@ -553,6 +553,7 @@ export function useCustomIndicators(tvWidget: any) {
// ------------------------------------------------------------------
async function registerCustomStudy(indicator: IndicatorInstance) {
const meta = indicator.custom_metadata
?? indicatorTypesStore.types[indicator.pandas_ta_name]?.metadata
if (!meta) {
console.warn('[CustomIndicators] No custom_metadata on indicator:', indicator.id)
return