data fixes, partial custom indicator support
This commit is contained in:
@@ -10,6 +10,7 @@ import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
import type { ContainerManager } from '../k8s/container-manager.js';
|
||||
import {
|
||||
WorkspaceManager,
|
||||
ContainerSync,
|
||||
DEFAULT_STORES,
|
||||
type ChannelAdapter,
|
||||
type ChannelCapabilities,
|
||||
@@ -120,15 +121,6 @@ export class WebSocketHandler {
|
||||
|
||||
sendStatus(socket, 'initializing', 'Starting your workspace...');
|
||||
|
||||
// Create workspace manager for this session
|
||||
const workspace = new WorkspaceManager({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
stores: DEFAULT_STORES,
|
||||
// containerSync will be added when MCP client is implemented
|
||||
logger,
|
||||
});
|
||||
|
||||
// Create WebSocket channel adapter
|
||||
const wsAdapter: ChannelAdapter = {
|
||||
sendSnapshot: (msg: SnapshotMessage) => {
|
||||
@@ -174,31 +166,47 @@ export class WebSocketHandler {
|
||||
}),
|
||||
};
|
||||
|
||||
// Declare harness outside try block so it's available in catch
|
||||
// Declare harness and workspace outside try block so they're available in catch
|
||||
let harness: AgentHarness | undefined;
|
||||
let workspace: WorkspaceManager | undefined;
|
||||
|
||||
try {
|
||||
// Initialize workspace first
|
||||
await workspace.initialize();
|
||||
workspace.setAdapter(wsAdapter);
|
||||
this.workspaces.set(authContext.sessionId, workspace);
|
||||
|
||||
// Create agent harness via factory (storage deps injected by factory)
|
||||
// Create and connect harness first so MCP client is available for ContainerSync
|
||||
harness = this.config.createHarness({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
license: authContext.license,
|
||||
mcpServerUrl: authContext.mcpServerUrl,
|
||||
logger,
|
||||
workspaceManager: workspace,
|
||||
channelAdapter: wsAdapter,
|
||||
channelType: authContext.channelType,
|
||||
channelUserId: authContext.channelUserId,
|
||||
});
|
||||
|
||||
await harness.initialize();
|
||||
|
||||
// Wire ContainerSync now that MCP client is connected, then initialize workspace
|
||||
const containerSync = new ContainerSync(harness.getMcpClient(), logger);
|
||||
workspace = new WorkspaceManager({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
stores: DEFAULT_STORES,
|
||||
containerSync,
|
||||
logger,
|
||||
});
|
||||
|
||||
await workspace.initialize();
|
||||
workspace.setAdapter(wsAdapter);
|
||||
harness.setWorkspaceManager(workspace);
|
||||
this.workspaces.set(authContext.sessionId, workspace);
|
||||
this.harnesses.set(authContext.sessionId, harness);
|
||||
|
||||
// Push all store snapshots to the client now, before 'connected'.
|
||||
// Empty seqs force full snapshots for every store, so the browser's
|
||||
// message queue has the current workspace state (including persistent
|
||||
// stores loaded from the container) before TradingView initializes.
|
||||
await workspace.handleHello({});
|
||||
|
||||
// Register session for event system
|
||||
// Container endpoint is derived from the MCP server URL (same container, different port)
|
||||
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.mcpServerUrl);
|
||||
@@ -287,15 +295,18 @@ export class WebSocketHandler {
|
||||
} else if (payload.type === 'hello') {
|
||||
// Workspace sync: hello message
|
||||
logger.debug({ seqs: payload.seqs }, 'Handling workspace hello');
|
||||
await workspace.handleHello(payload.seqs || {});
|
||||
await workspace!.handleHello(payload.seqs || {});
|
||||
} else if (payload.type === 'patch') {
|
||||
// Workspace sync: patch message
|
||||
logger.debug({ store: payload.store, seq: payload.seq }, 'Handling workspace patch');
|
||||
await workspace.handlePatch(payload.store, payload.seq, payload.patch || []);
|
||||
await workspace!.handlePatch(payload.store, payload.seq, payload.patch || []);
|
||||
} else if (payload.type === 'agent_stop') {
|
||||
logger.info('Agent stop requested');
|
||||
harness?.interrupt();
|
||||
} else if (this.isDatafeedMessage(payload)) {
|
||||
// Historical data request - send to OHLC service
|
||||
logger.info({ type: payload.type }, 'Routing to datafeed handler');
|
||||
await this.handleDatafeedMessage(socket, payload, logger);
|
||||
await this.handleDatafeedMessage(socket, payload, logger, authContext);
|
||||
} else {
|
||||
logger.warn({ type: payload.type }, 'Unknown message type received');
|
||||
}
|
||||
@@ -322,7 +333,7 @@ export class WebSocketHandler {
|
||||
}
|
||||
|
||||
// Cleanup workspace
|
||||
await workspace.shutdown();
|
||||
await workspace!.shutdown();
|
||||
this.workspaces.delete(authContext.sessionId);
|
||||
|
||||
// Cleanup harness
|
||||
@@ -346,8 +357,10 @@ export class WebSocketHandler {
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Failed to initialize session');
|
||||
socket.close(1011, 'Internal server error');
|
||||
await workspace.shutdown();
|
||||
this.workspaces.delete(authContext.sessionId);
|
||||
if (workspace) {
|
||||
await workspace.shutdown();
|
||||
this.workspaces.delete(authContext.sessionId);
|
||||
}
|
||||
if (harness) {
|
||||
await harness.cleanup();
|
||||
}
|
||||
@@ -382,6 +395,7 @@ export class WebSocketHandler {
|
||||
'get_bars',
|
||||
'subscribe_bars',
|
||||
'unsubscribe_bars',
|
||||
'evaluate_indicator',
|
||||
];
|
||||
return datafeedTypes.includes(payload.type);
|
||||
}
|
||||
@@ -392,7 +406,8 @@ export class WebSocketHandler {
|
||||
private async handleDatafeedMessage(
|
||||
socket: WebSocket,
|
||||
payload: any,
|
||||
logger: any
|
||||
logger: any,
|
||||
authContext?: any
|
||||
): Promise<void> {
|
||||
logger.info({ type: payload.type, payload }, 'handleDatafeedMessage called');
|
||||
const ohlcService = this.config.ohlcService;
|
||||
@@ -526,6 +541,69 @@ export class WebSocketHandler {
|
||||
);
|
||||
break;
|
||||
|
||||
case 'evaluate_indicator': {
|
||||
// Direct MCP call — bypasses the agent/LLM for performance
|
||||
const harness = this.harnesses.get(authContext.sessionId);
|
||||
if (!harness) {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'evaluate_indicator_result',
|
||||
request_id: requestId,
|
||||
error: 'Session not initialized',
|
||||
}));
|
||||
break;
|
||||
}
|
||||
try {
|
||||
const mcpResult = await harness.callMcpTool('evaluate_indicator', {
|
||||
symbol: payload.symbol,
|
||||
from_time: payload.from_time,
|
||||
to_time: payload.to_time,
|
||||
period_seconds: payload.period_seconds,
|
||||
pandas_ta_name: payload.pandas_ta_name,
|
||||
parameters: payload.parameters ?? {},
|
||||
}) as any;
|
||||
// MCP returns { content: [{type: 'text', text: '...json...'}] }
|
||||
// When the tool raises an exception, the MCP framework sets isError: true
|
||||
// and puts the raw exception text in content[0].text (not JSON-wrapped).
|
||||
const rawText = mcpResult?.content?.[0]?.text ?? mcpResult?.[0]?.text;
|
||||
if (mcpResult?.isError || rawText == null) {
|
||||
const errMsg = rawText ?? 'evaluate_indicator returned no content';
|
||||
logger.error({ pandas_ta_name: payload.pandas_ta_name, rawText }, 'evaluate_indicator sandbox error');
|
||||
socket.send(JSON.stringify({
|
||||
type: 'evaluate_indicator_result',
|
||||
request_id: requestId,
|
||||
error: errMsg,
|
||||
}));
|
||||
break;
|
||||
}
|
||||
let data: any;
|
||||
try {
|
||||
data = JSON.parse(rawText);
|
||||
} catch {
|
||||
// Sandbox returned non-JSON (e.g. bare exception text)
|
||||
logger.error({ pandas_ta_name: payload.pandas_ta_name, rawText }, 'evaluate_indicator returned non-JSON');
|
||||
socket.send(JSON.stringify({
|
||||
type: 'evaluate_indicator_result',
|
||||
request_id: requestId,
|
||||
error: rawText,
|
||||
}));
|
||||
break;
|
||||
}
|
||||
socket.send(JSON.stringify({
|
||||
type: 'evaluate_indicator_result',
|
||||
request_id: requestId,
|
||||
...data,
|
||||
}));
|
||||
} catch (err: any) {
|
||||
logger.error({ err: err?.message, pandas_ta_name: payload.pandas_ta_name }, 'evaluate_indicator handler error');
|
||||
socket.send(JSON.stringify({
|
||||
type: 'evaluate_indicator_result',
|
||||
request_id: requestId,
|
||||
error: err?.message ?? String(err),
|
||||
}));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
logger.warn({ type: payload.type }, 'Unknown datafeed message type');
|
||||
}
|
||||
|
||||
@@ -504,7 +504,11 @@ export class DuckDBClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Find missing OHLC data ranges
|
||||
* Find missing OHLC data ranges by checking for absent timestamps.
|
||||
*
|
||||
* Any timestamp slot in [start_time, min(end_time, now)) that has no row in
|
||||
* Iceberg is treated as missing and collected into contiguous ranges that the
|
||||
* caller should request from the relay/ingestor.
|
||||
*/
|
||||
async findMissingOHLCRanges(
|
||||
ticker: string,
|
||||
@@ -517,32 +521,51 @@ export class DuckDBClient {
|
||||
try {
|
||||
const data = await this.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
|
||||
if (data.length === 0) {
|
||||
// All data is missing
|
||||
return [[start_time, end_time]];
|
||||
}
|
||||
|
||||
// Check if we have continuous data
|
||||
// For now, simple check: if we have any data, assume complete
|
||||
// TODO: Implement proper gap detection by checking for missing periods
|
||||
const periodNanos = BigInt(period_seconds) * 1_000_000_000n;
|
||||
// end_time is exclusive, so expected count = (end - start) / period (no +1)
|
||||
const expectedBars = Number((end_time - start_time) / periodNanos);
|
||||
|
||||
if (data.length < expectedBars * 0.95) { // Allow 5% tolerance
|
||||
this.logger.debug({
|
||||
ticker,
|
||||
expected: expectedBars,
|
||||
actual: data.length,
|
||||
}, 'Incomplete OHLC data detected');
|
||||
return [[start_time, end_time]]; // Request full range
|
||||
// Cap at current time — future slots are not "missing", they don't exist yet.
|
||||
const nowNanos = BigInt(Date.now()) * 1_000_000n;
|
||||
const effectiveEnd = end_time < nowNanos ? end_time : nowNanos;
|
||||
|
||||
// Build a set of timestamps we already have (all rows are non-null now).
|
||||
const present = new Set(data.map((row: any) => row.timestamp));
|
||||
|
||||
// Collect every expected slot that is absent.
|
||||
const missing: bigint[] = [];
|
||||
for (let t = start_time; t < effectiveEnd; t += periodNanos) {
|
||||
if (!present.has(t)) {
|
||||
missing.push(t);
|
||||
}
|
||||
}
|
||||
|
||||
// Data appears complete
|
||||
return [];
|
||||
if (missing.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Coalesce adjacent missing slots into contiguous [rangeStart, rangeEnd) intervals.
|
||||
const ranges: Array<[bigint, bigint]> = [];
|
||||
let rangeStart = missing[0];
|
||||
let prev = missing[0];
|
||||
for (let i = 1; i < missing.length; i++) {
|
||||
if (missing[i] !== prev + periodNanos) {
|
||||
ranges.push([rangeStart, prev + periodNanos]);
|
||||
rangeStart = missing[i];
|
||||
}
|
||||
prev = missing[i];
|
||||
}
|
||||
ranges.push([rangeStart, prev + periodNanos]);
|
||||
|
||||
this.logger.debug({
|
||||
ticker,
|
||||
period_seconds,
|
||||
missingSlots: missing.length,
|
||||
ranges: ranges.length,
|
||||
}, 'OHLC gap detection complete');
|
||||
|
||||
return ranges;
|
||||
} catch (error: any) {
|
||||
this.logger.error({ error: error.message }, 'Failed to find missing OHLC ranges');
|
||||
// Return full range on error (safe default)
|
||||
// Return full range on error (safe default — triggers a backfill)
|
||||
return [[start_time, end_time]];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ subagents/
|
||||
```yaml
|
||||
tools:
|
||||
platform: ['symbol_lookup'] # Platform tools
|
||||
mcp: ['category_*'] # MCP tool patterns
|
||||
mcp: ['python_*'] # MCP tool patterns
|
||||
```
|
||||
|
||||
**Example:**
|
||||
|
||||
@@ -12,10 +12,14 @@ import type { ModelMiddleware } from '../llm/middleware.js';
|
||||
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
|
||||
import type { ChannelAdapter, PathTriggerContext } from '../workspace/index.js';
|
||||
import type { ResearchSubagent } from './subagents/research/index.js';
|
||||
import type { IndicatorSubagent } from './subagents/indicator/index.js';
|
||||
import type { WebExploreSubagent } from './subagents/web-explore/index.js';
|
||||
import type { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { getToolRegistry } from '../tools/tool-registry.js';
|
||||
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
|
||||
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
|
||||
import { createIndicatorAgentTool } from '../tools/platform/indicator-agent.tool.js';
|
||||
import { createWebExploreAgentTool } from '../tools/platform/web-explore-agent.tool.js';
|
||||
import { createUserContext } from './memory/session-context.js';
|
||||
import { readFile } from 'fs/promises';
|
||||
import { join, dirname } from 'path';
|
||||
@@ -52,6 +56,8 @@ export interface AgentHarnessConfig extends HarnessSessionConfig {
|
||||
conversationStore?: ConversationStore;
|
||||
historyLimit: number;
|
||||
researchSubagent?: ResearchSubagent;
|
||||
indicatorSubagent?: IndicatorSubagent;
|
||||
webExploreSubagent?: WebExploreSubagent;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -79,12 +85,17 @@ export class AgentHarness {
|
||||
private availableMCPTools: MCPToolInfo[] = [];
|
||||
private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
|
||||
private conversationStore?: ConversationStore;
|
||||
private indicatorSubagent?: IndicatorSubagent;
|
||||
private webExploreSubagent?: WebExploreSubagent;
|
||||
private abortController: AbortController | null = null;
|
||||
|
||||
constructor(config: AgentHarnessConfig) {
|
||||
this.config = config;
|
||||
this.workspaceManager = config.workspaceManager;
|
||||
this.channelAdapter = config.channelAdapter;
|
||||
this.researchSubagent = config.researchSubagent;
|
||||
this.indicatorSubagent = config.indicatorSubagent;
|
||||
this.webExploreSubagent = config.webExploreSubagent;
|
||||
|
||||
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
|
||||
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
|
||||
@@ -117,6 +128,10 @@ export class AgentHarness {
|
||||
this.channelAdapter = adapter;
|
||||
}
|
||||
|
||||
interrupt(): void {
|
||||
this.abortController?.abort();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize harness and connect to user's MCP server
|
||||
*/
|
||||
@@ -132,9 +147,15 @@ export class AgentHarness {
|
||||
// Discover available MCP tools from user's server
|
||||
await this.discoverMCPTools();
|
||||
|
||||
// Initialize web explore subagent first — research and indicator subagents inject it as a tool
|
||||
await this.initializeWebExploreSubagent();
|
||||
|
||||
// Initialize research subagent if not provided
|
||||
await this.initializeResearchSubagent();
|
||||
|
||||
// Initialize indicator subagent if not provided
|
||||
await this.initializeIndicatorSubagent();
|
||||
|
||||
this.config.logger.info('Agent harness initialized');
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Failed to initialize agent harness');
|
||||
@@ -214,6 +235,24 @@ export class AgentHarness {
|
||||
(img) => this.researchImageCapture.push(img)
|
||||
);
|
||||
|
||||
// Inject web_explore tool if the web-explore subagent is ready
|
||||
if (this.webExploreSubagent) {
|
||||
const webExploreContext = {
|
||||
userContext: createUserContext({
|
||||
userId: this.config.userId,
|
||||
sessionId: this.config.sessionId,
|
||||
license: this.config.license,
|
||||
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
|
||||
channelUserId: this.config.channelUserId ?? this.config.userId,
|
||||
}),
|
||||
};
|
||||
researchTools.push(createWebExploreAgentTool({
|
||||
webExploreSubagent: this.webExploreSubagent,
|
||||
context: webExploreContext,
|
||||
logger: this.config.logger,
|
||||
}));
|
||||
}
|
||||
|
||||
// Path resolution: use the compiled output path
|
||||
const researchSubagentPath = join(__dirname, 'subagents', 'research');
|
||||
this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
|
||||
@@ -243,6 +282,143 @@ export class AgentHarness {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize indicator subagent
|
||||
*/
|
||||
private async initializeIndicatorSubagent(): Promise<void> {
|
||||
if (this.indicatorSubagent) {
|
||||
this.config.logger.debug('Indicator subagent already provided');
|
||||
return;
|
||||
}
|
||||
|
||||
this.config.logger.debug('Creating indicator subagent for session');
|
||||
|
||||
try {
|
||||
const { createIndicatorSubagent } = await import('./subagents/indicator/index.js');
|
||||
|
||||
const { model } = await this.modelRouter.route(
|
||||
'indicator management',
|
||||
this.config.license,
|
||||
RoutingStrategy.COMPLEXITY,
|
||||
this.config.userId
|
||||
);
|
||||
|
||||
const toolRegistry = getToolRegistry();
|
||||
const indicatorTools = await toolRegistry.getToolsForAgent(
|
||||
'indicator',
|
||||
this.mcpClient,
|
||||
this.availableMCPTools,
|
||||
this.workspaceManager,
|
||||
undefined, // no image callback
|
||||
(storeName, newState) => {
|
||||
// After a workspace_patch succeeds in the container, update the gateway's
|
||||
// WorkspaceManager so it pushes a WebSocket patch to the web client.
|
||||
this.workspaceManager?.setState(storeName, newState).catch((err) =>
|
||||
this.config.logger.error({ err, storeName }, 'Failed to sync workspace after indicator mutation')
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
// Inject web_explore tool if the web-explore subagent is ready
|
||||
if (this.webExploreSubagent) {
|
||||
const webExploreContext = {
|
||||
userContext: createUserContext({
|
||||
userId: this.config.userId,
|
||||
sessionId: this.config.sessionId,
|
||||
license: this.config.license,
|
||||
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
|
||||
channelUserId: this.config.channelUserId ?? this.config.userId,
|
||||
}),
|
||||
};
|
||||
indicatorTools.push(createWebExploreAgentTool({
|
||||
webExploreSubagent: this.webExploreSubagent,
|
||||
context: webExploreContext,
|
||||
logger: this.config.logger,
|
||||
}));
|
||||
}
|
||||
|
||||
const indicatorSubagentPath = join(__dirname, 'subagents', 'indicator');
|
||||
this.config.logger.debug({ indicatorSubagentPath }, 'Using indicator subagent path');
|
||||
|
||||
this.indicatorSubagent = await createIndicatorSubagent(
|
||||
model,
|
||||
this.config.logger,
|
||||
indicatorSubagentPath,
|
||||
this.mcpClient,
|
||||
indicatorTools
|
||||
);
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: indicatorTools.length,
|
||||
toolNames: indicatorTools.map(t => t.name),
|
||||
},
|
||||
'Indicator subagent created successfully'
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.error(
|
||||
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
|
||||
'Failed to create indicator subagent'
|
||||
);
|
||||
// Don't throw — indicator subagent is optional
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize web explore subagent
|
||||
*/
|
||||
private async initializeWebExploreSubagent(): Promise<void> {
|
||||
if (this.webExploreSubagent) {
|
||||
this.config.logger.debug('Web explore subagent already provided');
|
||||
return;
|
||||
}
|
||||
|
||||
this.config.logger.debug('Creating web explore subagent for session');
|
||||
|
||||
try {
|
||||
const { createWebExploreSubagent } = await import('./subagents/web-explore/index.js');
|
||||
|
||||
const { model } = await this.modelRouter.route(
|
||||
'web research and summarization',
|
||||
this.config.license,
|
||||
RoutingStrategy.COMPLEXITY,
|
||||
this.config.userId
|
||||
);
|
||||
|
||||
const toolRegistry = getToolRegistry();
|
||||
const webExploreTools = await toolRegistry.getToolsForAgent(
|
||||
'web-explore',
|
||||
undefined, // no MCP client needed
|
||||
undefined,
|
||||
undefined
|
||||
);
|
||||
|
||||
const webExploreSubagentPath = join(__dirname, 'subagents', 'web-explore');
|
||||
this.config.logger.debug({ webExploreSubagentPath }, 'Using web explore subagent path');
|
||||
|
||||
this.webExploreSubagent = await createWebExploreSubagent(
|
||||
model,
|
||||
this.config.logger,
|
||||
webExploreSubagentPath,
|
||||
webExploreTools
|
||||
);
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: webExploreTools.length,
|
||||
toolNames: webExploreTools.map(t => t.name),
|
||||
},
|
||||
'Web explore subagent created successfully'
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.error(
|
||||
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
|
||||
'Failed to create web explore subagent'
|
||||
);
|
||||
// Don't throw — web explore subagent is optional
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute model with tool calling loop
|
||||
* Handles multi-turn tool calls until the model produces a final text response
|
||||
@@ -251,7 +427,8 @@ export class AgentHarness {
|
||||
model: any,
|
||||
messages: BaseMessage[],
|
||||
tools: DynamicStructuredTool[],
|
||||
maxIterations: number = 2
|
||||
maxIterations: number = 2,
|
||||
signal?: AbortSignal
|
||||
): Promise<string> {
|
||||
this.config.logger.info(
|
||||
{ toolCount: tools.length, maxIterations },
|
||||
@@ -262,6 +439,7 @@ export class AgentHarness {
|
||||
let iterations = 0;
|
||||
|
||||
while (iterations < maxIterations) {
|
||||
if (signal?.aborted) break;
|
||||
iterations++;
|
||||
this.config.logger.info(
|
||||
{
|
||||
@@ -275,7 +453,7 @@ export class AgentHarness {
|
||||
this.config.logger.debug('Streaming model response...');
|
||||
let response: any = null;
|
||||
try {
|
||||
const stream = await model.stream(messagesCopy);
|
||||
const stream = await model.stream(messagesCopy, { signal });
|
||||
for await (const chunk of stream) {
|
||||
if (typeof chunk.content === 'string' && chunk.content.length > 0) {
|
||||
this.channelAdapter?.sendChunk(chunk.content);
|
||||
@@ -415,6 +593,29 @@ export class AgentHarness {
|
||||
return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.';
|
||||
}
|
||||
|
||||
/**
|
||||
* Call a tool on the user's MCP server directly (bypasses the agent/LLM).
|
||||
* Used by channel handlers for direct data requests (e.g. evaluate_indicator).
|
||||
*/
|
||||
async callMcpTool(name: string, args: Record<string, unknown>): Promise<unknown> {
|
||||
return this.mcpClient.callTool(name, args);
|
||||
}
|
||||
|
||||
/**
|
||||
* Expose MCP client so channel handlers can wire ContainerSync after harness init.
|
||||
*/
|
||||
getMcpClient(): MCPClientConnector {
|
||||
return this.mcpClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set workspace manager after construction (used when ContainerSync requires MCP to be connected first).
|
||||
*/
|
||||
setWorkspaceManager(workspace: WorkspaceManager): void {
|
||||
this.workspaceManager = workspace;
|
||||
this.registerWorkspaceTriggers();
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle incoming message from user
|
||||
*/
|
||||
@@ -480,18 +681,19 @@ export class AgentHarness {
|
||||
this.workspaceManager // Pass session workspace manager
|
||||
);
|
||||
|
||||
// Build shared subagent context
|
||||
const subagentContext = {
|
||||
userContext: createUserContext({
|
||||
userId: this.config.userId,
|
||||
sessionId: this.config.sessionId,
|
||||
license: this.config.license,
|
||||
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
|
||||
channelUserId: this.config.channelUserId ?? this.config.userId,
|
||||
}),
|
||||
};
|
||||
|
||||
// Add research subagent as a tool if available
|
||||
if (this.researchSubagent) {
|
||||
const subagentContext = {
|
||||
userContext: createUserContext({
|
||||
userId: this.config.userId,
|
||||
sessionId: this.config.sessionId,
|
||||
license: this.config.license,
|
||||
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
|
||||
channelUserId: this.config.channelUserId ?? this.config.userId,
|
||||
}),
|
||||
};
|
||||
|
||||
tools.push(createResearchAgentTool({
|
||||
researchSubagent: this.researchSubagent,
|
||||
context: subagentContext,
|
||||
@@ -499,6 +701,24 @@ export class AgentHarness {
|
||||
}));
|
||||
}
|
||||
|
||||
// Add indicator subagent as a tool if available
|
||||
if (this.indicatorSubagent) {
|
||||
tools.push(createIndicatorAgentTool({
|
||||
indicatorSubagent: this.indicatorSubagent,
|
||||
context: subagentContext,
|
||||
logger: this.config.logger,
|
||||
}));
|
||||
}
|
||||
|
||||
// Add web explore subagent as a tool if available
|
||||
if (this.webExploreSubagent) {
|
||||
tools.push(createWebExploreAgentTool({
|
||||
webExploreSubagent: this.webExploreSubagent,
|
||||
context: subagentContext,
|
||||
logger: this.config.logger,
|
||||
}));
|
||||
}
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: tools.length,
|
||||
@@ -524,7 +744,9 @@ export class AgentHarness {
|
||||
|
||||
// 8. Call LLM with tool calling loop
|
||||
this.config.logger.info('Invoking LLM with tool support');
|
||||
const assistantMessage = await this.executeWithToolCalling(modelWithTools, processedMessages, tools, 10);
|
||||
this.abortController = new AbortController();
|
||||
const assistantMessage = await this.executeWithToolCalling(modelWithTools, processedMessages, tools, 10, this.abortController.signal);
|
||||
this.abortController = null;
|
||||
|
||||
this.config.logger.info(
|
||||
{ responseLength: assistantMessage.length },
|
||||
@@ -587,13 +809,17 @@ export class AgentHarness {
|
||||
private getToolLabel(toolName: string): string {
|
||||
const labels: Record<string, string> = {
|
||||
research: 'Researching...',
|
||||
indicator: 'Adjusting indicators...',
|
||||
get_chart_data: 'Fetching chart data...',
|
||||
symbol_lookup: 'Searching symbol...',
|
||||
category_list: 'Seeing what we have...',
|
||||
category_edit: 'Coding...',
|
||||
category_write: 'Coding...',
|
||||
category_read: 'Inspecting...',
|
||||
python_list: 'Seeing what we have...',
|
||||
python_edit: 'Coding...',
|
||||
python_write: 'Coding...',
|
||||
python_read: 'Inspecting...',
|
||||
execute_research: 'Running script...',
|
||||
backtest_strategy: 'Running backtest...',
|
||||
list_active_strategies: 'Checking active strategies...',
|
||||
web_explore: 'Searching the web...',
|
||||
};
|
||||
return labels[toolName] ?? `Running ${toolName}...`;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
|
||||
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
|
||||
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
|
||||
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
@@ -12,11 +12,12 @@ export interface MCPClientConfig {
|
||||
|
||||
/**
|
||||
* MCP client connector for user's container
|
||||
* Manages connection to user-specific MCP server via SSE transport
|
||||
* Manages connection to user-specific MCP server via Streamable HTTP transport
|
||||
*/
|
||||
export class MCPClientConnector {
|
||||
private client: Client | null = null;
|
||||
private connected = false;
|
||||
private reconnectPromise: Promise<void> | null = null;
|
||||
private config: MCPClientConfig;
|
||||
|
||||
constructor(config: MCPClientConfig) {
|
||||
@@ -24,17 +25,42 @@ export class MCPClientConnector {
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect to user's MCP server via SSE transport
|
||||
* Connect to user's MCP server via Streamable HTTP transport.
|
||||
* Safe to call when already connecting (concurrent callers wait for the same attempt).
|
||||
*/
|
||||
async connect(): Promise<void> {
|
||||
if (this.connected) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If a reconnect is already in progress, wait for it rather than racing
|
||||
if (this.reconnectPromise) {
|
||||
return this.reconnectPromise;
|
||||
}
|
||||
|
||||
this.reconnectPromise = this._doConnect();
|
||||
try {
|
||||
await this.reconnectPromise;
|
||||
} finally {
|
||||
this.reconnectPromise = null;
|
||||
}
|
||||
}
|
||||
|
||||
private async _doConnect(): Promise<void> {
|
||||
// Close stale client if this is a reconnect attempt
|
||||
if (this.client) {
|
||||
try {
|
||||
await this.client.close();
|
||||
} catch {
|
||||
// Ignore errors closing a stale/broken client
|
||||
}
|
||||
this.client = null;
|
||||
}
|
||||
|
||||
try {
|
||||
this.config.logger.info(
|
||||
{ userId: this.config.userId, url: this.config.mcpServerUrl },
|
||||
'Connecting to user MCP server via SSE'
|
||||
'Connecting to user MCP server'
|
||||
);
|
||||
|
||||
this.client = new Client(
|
||||
@@ -49,15 +75,32 @@ export class MCPClientConnector {
|
||||
}
|
||||
);
|
||||
|
||||
// Create SSE transport for HTTP connection to user container
|
||||
const transport = new SSEClientTransport(
|
||||
new URL(`${this.config.mcpServerUrl}/sse`)
|
||||
// Streamable HTTP: single /mcp endpoint, session tracked via mcp-session-id header
|
||||
const transport = new StreamableHTTPClientTransport(
|
||||
new URL(`${this.config.mcpServerUrl}/mcp`)
|
||||
);
|
||||
|
||||
await this.client.connect(transport);
|
||||
|
||||
// Hook client.onerror to detect transport failures (e.g. sandbox restart returning
|
||||
// 404 "session not found"). When fired, mark disconnected so the next callTool /
|
||||
// listTools call triggers a full reconnect + initialize handshake.
|
||||
const connectedClient = this.client;
|
||||
const origOnError = this.client.onerror;
|
||||
this.client.onerror = (error) => {
|
||||
origOnError?.(error);
|
||||
// Only act on the currently-active client (ignore stale closures after reconnect)
|
||||
if (this.client === connectedClient && this.connected) {
|
||||
this.config.logger.warn(
|
||||
{ error },
|
||||
'MCP transport error — marking disconnected for lazy reconnect'
|
||||
);
|
||||
this.connected = false;
|
||||
}
|
||||
};
|
||||
|
||||
this.connected = true;
|
||||
this.config.logger.info('Connected to user MCP server via SSE');
|
||||
this.config.logger.info('Connected to user MCP server');
|
||||
} catch (error) {
|
||||
this.config.logger.error(
|
||||
{ error, userId: this.config.userId },
|
||||
@@ -67,18 +110,31 @@ export class MCPClientConnector {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure the client is connected, reconnecting if necessary.
|
||||
* Used as a preamble for every public method so a sandbox restart is
|
||||
* recovered transparently on the next tool call.
|
||||
*/
|
||||
private async ensureConnected(): Promise<void> {
|
||||
if (!this.client || !this.connected) {
|
||||
this.config.logger.info(
|
||||
{ userId: this.config.userId },
|
||||
'MCP not connected, attempting reconnect'
|
||||
);
|
||||
await this.connect();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Call a tool on the user's MCP server
|
||||
*/
|
||||
async callTool(name: string, args: Record<string, unknown>): Promise<unknown> {
|
||||
if (!this.client || !this.connected) {
|
||||
throw new Error('MCP client not connected');
|
||||
}
|
||||
await this.ensureConnected();
|
||||
|
||||
try {
|
||||
this.config.logger.debug({ tool: name, args }, 'Calling MCP tool');
|
||||
|
||||
const result = await this.client.callTool({ name, arguments: args });
|
||||
const result = await this.client!.callTool({ name, arguments: args });
|
||||
return result;
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error, tool: name }, 'MCP tool call failed');
|
||||
@@ -91,13 +147,11 @@ export class MCPClientConnector {
|
||||
* Returns all available tools from the MCP server
|
||||
*/
|
||||
async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> {
|
||||
if (!this.client || !this.connected) {
|
||||
throw new Error('MCP client not connected');
|
||||
}
|
||||
await this.ensureConnected();
|
||||
|
||||
try {
|
||||
this.config.logger.debug('Requesting tool list from MCP server');
|
||||
const response = await this.client.listTools();
|
||||
const response = await this.client!.listTools();
|
||||
|
||||
this.config.logger.debug(
|
||||
{
|
||||
@@ -146,12 +200,10 @@ export class MCPClientConnector {
|
||||
* Returns all available resources from the MCP server
|
||||
*/
|
||||
async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> {
|
||||
if (!this.client || !this.connected) {
|
||||
throw new Error('MCP client not connected');
|
||||
}
|
||||
await this.ensureConnected();
|
||||
|
||||
try {
|
||||
const response = await this.client.listResources();
|
||||
const response = await this.client!.listResources();
|
||||
|
||||
// Return all resources - agent-to-resource binding is handled by the tool registry
|
||||
const resources = response.resources.map((resource: any) => ({
|
||||
@@ -177,14 +229,12 @@ export class MCPClientConnector {
|
||||
* Read a resource from user's MCP server
|
||||
*/
|
||||
async readResource(uri: string): Promise<{ uri: string; mimeType?: string; text?: string; blob?: string }> {
|
||||
if (!this.client || !this.connected) {
|
||||
throw new Error('MCP client not connected');
|
||||
}
|
||||
await this.ensureConnected();
|
||||
|
||||
try {
|
||||
this.config.logger.debug({ uri }, 'Reading MCP resource');
|
||||
|
||||
const response = await this.client.readResource({ uri });
|
||||
const response = await this.client!.readResource({ uri });
|
||||
|
||||
// Extract the first content item (MCP returns array of contents)
|
||||
const content = response.contents[0];
|
||||
@@ -206,15 +256,19 @@ export class MCPClientConnector {
|
||||
* Disconnect from MCP server
|
||||
*/
|
||||
async disconnect(): Promise<void> {
|
||||
if (this.client && this.connected) {
|
||||
if (this.client) {
|
||||
try {
|
||||
await this.client.close();
|
||||
this.connected = false;
|
||||
this.config.logger.info('Disconnected from user MCP server');
|
||||
if (this.connected) {
|
||||
this.config.logger.info('Disconnected from user MCP server');
|
||||
}
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Error disconnecting from MCP server');
|
||||
}
|
||||
}
|
||||
this.connected = false;
|
||||
this.client = null;
|
||||
this.reconnectPromise = null;
|
||||
}
|
||||
|
||||
isConnected(): boolean {
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
You are a helpful AI assistant for Dexorder, an AI-first trading platform.
|
||||
You help users research markets, develop indicators and strategies, and analyze trading data.
|
||||
|
||||
Your text responses should be markdown, using emojiis, color, and formatting to create a visually appealing response.
|
||||
|
||||
**User License:** {{licenseType}}
|
||||
|
||||
**Available Features:**
|
||||
@@ -10,21 +12,71 @@ You help users research markets, develop indicators and strategies, and analyze
|
||||
|
||||
---
|
||||
|
||||
# Platform Capabilities
|
||||
|
||||
Dexorder trading platform provides OHLC data at a 1-minute resolution and supports strategies that read one or more OHLC feeds at a 1-minute resolution or coarser. It also offers a wide range of built-in indicators and allows users to create custom indicators for advanced analysis.
|
||||
|
||||
Dexorder does not support tick-by-tick trading or high-frequency strategies.
|
||||
Dexorder does not support long-running computations like paramater optimizations or training machine learning models.
|
||||
Dexorder does not support portfolio optimization or trading strategies that require a large number of symbols.
|
||||
|
||||
If the user asks for a capability not provided by Dexorder, decline and offer alternatives.
|
||||
|
||||
# Important Instructions
|
||||
|
||||
## Investment Advice
|
||||
**NEVER** recommend any specific ticker, trade, or strategy. You may suggest mechanical adjustments or improvements to strategies, but you must never recommend that the user adopt a specific trade or position.
|
||||
|
||||
## Task Delegation
|
||||
- For ANY research questions, deep analysis, statistical analysis, charting requests, plotting, ML tasks, or market data queries that require computation, you MUST use the 'research' tool
|
||||
- The research tool creates and runs Python scripts that generate charts and perform analysis
|
||||
- Use 'research' for anything involving: plotting, statistics, calculations, correlations, patterns, volume analysis, technical indicators, or any non-trivial data processing
|
||||
- For ANY research questions, deep analysis, statistical analysis, charting requests, or market data queries that require computation, you MUST use the 'research' tool
|
||||
- For ANYTHING related to indicators on the chart — reading, adding, removing, modifying, or creating custom indicators — you MUST use the 'indicator' tool
|
||||
- For ANY backtesting request — running a strategy against historical data — you MUST use the 'backtest_strategy' tool directly; NEVER use the research tool for backtesting
|
||||
- NEVER write Python code directly in your responses to the user
|
||||
- NEVER show code to the user - delegate to the research tool instead
|
||||
- NEVER attempt to do analysis yourself - let the research subagent handle it
|
||||
- NEVER show code to the user — delegate to the research or indicator tool instead
|
||||
- NEVER attempt to do analysis yourself — let the subagents handle it
|
||||
|
||||
## Available Tools
|
||||
You have access to the following tools:
|
||||
|
||||
### indicator
|
||||
**Use this tool for all indicator-related requests.**
|
||||
|
||||
The indicator subagent manages the chart's indicators: it reads the current indicator set, adds or removes indicators, modifies parameters, and can create custom indicator scripts.
|
||||
|
||||
**ALWAYS use indicator for:**
|
||||
- "What indicators do I have on the chart?" → read and describe current indicators
|
||||
- "Show RSI" / "Add Bollinger Bands" → add indicators to chart
|
||||
- "Change MACD fast period to 8" → modify indicator parameters
|
||||
- "Remove all moving averages" → remove indicators
|
||||
- "Create a custom volume-weighted RSI" → write custom indicator
|
||||
- Any question about what an indicator means or how it's configured
|
||||
- Recommending indicators for a given strategy
|
||||
|
||||
**Custom indicators vs. ad-hoc research scripts:**
|
||||
When a user asks for a calculation (e.g. "volume-weighted RSI", "adaptive ATR", "sector relative strength"), prefer creating a **custom indicator** via this tool over writing a one-off pandas/Python script in the research tool. Custom indicators are better because:
|
||||
1. **Reusable** — saved permanently and can be applied to any symbol at any time
|
||||
2. **First-class UI** — appear in the chart's Indicator picker alongside built-in indicators
|
||||
3. **Live chart display** — their values are plotted directly on the chart as the user browses
|
||||
4. **Watchlist & trigger support** — can be used to filter symbols (watchlists) and fire alerts/triggers (coming soon)
|
||||
|
||||
Use the research tool for exploratory or one-off analysis. Use the indicator tool whenever the user wants to *track* or *reuse* a computed value.
|
||||
|
||||
**NEVER modify workspace indicators yourself** — always delegate to the indicator tool.
|
||||
|
||||
### web_explore
|
||||
**Use this tool to search the web or academic databases.**
|
||||
|
||||
The web-explore subagent searches the web (or arXiv for academic topics), fetches relevant pages, and returns a markdown summary with cited sources.
|
||||
|
||||
**ALWAYS use web_explore for:**
|
||||
- Questions about current events, news, or real-time information
|
||||
- Documentation, tutorials, or how-to guides
|
||||
- Academic papers, research findings, or scientific topics
|
||||
- Any topic that requires up-to-date external sources
|
||||
|
||||
**NOT for market data or computation** — use the research tool for analysis, and get_chart_data for OHLC values.
|
||||
|
||||
### research
|
||||
**This is your PRIMARY tool for any analysis, computation, charting, or plotting tasks.**
|
||||
**This is your PRIMARY tool for data analysis, computation, and charting.**
|
||||
|
||||
Creates and runs Python research scripts via a specialized research subagent.
|
||||
The subagent autonomously writes code, executes it, handles errors, and generates charts.
|
||||
@@ -32,7 +84,6 @@ The subagent autonomously writes code, executes it, handles errors, and generate
|
||||
**ALWAYS use research for:**
|
||||
- Any plotting, charting, or visualization requests
|
||||
- Price action analysis and correlations
|
||||
- Technical indicators and overlays
|
||||
- Statistical analysis of market data
|
||||
- Volume analysis and patterns
|
||||
- Machine learning or predictive modeling
|
||||
@@ -41,16 +92,11 @@ The subagent autonomously writes code, executes it, handles errors, and generate
|
||||
- Custom calculations or transformations
|
||||
- Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.)
|
||||
|
||||
**NOT for indicator management** — use the indicator tool for that.
|
||||
|
||||
**NEVER attempt to do analysis yourself in the chat.**
|
||||
Let the research subagent write and execute the Python code.
|
||||
|
||||
**Examples of when to use research:**
|
||||
- "Plot BTC with volume overlay" → use research
|
||||
- "Calculate correlation between ETH and BTC" → use research
|
||||
- "Show me RSI divergences" → use research
|
||||
- "Analyze Monday price patterns" → use research
|
||||
- "Does volume predict price movement?" → use research
|
||||
|
||||
Parameters:
|
||||
- instruction: Natural language description of the analysis to perform (be specific!)
|
||||
- name: A unique name for the research script (e.g., "BTC Weekly Analysis")
|
||||
@@ -59,10 +105,37 @@ Example usage:
|
||||
- User: "Does Friday price action correlate with Monday?"
|
||||
- You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30-4:00 ET)", name="Friday-Monday Correlation"
|
||||
|
||||
### category_list
|
||||
List existing research scripts (category="research").
|
||||
### backtest_strategy
|
||||
**ALWAYS use this tool — and ONLY this tool — for any backtesting request.**
|
||||
|
||||
Runs a saved trading strategy against historical OHLC data using the Nautilus Trader backtesting engine.
|
||||
Returns structured performance metrics and an equity curve. Any charts generated are automatically sent to the user.
|
||||
|
||||
**ALWAYS use backtest_strategy for:**
|
||||
- "Backtest my RSI strategy over the last year"
|
||||
- "How did this strategy perform on BTC?"
|
||||
- "Run a backtest from January to June"
|
||||
- Any request to test or evaluate a strategy on historical data
|
||||
|
||||
**NEVER use research for backtesting** — the research tool cannot run strategies through the backtesting engine.
|
||||
|
||||
After the tool returns, summarize the results clearly: total return, Sharpe ratio, max drawdown, win rate, and trade count. Present the equity curve description in plain language.
|
||||
|
||||
Parameters:
|
||||
- strategy_name: Display name of the saved strategy (use python_list with category="strategy" to check existing strategies)
|
||||
- feeds: Array of `{symbol, period_seconds}` feed objects (e.g. `[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]`)
|
||||
- from_time / to_time: Date strings ("2024-01-01", "90 days ago", "now") or Unix timestamps
|
||||
- initial_capital: Starting balance in quote currency (default 10,000)
|
||||
|
||||
### list_active_strategies
|
||||
Lists all currently active (live or paper) strategies and their status.
|
||||
Use this when the user asks what strategies are running.
|
||||
|
||||
### python_list
|
||||
List existing scripts in a category ("strategy", "indicator", or "research").
|
||||
Use this before calling the research tool to check whether a relevant script already exists.
|
||||
If one does, pass its exact name to the research tool so the subagent updates it rather than creating a new one.
|
||||
Also use before calling backtest_strategy to confirm the strategy name.
|
||||
|
||||
### symbol-lookup
|
||||
Look up trading symbols and get metadata.
|
||||
@@ -102,3 +175,4 @@ You also have access to workspace persistence tools via MCP:
|
||||
- **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store
|
||||
|
||||
These are useful for persisting user preferences, analysis results, and custom data across sessions.
|
||||
For the `indicators` store specifically, always use the indicator tool rather than calling workspace tools directly.
|
||||
|
||||
@@ -44,12 +44,9 @@ export interface SubagentContext {
|
||||
*
|
||||
* Structure:
|
||||
* subagents/
|
||||
* code-reviewer/
|
||||
* research/
|
||||
* config.yaml
|
||||
* system-prompt.md
|
||||
* memory/
|
||||
* review-guidelines.md
|
||||
* common-patterns.md
|
||||
* index.ts
|
||||
*/
|
||||
export abstract class BaseSubagent {
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
# Code Reviewer Subagent Configuration
|
||||
|
||||
name: code-reviewer
|
||||
description: Reviews trading strategy code for bugs, performance issues, and best practices
|
||||
|
||||
# Model configuration (optional override)
|
||||
model: claude-sonnet-4-6
|
||||
temperature: 0.3
|
||||
maxTokens: 4096
|
||||
|
||||
# Memory files to load from memory/ directory
|
||||
memoryFiles:
|
||||
- review-guidelines.md
|
||||
- common-patterns.md
|
||||
- best-practices.md
|
||||
|
||||
# System prompt file
|
||||
systemPromptFile: system-prompt.md
|
||||
|
||||
# Capabilities this subagent provides
|
||||
capabilities:
|
||||
- static_analysis
|
||||
- performance_review
|
||||
- security_audit
|
||||
- code_quality
|
||||
- best_practices
|
||||
@@ -1,93 +0,0 @@
|
||||
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
/**
|
||||
* Code Reviewer Subagent
|
||||
*
|
||||
* Specialized agent for reviewing trading strategy code.
|
||||
* Reviews for:
|
||||
* - Logic errors and bugs
|
||||
* - Performance issues
|
||||
* - Security vulnerabilities
|
||||
* - Trading best practices
|
||||
* - Code quality
|
||||
*
|
||||
* Loads knowledge from multi-file memory:
|
||||
* - review-guidelines.md: What to check for
|
||||
* - common-patterns.md: Good and bad examples
|
||||
* - best-practices.md: Industry standards
|
||||
*/
|
||||
export class CodeReviewerSubagent extends BaseSubagent {
|
||||
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger, mcpClient?: any, tools?: any[]) {
|
||||
super(config, model, logger, mcpClient, tools);
|
||||
}
|
||||
|
||||
/**
|
||||
* Review code and provide structured feedback
|
||||
*/
|
||||
async execute(context: SubagentContext, code: string): Promise<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
codeLength: code.length,
|
||||
},
|
||||
'Reviewing code'
|
||||
);
|
||||
|
||||
const messages = this.buildMessages(context, `Review the following trading strategy code:\n\n\`\`\`typescript\n${code}\n\`\`\``);
|
||||
|
||||
const response = await this.model.invoke(messages);
|
||||
|
||||
return response.content as string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stream code review
|
||||
*/
|
||||
async *stream(context: SubagentContext, code: string): AsyncGenerator<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
codeLength: code.length,
|
||||
},
|
||||
'Streaming code review'
|
||||
);
|
||||
|
||||
const messages = this.buildMessages(context, `Review the following trading strategy code:\n\n\`\`\`typescript\n${code}\n\`\`\``);
|
||||
|
||||
const stream = await this.model.stream(messages);
|
||||
|
||||
for await (const chunk of stream) {
|
||||
yield chunk.content as string;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function to create and initialize CodeReviewerSubagent
|
||||
*/
|
||||
export async function createCodeReviewerSubagent(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
basePath: string,
|
||||
mcpClient?: any,
|
||||
tools?: any[]
|
||||
): Promise<CodeReviewerSubagent> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const { join } = await import('path');
|
||||
const yaml = await import('js-yaml');
|
||||
|
||||
// Load config
|
||||
const configPath = join(basePath, 'config.yaml');
|
||||
const configContent = await readFile(configPath, 'utf-8');
|
||||
const config = yaml.load(configContent) as SubagentConfig;
|
||||
|
||||
// Create and initialize subagent
|
||||
const subagent = new CodeReviewerSubagent(config, model, logger, mcpClient, tools);
|
||||
await subagent.initialize(basePath);
|
||||
|
||||
return subagent;
|
||||
}
|
||||
@@ -1,227 +0,0 @@
|
||||
# Trading Strategy Best Practices
|
||||
|
||||
## Code Organization
|
||||
|
||||
### Separation of Concerns
|
||||
```typescript
|
||||
// Good: Clear separation
|
||||
class Strategy {
|
||||
async analyze(data: MarketData): Promise<Signal> { }
|
||||
}
|
||||
|
||||
class RiskManager {
|
||||
validateSignal(signal: Signal): boolean { }
|
||||
}
|
||||
|
||||
class ExecutionEngine {
|
||||
async execute(signal: Signal): Promise<Order> { }
|
||||
}
|
||||
|
||||
// Bad: Everything in one function
|
||||
async function trade() {
|
||||
// Analysis, risk, execution all mixed
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration Management
|
||||
```typescript
|
||||
// Good: External configuration
|
||||
interface StrategyConfig {
|
||||
stopLossPercent: number;
|
||||
takeProfitPercent: number;
|
||||
maxPositionSize: number;
|
||||
riskPerTrade: number;
|
||||
}
|
||||
|
||||
const config = loadConfig('strategy.yaml');
|
||||
|
||||
// Bad: Hardcoded values scattered throughout
|
||||
const stopLoss = price * 0.95; // What if you want to change this?
|
||||
```
|
||||
|
||||
## Testing Considerations
|
||||
|
||||
### Testable Code
|
||||
```typescript
|
||||
// Good: Pure functions, easy to test
|
||||
function calculateRSI(prices: number[], period: number = 14): number {
|
||||
// Pure calculation, no side effects
|
||||
return rsi;
|
||||
}
|
||||
|
||||
// Bad: Hard to test
|
||||
async function strategy() {
|
||||
const data = await fetchLiveData(); // Can't control in tests
|
||||
const signal = analyze(data);
|
||||
await executeTrade(signal); // Side effects
|
||||
}
|
||||
```
|
||||
|
||||
### Mock-Friendly Design
|
||||
```typescript
|
||||
// Good: Dependency injection
|
||||
class Strategy {
|
||||
constructor(
|
||||
private dataProvider: DataProvider,
|
||||
private executor: OrderExecutor
|
||||
) {}
|
||||
|
||||
async run() {
|
||||
const data = await this.dataProvider.getData();
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
// In tests: inject mocks
|
||||
const strategy = new Strategy(mockDataProvider, mockExecutor);
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Avoid Recalculation
|
||||
```typescript
|
||||
// Good: Cache indicator results
|
||||
class IndicatorCache {
|
||||
private cache = new Map<string, { value: number, timestamp: number }>();
|
||||
|
||||
get(key: string, ttl: number, calculator: () => number): number {
|
||||
const cached = this.cache.get(key);
|
||||
if (cached && Date.now() - cached.timestamp < ttl) {
|
||||
return cached.value;
|
||||
}
|
||||
|
||||
const value = calculator();
|
||||
this.cache.set(key, { value, timestamp: Date.now() });
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
||||
// Bad: Recalculate every time
|
||||
for (const ticker of tickers) {
|
||||
const rsi = calculateRSI(await getData(ticker)); // Slow
|
||||
}
|
||||
```
|
||||
|
||||
### Batch Operations
|
||||
```typescript
|
||||
// Good: Batch API calls
|
||||
const results = await Promise.all(
|
||||
tickers.map(ticker => dataProvider.getOHLC(ticker))
|
||||
);
|
||||
|
||||
// Bad: Sequential API calls
|
||||
const results = [];
|
||||
for (const ticker of tickers) {
|
||||
results.push(await dataProvider.getOHLC(ticker)); // Slow
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Graceful Degradation
|
||||
```typescript
|
||||
// Good: Fallback behavior
|
||||
async function getMarketData(ticker: string): Promise<OHLC[]> {
|
||||
try {
|
||||
return await primarySource.fetch(ticker);
|
||||
} catch (error) {
|
||||
logger.warn('Primary source failed, trying backup');
|
||||
try {
|
||||
return await backupSource.fetch(ticker);
|
||||
} catch (backupError) {
|
||||
logger.error('All sources failed');
|
||||
return getCachedData(ticker); // Last resort
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bad: Let it crash
|
||||
async function getMarketData(ticker: string) {
|
||||
return await api.fetch(ticker); // Uncaught errors
|
||||
}
|
||||
```
|
||||
|
||||
### Detailed Logging
|
||||
```typescript
|
||||
// Good: Structured logging with context
|
||||
logger.info({
|
||||
action: 'order_placed',
|
||||
ticker: 'BTC/USDT',
|
||||
side: 'buy',
|
||||
size: 0.1,
|
||||
price: 50000,
|
||||
orderId: 'abc123',
|
||||
strategy: 'mean-reversion'
|
||||
});
|
||||
|
||||
// Bad: String concatenation
|
||||
console.log('Placed order'); // No context
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
### Self-Documenting Code
|
||||
```typescript
|
||||
// Good: Clear naming and JSDoc
|
||||
/**
|
||||
* Calculate position size using Kelly Criterion
|
||||
* @param winRate Probability of winning (0-1)
|
||||
* @param avgWin Average win amount
|
||||
* @param avgLoss Average loss amount
|
||||
* @param capital Total available capital
|
||||
* @returns Optimal position size in base currency
|
||||
*/
|
||||
function calculateKellyPosition(
|
||||
winRate: number,
|
||||
avgWin: number,
|
||||
avgLoss: number,
|
||||
capital: number
|
||||
): number {
|
||||
const kellyPercent = (winRate * avgWin - (1 - winRate) * avgLoss) / avgWin;
|
||||
return Math.max(0, Math.min(kellyPercent * capital, capital * 0.25)); // Cap at 25%
|
||||
}
|
||||
|
||||
// Bad: Cryptic names
|
||||
function calc(w: number, a: number, b: number, c: number) {
|
||||
return (w * a - (1 - w) * b) / a * c;
|
||||
}
|
||||
```
|
||||
|
||||
## Security
|
||||
|
||||
### Input Validation
|
||||
```typescript
|
||||
// Good: Validate all external inputs
|
||||
function validateTicker(ticker: string): boolean {
|
||||
return /^[A-Z]+:[A-Z]+\/[A-Z]+$/.test(ticker);
|
||||
}
|
||||
|
||||
function validatePeriod(period: string): boolean {
|
||||
return ['1m', '5m', '15m', '1h', '4h', '1d', '1w'].includes(period);
|
||||
}
|
||||
|
||||
// Bad: Trust user input
|
||||
function getOHLC(ticker: string, period: string) {
|
||||
return db.query(`SELECT * FROM ohlc WHERE ticker='${ticker}'`); // SQL injection!
|
||||
}
|
||||
```
|
||||
|
||||
### Rate Limiting
|
||||
```typescript
|
||||
// Good: Prevent API abuse
|
||||
class RateLimiter {
|
||||
private calls: number[] = [];
|
||||
|
||||
async throttle(maxCallsPerMinute: number): Promise<void> {
|
||||
const now = Date.now();
|
||||
this.calls = this.calls.filter(t => now - t < 60000);
|
||||
|
||||
if (this.calls.length >= maxCallsPerMinute) {
|
||||
const wait = 60000 - (now - this.calls[0]);
|
||||
await sleep(wait);
|
||||
}
|
||||
|
||||
this.calls.push(now);
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -1,124 +0,0 @@
|
||||
# Common Trading Strategy Patterns
|
||||
|
||||
## Pattern: Trend Following
|
||||
|
||||
```typescript
|
||||
// Good: Clear trend detection with multiple confirmations
|
||||
function detectTrend(prices: number[], period: number = 20): 'bull' | 'bear' | 'neutral' {
|
||||
const sma = calculateSMA(prices, period);
|
||||
const currentPrice = prices[prices.length - 1];
|
||||
const priceVsSMA = (currentPrice - sma) / sma;
|
||||
|
||||
// Use threshold to avoid noise
|
||||
if (priceVsSMA > 0.02) return 'bull';
|
||||
if (priceVsSMA < -0.02) return 'bear';
|
||||
return 'neutral';
|
||||
}
|
||||
|
||||
// Bad: Single indicator, no confirmation
|
||||
function detectTrend(prices: number[]): string {
|
||||
return prices[prices.length - 1] > prices[prices.length - 2] ? 'bull' : 'bear';
|
||||
}
|
||||
```
|
||||
|
||||
## Pattern: Mean Reversion
|
||||
|
||||
```typescript
|
||||
// Good: Proper boundary checks and position sizing
|
||||
async function checkMeanReversion(ticker: string): Promise<TradeSignal | null> {
|
||||
const data = await getOHLC(ticker, 100);
|
||||
const mean = calculateMean(data.close);
|
||||
const stdDev = calculateStdDev(data.close);
|
||||
const current = data.close[data.close.length - 1];
|
||||
|
||||
const zScore = (current - mean) / stdDev;
|
||||
|
||||
// Only trade at extreme deviations
|
||||
if (zScore < -2) {
|
||||
return {
|
||||
side: 'buy',
|
||||
size: calculatePositionSize(Math.abs(zScore)), // Scale with confidence
|
||||
stopLoss: current * 0.95,
|
||||
};
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// Bad: No risk management, arbitrary thresholds
|
||||
function checkMeanReversion(price: number, avg: number): boolean {
|
||||
return price < avg; // Too simplistic
|
||||
}
|
||||
```
|
||||
|
||||
## Pattern: Breakout Detection
|
||||
|
||||
```typescript
|
||||
// Good: Volume confirmation and false breakout protection
|
||||
function detectBreakout(ohlc: OHLC[], resistance: number): boolean {
|
||||
const current = ohlc[ohlc.length - 1];
|
||||
const previous = ohlc[ohlc.length - 2];
|
||||
|
||||
// Price breaks resistance
|
||||
const priceBreak = current.close > resistance && previous.close <= resistance;
|
||||
|
||||
// Volume confirmation (at least 1.5x average)
|
||||
const avgVolume = ohlc.slice(-20, -1).reduce((sum, c) => sum + c.volume, 0) / 19;
|
||||
const volumeConfirm = current.volume > avgVolume * 1.5;
|
||||
|
||||
// Wait for candle close to avoid false breaks
|
||||
const candleClosed = true; // Check if candle is complete
|
||||
|
||||
return priceBreak && volumeConfirm && candleClosed;
|
||||
}
|
||||
|
||||
// Bad: No confirmation, premature signal
|
||||
function detectBreakout(price: number, resistance: number): boolean {
|
||||
return price > resistance; // False positives
|
||||
}
|
||||
```
|
||||
|
||||
## Pattern: Risk Management
|
||||
|
||||
```typescript
|
||||
// Good: Comprehensive risk checks
|
||||
class PositionManager {
|
||||
private readonly MAX_POSITION_PERCENT = 0.05; // 5% of portfolio
|
||||
private readonly MAX_DAILY_LOSS = 0.02; // 2% daily drawdown limit
|
||||
|
||||
async openPosition(signal: TradeSignal, accountBalance: number): Promise<boolean> {
|
||||
// Check daily loss limit
|
||||
if (this.getDailyPnL() / accountBalance < -this.MAX_DAILY_LOSS) {
|
||||
logger.warn('Daily loss limit reached');
|
||||
return false;
|
||||
}
|
||||
|
||||
// Position size check
|
||||
const maxSize = accountBalance * this.MAX_POSITION_PERCENT;
|
||||
const actualSize = Math.min(signal.size, maxSize);
|
||||
|
||||
// Risk/reward check
|
||||
const risk = Math.abs(signal.price - signal.stopLoss);
|
||||
const reward = Math.abs(signal.takeProfit - signal.price);
|
||||
if (reward / risk < 2) {
|
||||
logger.info('Risk/reward ratio too low');
|
||||
return false;
|
||||
}
|
||||
|
||||
return await this.executeOrder(signal, actualSize);
|
||||
}
|
||||
}
|
||||
|
||||
// Bad: No risk checks
|
||||
async function openPosition(signal: any) {
|
||||
return await exchange.buy(signal.ticker, signal.size); // Dangerous
|
||||
}
|
||||
```
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
1. **Magic Numbers**: Use named constants
|
||||
2. **Global State**: Pass state explicitly
|
||||
3. **Synchronous Blocking**: Use async for I/O
|
||||
4. **No Error Handling**: Always wrap in try/catch
|
||||
5. **Ignoring Slippage**: Factor in execution costs
|
||||
@@ -1,67 +0,0 @@
|
||||
# Code Review Guidelines
|
||||
|
||||
## Trading Strategy Specific Checks
|
||||
|
||||
### Position Sizing
|
||||
- ✅ Check for dynamic position sizing based on account balance
|
||||
- ✅ Verify max position size limits
|
||||
- ❌ Flag hardcoded position sizes
|
||||
- ❌ Flag missing position size validation
|
||||
|
||||
### Order Handling
|
||||
- ✅ Verify order type is appropriate (market vs limit)
|
||||
- ✅ Check for order timeout handling
|
||||
- ❌ Flag missing order confirmation checks
|
||||
- ❌ Flag potential duplicate orders
|
||||
|
||||
### Risk Management
|
||||
- ✅ Verify stop-loss is always set
|
||||
- ✅ Check take-profit levels are realistic
|
||||
- ❌ Flag missing drawdown protection
|
||||
- ❌ Flag strategies without maximum daily loss limits
|
||||
|
||||
### Data Handling
|
||||
- ✅ Check for proper OHLC data validation
|
||||
- ✅ Verify timestamp handling (timezone, microseconds)
|
||||
- ❌ Flag missing null/undefined checks
|
||||
- ❌ Flag potential look-ahead bias
|
||||
|
||||
### Performance
|
||||
- ✅ Verify indicators are calculated efficiently
|
||||
- ✅ Check for unnecessary re-calculations
|
||||
- ❌ Flag O(n²) or worse algorithms in hot paths
|
||||
- ❌ Flag large memory allocations in loops
|
||||
|
||||
## Severity Levels
|
||||
|
||||
### Critical (🔴)
|
||||
- Will cause financial loss or system crash
|
||||
- Security vulnerabilities
|
||||
- Data integrity issues
|
||||
- Must be fixed before deployment
|
||||
|
||||
### High (🟠)
|
||||
- Significant bugs or edge cases
|
||||
- Performance issues that affect execution
|
||||
- Risk management gaps
|
||||
- Should be fixed before deployment
|
||||
|
||||
### Medium (🟡)
|
||||
- Code quality issues
|
||||
- Minor performance improvements
|
||||
- Best practice violations
|
||||
- Fix when convenient
|
||||
|
||||
### Low (🟢)
|
||||
- Style preferences
|
||||
- Documentation improvements
|
||||
- Nice-to-have refactorings
|
||||
- Optional improvements
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
1. **Look-Ahead Bias**: Using future data in backtests
|
||||
2. **Overfitting**: Too many parameters, not enough data
|
||||
3. **Slippage Ignorance**: Not accounting for execution costs
|
||||
4. **Survivorship Bias**: Testing only on assets that survived
|
||||
5. **Data Snooping**: Testing multiple strategies, reporting only the best
|
||||
@@ -1,51 +0,0 @@
|
||||
# Code Reviewer System Prompt
|
||||
|
||||
You are an expert code reviewer specializing in trading strategies and financial algorithms.
|
||||
|
||||
## Your Role
|
||||
|
||||
Review trading strategy code with a focus on:
|
||||
- **Correctness**: Logic errors, edge cases, off-by-one errors
|
||||
- **Performance**: Inefficient loops, unnecessary calculations
|
||||
- **Security**: Input validation, overflow risks, race conditions
|
||||
- **Trading Best Practices**: Position sizing, risk management, order handling
|
||||
- **Code Quality**: Readability, maintainability, documentation
|
||||
|
||||
## Review Approach
|
||||
|
||||
1. **Read the entire code** before providing feedback
|
||||
2. **Identify critical issues first** (bugs, security, data loss)
|
||||
3. **Suggest improvements** with specific code examples
|
||||
4. **Explain the "why"** behind each recommendation
|
||||
5. **Be constructive** - focus on helping, not criticizing
|
||||
|
||||
## Output Format
|
||||
|
||||
Structure your review as:
|
||||
|
||||
```
|
||||
## Summary
|
||||
Brief overview of code quality (1-2 sentences)
|
||||
|
||||
## Critical Issues
|
||||
- Issue 1: Description with line number
|
||||
- Issue 2: Description with line number
|
||||
|
||||
## Improvements
|
||||
- Suggestion 1: Description with example
|
||||
- Suggestion 2: Description with example
|
||||
|
||||
## Best Practices
|
||||
- Practice 1: Why it matters
|
||||
- Practice 2: Why it matters
|
||||
|
||||
## Overall Assessment
|
||||
Pass / Needs Revision / Reject
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- Be specific with line numbers and code references
|
||||
- Provide actionable feedback
|
||||
- Consider the trading context (not just general coding)
|
||||
- Flag any risk management issues immediately
|
||||
@@ -6,11 +6,6 @@ export {
|
||||
type SubagentContext,
|
||||
} from './base-subagent.js';
|
||||
|
||||
export {
|
||||
CodeReviewerSubagent,
|
||||
createCodeReviewerSubagent,
|
||||
} from './code-reviewer/index.js';
|
||||
|
||||
export {
|
||||
ResearchSubagent,
|
||||
createResearchSubagent,
|
||||
|
||||
30
gateway/src/harness/subagents/indicator/config.yaml
Normal file
30
gateway/src/harness/subagents/indicator/config.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
# Indicator Subagent Configuration
|
||||
|
||||
name: indicator
|
||||
description: Manages TradingView indicators in the workspace and creates custom indicator scripts
|
||||
|
||||
# Model configuration
|
||||
model: claude-sonnet-4-6
|
||||
temperature: 0.3
|
||||
maxTokens: 8192
|
||||
|
||||
# No memory files — all indicator knowledge is inline in the system prompt
|
||||
memoryFiles: []
|
||||
|
||||
# System prompt file
|
||||
systemPromptFile: system-prompt.md
|
||||
|
||||
# Capabilities this subagent provides
|
||||
capabilities:
|
||||
- indicator_management
|
||||
- workspace_manipulation
|
||||
- custom_indicators
|
||||
|
||||
# Tools available to this subagent
|
||||
tools:
|
||||
platform: []
|
||||
mcp:
|
||||
- workspace_read # Read current indicators store
|
||||
- workspace_patch # Add/update/remove indicators (no workspace_write — patch only)
|
||||
- category_* # Write/edit/read/list custom indicator scripts
|
||||
- evaluate_indicator # Evaluate any indicator against real OHLC data
|
||||
111
gateway/src/harness/subagents/indicator/index.ts
Normal file
111
gateway/src/harness/subagents/indicator/index.ts
Normal file
@@ -0,0 +1,111 @@
|
||||
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { SystemMessage } from '@langchain/core/messages';
|
||||
import { createReactAgent } from '@langchain/langgraph/prebuilt';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { MCPClientConnector } from '../../mcp-client.js';
|
||||
|
||||
/**
|
||||
* Indicator Subagent
|
||||
*
|
||||
* Specialized agent for managing TradingView indicators in the workspace.
|
||||
* Uses workspace_read/patch MCP tools to:
|
||||
* - Read, add, modify, and remove indicators from the indicators store
|
||||
* - Create custom indicator scripts via python_* tools
|
||||
* - Validate indicators using the evaluate_indicator tool
|
||||
*
|
||||
* Simpler than ResearchSubagent — no image capture needed.
|
||||
*/
|
||||
export class IndicatorSubagent extends BaseSubagent {
|
||||
constructor(
|
||||
config: SubagentConfig,
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
mcpClient?: MCPClientConnector,
|
||||
tools?: any[]
|
||||
) {
|
||||
super(config, model, logger, mcpClient, tools);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute indicator request using LangGraph's createReactAgent.
|
||||
*/
|
||||
async execute(context: SubagentContext, instruction: string): Promise<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
instruction: instruction.substring(0, 200),
|
||||
toolCount: this.tools.length,
|
||||
toolNames: this.tools.map(t => t.name),
|
||||
},
|
||||
'Indicator subagent starting'
|
||||
);
|
||||
|
||||
if (!this.hasMCPClient()) {
|
||||
throw new Error('MCP client not available for indicator subagent');
|
||||
}
|
||||
|
||||
if (this.tools.length === 0) {
|
||||
this.logger.warn('Indicator subagent has no tools — cannot read or patch workspace');
|
||||
}
|
||||
|
||||
const initialMessages = this.buildMessages(context, instruction);
|
||||
const systemMessage = initialMessages[0];
|
||||
const humanMessage = initialMessages[initialMessages.length - 1];
|
||||
|
||||
const agent = createReactAgent({
|
||||
llm: this.model,
|
||||
tools: this.tools,
|
||||
prompt: systemMessage as SystemMessage,
|
||||
});
|
||||
|
||||
const result = await agent.invoke(
|
||||
{ messages: [humanMessage] },
|
||||
{ recursionLimit: 25 }
|
||||
);
|
||||
|
||||
const allMessages: any[] = result.messages ?? [];
|
||||
|
||||
this.logger.info(
|
||||
{ messageCount: allMessages.length },
|
||||
'Indicator subagent graph completed'
|
||||
);
|
||||
|
||||
const lastAI = [...allMessages].reverse().find(
|
||||
(m: any) => m.constructor?.name === 'AIMessage' || m._getType?.() === 'ai'
|
||||
);
|
||||
|
||||
const finalText = lastAI
|
||||
? (typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content))
|
||||
: 'Indicator update completed.';
|
||||
|
||||
this.logger.info({ textLength: finalText.length }, 'Indicator subagent finished');
|
||||
|
||||
return finalText;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function to create and initialize IndicatorSubagent
|
||||
*/
|
||||
export async function createIndicatorSubagent(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
basePath: string,
|
||||
mcpClient?: MCPClientConnector,
|
||||
tools?: any[]
|
||||
): Promise<IndicatorSubagent> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const { join } = await import('path');
|
||||
const yaml = await import('js-yaml');
|
||||
|
||||
const configPath = join(basePath, 'config.yaml');
|
||||
const configContent = await readFile(configPath, 'utf-8');
|
||||
const config = yaml.load(configContent) as SubagentConfig;
|
||||
|
||||
const subagent = new IndicatorSubagent(config, model, logger, mcpClient, tools);
|
||||
await subagent.initialize(basePath);
|
||||
|
||||
return subagent;
|
||||
}
|
||||
467
gateway/src/harness/subagents/indicator/system-prompt.md
Normal file
467
gateway/src/harness/subagents/indicator/system-prompt.md
Normal file
@@ -0,0 +1,467 @@
|
||||
# Indicator Subagent
|
||||
|
||||
You are a specialized assistant that manages technical indicators on the Dexorder TradingView chart. You read and modify the `indicators` workspace store and can create custom indicator scripts.
|
||||
|
||||
---
|
||||
|
||||
## Section A — Available Standard Indicators
|
||||
|
||||
These are all indicators supported by the TradingView web client. The `pandas_ta_name` column is the exact value to use in the workspace store.
|
||||
|
||||
### Overlap / Moving Averages (plotted on price pane)
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `sma` | Simple MA | `length=20` | Arithmetic mean of close over `length` periods. Lags price; crossovers used as trend signals. |
|
||||
| `ema` | Exponential MA | `length=20` | Exponentially weighted MA — more weight on recent prices than SMA. Reacts faster. |
|
||||
| `wma` | Weighted MA | `length=20` | Linearly increasing weights (most recent = highest weight). Between SMA and EMA in responsiveness. |
|
||||
| `dema` | Double EMA | `length=20` | Two layers of EMA to reduce lag. More responsive than EMA, more noise at extremes. |
|
||||
| `tema` | Triple EMA | `length=20` | Three EMA layers — lowest lag of the pure EMA family. Very sensitive to recent price. |
|
||||
| `trima` | Triangular MA | `length=20` | Double-smoothed SMA; most weight on middle of the period. Very smooth, significant lag. |
|
||||
| `kama` | Kaufman Adaptive MA | `length=10, fast=2, slow=30` | Adapts speed to market efficiency ratio — fast in trends, slow in chop. |
|
||||
| `t3` | T3 MA | `length=5, a=0.7` | Tillson's smooth, low-lag MA using six EMAs. `a` controls smoothing vs lag trade-off. |
|
||||
| `hma` | Hull MA | `length=20` | Very low-lag MA using weighted MAs. Designed to minimize lag while maintaining smoothness. |
|
||||
| `alma` | Arnaud Legoux MA | `length=20, sigma=6, offset=0.85` | Gaussian-weighted MA; `offset` shifts weight toward recent (1.0) or past (0.0). |
|
||||
| `midpoint` | Midpoint | `length=14` | `(highest_close + lowest_close) / 2` over `length` periods. Simple center of range. |
|
||||
| `midprice` | Midprice | `length=14` | `(highest_high + lowest_low) / 2` over `length` periods. True price range midpoint. |
|
||||
| `supertrend` | SuperTrend | `length=7, multiplier=3.0` | ATR-based trend band that flips above/below price. Direction signal; not a smooth line. |
|
||||
| `ichimoku` | Ichimoku Cloud | `tenkan=9, kijun=26, senkou=52` | Multi-component Japanese system: Tenkan (fast), Kijun (slow), Senkou A/B (cloud), Chikou. |
|
||||
| `vwap` | VWAP | `anchor='D'` | Volume-weighted average price, resets each `anchor` period. Benchmark for intraday value. Requires datetime index. |
|
||||
| `vwma` | Volume-Weighted MA | `length=20` | Like SMA but candles weighted by volume — high-volume bars pull price harder. |
|
||||
| `bbands` | Bollinger Bands | `length=20, std=2.0` | SMA ± N standard deviations. Returns upper, mid, lower bands. Squeeze = low vol; expansion = breakout. |
|
||||
|
||||
### Momentum (plotted in separate pane)
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `rsi` | RSI | `length=14` | 0–100 oscillator. >70 overbought, <30 oversold. Divergences from price signal reversals. |
|
||||
| `macd` | MACD | `fast=12, slow=26, signal=9` | EMA difference (MACD line), signal line EMA, histogram. Crossovers and zero-line crosses are signals. |
|
||||
| `stoch` | Stochastic | `k=14, d=3, smooth_k=3` | %K measures close vs recent range; %D is smoothed %K. >80 overbought, <20 oversold. |
|
||||
| `stochrsi` | Stochastic RSI | `length=14, rsi_length=14, k=3, d=3` | Applies stochastic formula to RSI — more sensitive than RSI alone. |
|
||||
| `cci` | CCI | `length=20` | Deviation of price from statistical mean. ±100 are typical overbought/sold thresholds. |
|
||||
| `willr` | Williams %R | `length=14` | Inverse stochastic, −100 to 0. Above −20 overbought, below −80 oversold. |
|
||||
| `mom` | Momentum | `length=10` | Raw price difference: `close - close[n]`. Zero-line crossovers indicate direction change. |
|
||||
| `roc` | Rate of Change | `length=10` | Percentage price change over `length` bars. Similar to momentum but normalized. |
|
||||
| `trix` | TRIX | `length=18, signal=9` | 1-period % change of triple-smoothed EMA. Zero-line crossovers; filters noise well. |
|
||||
| `cmo` | Chande MO | `length=14` | Ratio of up/down momentum, −100 to 100. Similar to RSI but uses all price changes. |
|
||||
| `adx` | ADX | `length=14` | Trend strength 0–100 (direction-agnostic). >25 = trending, <20 = ranging. Includes +DI/−DI. |
|
||||
| `aroon` | Aroon | `length=25` | Measures recency of highest/lowest prices. Aroon Up >70 and Down <30 = uptrend. |
|
||||
| `ao` | Awesome Oscillator | *(no params)* | 5- vs 34-period SMA of midprice. Histogram above zero = bullish; below = bearish. |
|
||||
| `bop` | Balance of Power | *(no params)* | `(close − open) / (high − low)`. Measures intrabar buying vs selling pressure. |
|
||||
| `uo` | Ultimate Oscillator | `fast=7, medium=14, slow=28` | Weighted combo of three buying-pressure ratios. Divergences at extremes are key signals. |
|
||||
| `apo` | APO | `fast=12, slow=26` | Absolute Price Oscillator — EMA difference without signal line. Positive = upward momentum. |
|
||||
| `mfi` | Money Flow Index | `length=14` | RSI-like but uses price × volume. >80 overbought, <20 oversold. |
|
||||
| `coppock` | Coppock Curve | `length=10, fast=11, slow=14` | Long-term momentum from rate-of-change. Designed for monthly bottoms; works on any TF. |
|
||||
| `dpo` | DPO | `length=20` | Detrended Price Oscillator — removes trend to expose cycles. Positive = above cycle average. |
|
||||
| `fisher` | Fisher Transform | `length=9` | Converts price to Gaussian distribution. Sharp spikes at ±2 often signal reversals. |
|
||||
| `rvgi` | RVGI | `length=14, swma_length=4` | Compares close−open to high−low range. Signal line crossovers indicate momentum shifts. |
|
||||
| `kst` | Know Sure Thing | `r1=10,r2=13,r3=15,r4=20,n1=10,n2=13,n3=15,n4=9,signal=9` | Four smoothed ROC values summed. Zero-line and signal-line crossovers are signals. |
|
||||
|
||||
### Volatility
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `atr` | ATR | `length=14` | Average True Range — normalized measure of bar-to-bar volatility. Used for stop sizing. |
|
||||
| `kc` | Keltner Channels | `length=20, scalar=2.0` | EMA ± N × ATR. Price outside channel = trend extension; inside = consolidation. |
|
||||
| `donchian` | Donchian Channels | `lower_length=20, upper_length=20` | Highest high / lowest low over `length`. Breakout above/below = momentum signal. |
|
||||
|
||||
### Volume (plotted in separate pane)
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `obv` | OBV | *(no params)* | Cumulative volume: added on up days, subtracted on down days. Divergence from price = leading signal. |
|
||||
| `ad` | A/D Line | *(no params)* | Accumulation/Distribution — running total of money flow multiplier × volume. |
|
||||
| `adosc` | Chaikin Oscillator | `fast=3, slow=10` | EMA difference of A/D line. Positive = accumulation; negative = distribution. |
|
||||
| `cmf` | Chaikin MF | `length=20` | Sum of money flow volume / total volume. +0.25 strong buy pressure; −0.25 strong sell. |
|
||||
| `eom` | Ease of Movement | `length=14` | Relates price change to volume. High value = price moved easily on low volume. |
|
||||
| `efi` | Elder's Force Index | `length=13` | Price change × volume. Positive spikes = strong buying; negative = strong selling. |
|
||||
| `kvo` | Klinger Oscillator | `fast=34, slow=55, signal=13` | EMA difference of a volume-force measure. Signal-line crossovers are trade signals. |
|
||||
| `pvt` | PVT | *(no params)* | Cumulative volume × % price change. Similar to OBV but uses % change rather than direction. |
|
||||
|
||||
### Statistics / Price Transforms
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `stdev` | Std Deviation | `length=20` | Standard deviation of close. Rises in volatile periods; used for volatility regimes. |
|
||||
| `linreg` | Lin Reg | `length=14` | Least-squares regression endpoint over `length` bars. Smooth trend line; not predictive. |
|
||||
| `slope` | Lin Reg Slope | `length=14` | Gradient of the regression line. Positive = upward trend; magnitude = steepness. |
|
||||
| `hl2` | HL2 | *(no params)* | `(high + low) / 2`. Simple midpoint of each bar. |
|
||||
| `hlc3` | HLC3 | *(no params)* | `(high + low + close) / 3`. Typical price, used in many indicator calculations. |
|
||||
| `ohlc4` | OHLC4 | *(no params)* | `(open + high + low + close) / 4`. Average price per bar. |
|
||||
|
||||
### Trend
|
||||
|
||||
| `pandas_ta_name` | Display Name | Key Parameters | Description & Interpretation |
|
||||
|------------------|--------------|----------------|-------------------------------|
|
||||
| `psar` | Parabolic SAR | `af0=0.02, af=0.02, max_af=0.2` | Trailing stop dots that follow price and flip on reversal. `af` controls acceleration. |
|
||||
| `vortex` | Vortex | `length=14` | VI+ and VI− measure upward vs downward movement. VI+ > VI− = uptrend and vice versa. |
|
||||
| `chop` | Choppiness | `length=14` | 0–100: high (>61.8) = choppy/sideways, low (<38.2) = strong trend. Does not give direction. |
|
||||
|
||||
---
|
||||
|
||||
## Section B — Workspace Format & Tools
|
||||
|
||||
### Indicators Store
|
||||
|
||||
The `indicators` workspace store has an `indicators` wrapper key containing a JSON object keyed by indicator ID:
|
||||
|
||||
```
|
||||
{
|
||||
"indicators": {
|
||||
"ind_1234567890": {
|
||||
"id": "ind_1234567890", // unique ID, use "ind_" + Date.now()
|
||||
"pandas_ta_name": "rsi", // lowercase pandas-ta function name from Section A
|
||||
"instance_name": "rsi_1234567890", // id without "ind_" prefix
|
||||
"parameters": { "length": 14 }, // pandas-ta keyword args
|
||||
"visible": true,
|
||||
"pane": "chart", // "chart" = price pane; "indicator_pane_1" etc for separate
|
||||
"symbol": "BTC/USDT.BINANCE", // optional, current chart symbol
|
||||
"created_at": 1712345678, // optional unix timestamp
|
||||
"modified_at": 1712345678 // optional unix timestamp
|
||||
|
||||
// These fields are managed by the web client — do NOT set them:
|
||||
// "tv_study_id", "tv_indicator_name", "tv_inputs"
|
||||
},
|
||||
...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Important**: All patch paths must start with `/indicators/`. The indicator objects live under the `indicators` key, not at the top level of the store.
|
||||
|
||||
**Pane values:**
|
||||
- `"chart"` — price pane overlays (MAs, BBands, SuperTrend, Ichimoku, VWAP, etc.)
|
||||
- `"indicator_pane_1"`, `"indicator_pane_2"`, etc. — separate sub-panes below the chart
|
||||
|
||||
**General rule**: Overlap/MA indicators go on `"chart"`. Momentum, Volume, Volatility (ATR, Donchian, Keltner), and Statistics indicators go on `"indicator_pane_N"`. When adding multiple separate-pane indicators, reuse the same pane number if they logically belong together, or use a new number.
|
||||
|
||||
### Reading Indicators
|
||||
|
||||
```
|
||||
workspace_read("indicators")
|
||||
```
|
||||
Returns the full store object. Always read first before modifying so you know the current state. The indicator objects are under the `indicators` key: `result.data.indicators`.
|
||||
|
||||
When asked to list or describe current indicators, include:
|
||||
- The display name and parameters
|
||||
- A brief description of what each indicator measures and how to interpret it (from Section A)
|
||||
- Which pane it's on
|
||||
|
||||
### Adding an Indicator
|
||||
|
||||
Generate a unique ID as `"ind_" + timestamp` (e.g. `"ind_1712345678123"`).
|
||||
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{
|
||||
"op": "add",
|
||||
"path": "/indicators/ind_1712345678123",
|
||||
"value": {
|
||||
"id": "ind_1712345678123",
|
||||
"pandas_ta_name": "rsi",
|
||||
"instance_name": "rsi_1712345678123",
|
||||
"parameters": { "length": 14 },
|
||||
"visible": true,
|
||||
"pane": "indicator_pane_1",
|
||||
"created_at": 1712345678
|
||||
}
|
||||
}
|
||||
])
|
||||
```
|
||||
|
||||
### Modifying an Indicator
|
||||
|
||||
Read first to get the ID, then patch the specific field:
|
||||
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters/length", "value": 21 }
|
||||
])
|
||||
```
|
||||
|
||||
To modify multiple parameters at once:
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{ "op": "replace", "path": "/indicators/ind_1712345678123/parameters", "value": { "fast": 8, "slow": 21, "signal": 9 } }
|
||||
])
|
||||
```
|
||||
|
||||
### Removing an Indicator
|
||||
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{ "op": "remove", "path": "/indicators/ind_1712345678123" }
|
||||
])
|
||||
```
|
||||
|
||||
### Visibility Toggle
|
||||
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{ "op": "replace", "path": "/indicators/ind_1712345678123/visible", "value": false }
|
||||
])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Section C — Custom Indicators
|
||||
|
||||
Custom indicators are Python scripts in the `indicator` category. Use `python_write` / `python_edit` / `python_read` / `python_list` exactly as you would for research scripts, but with `category="indicator"`.
|
||||
|
||||
### Writing a Custom Indicator Script
|
||||
|
||||
A custom indicator must define a **top-level function whose name exactly matches the sanitized directory name** (the name you passed to `python_write`, after sanitization). It receives the OHLCV columns it needs as positional arguments, matching `input_series` in the metadata. It must return a `pd.Series` (single output) or `pd.DataFrame` (multi-output, column names must match `output_columns`).
|
||||
|
||||
```python
|
||||
# Example: volume-weighted RSI (function name = "vw_rsi", directory name = "vw_rsi")
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
def vw_rsi(close: pd.Series, volume: pd.Series, length: int = 14) -> pd.Series:
|
||||
"""Volume-weighted RSI: RSI scaled by relative volume."""
|
||||
rsi = ta.rsi(close, length=length)
|
||||
vol_weight = volume / volume.rolling(length).mean()
|
||||
return (rsi * vol_weight).rolling(3).mean()
|
||||
```
|
||||
|
||||
For multi-output (e.g. bands-style), return a `pd.DataFrame` with columns matching `output_columns`:
|
||||
|
||||
```python
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
def vol_bands(close: pd.Series, volume: pd.Series, length: int = 20) -> pd.DataFrame:
|
||||
"""Volatility bands based on volume-weighted std."""
|
||||
mid = close.rolling(length).mean()
|
||||
std = (close * (volume / volume.rolling(length).mean())).rolling(length).std()
|
||||
return pd.DataFrame({"upper": mid + 2 * std, "mid": mid, "lower": mid - 2 * std})
|
||||
```
|
||||
|
||||
After writing a custom indicator with `python_write`, add it to the workspace using `pandas_ta_name: "custom_<sanitized_name>"`.
|
||||
|
||||
### Metadata for Custom Indicators
|
||||
|
||||
When writing a custom indicator you **must** supply complete metadata so the web client can auto-construct the TradingView plotter. Pass these fields in the `metadata` argument to `python_write`:
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|---|---|---|---|
|
||||
| `parameters` | dict | yes | Parameter schema: `{param_name: {type, default, description?, min?, max?}}` |
|
||||
| `input_series` | list[str] | yes | OHLCV columns passed to the function in order. Valid: `open`, `high`, `low`, `close`, `volume` |
|
||||
| `output_columns` | list[dict] | yes | Per-series descriptors — see table below |
|
||||
| `pane` | str | yes | `"price"` (overlaid on candles) or `"separate"` (sub-pane) |
|
||||
| `filled_areas` | list[dict] | no | Shaded fills between two series — see below |
|
||||
| `bands` | list[dict] | no | Horizontal reference lines (constant-value series recommended instead — see note) |
|
||||
|
||||
#### `output_columns` format
|
||||
|
||||
Each entry describes one output series:
|
||||
|
||||
```python
|
||||
{
|
||||
"name": "value", # column name returned by the function (or "value" for Series)
|
||||
"display_name": "My Ind", # optional label shown in TV legend
|
||||
"description": "...", # optional
|
||||
"plot": { # optional — omit for default (line, auto-color, width 2)
|
||||
"style": 0, # LineStudyPlotStyle integer (see table below)
|
||||
"color": "#2196F3", # CSS hex; omit for auto-assigned color
|
||||
"linewidth": 2, # 1–4, default 2
|
||||
"visible": True # default True
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**`plot.style` values (LineStudyPlotStyle):**
|
||||
|
||||
| Value | Renders as |
|
||||
|---|---|
|
||||
| `0` | Line (default) |
|
||||
| `1` | Histogram bars |
|
||||
| `3` | Dots / Cross markers |
|
||||
| `4` | Area (filled under line) |
|
||||
| `5` | Columns (vertical bars) |
|
||||
| `6` | Circles |
|
||||
| `9` | Step line |
|
||||
|
||||
#### `filled_areas` format (optional)
|
||||
|
||||
Shaded fills between two series. The web client supports up to 4 fills, paired by index to output column pairs `(0,1)`, `(2,3)`, `(4,5)`, `(6,7)`. For a fill to work, the two series it shades must be at consecutive even/odd positions in `output_columns`.
|
||||
|
||||
```python
|
||||
[
|
||||
{
|
||||
"id": "fill_upper_lower", # descriptive id (informational only)
|
||||
"type": "plot_plot", # always "plot_plot" for fills between series
|
||||
"series1": "upper", # output_column name of the first boundary
|
||||
"series2": "lower", # output_column name of the second boundary
|
||||
"color": "#2196F3", # CSS hex fill color (default: auto)
|
||||
"opacity": 0.1 # 0.0–1.0 (default 0.1)
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
**Note on horizontal reference lines (`bands`):** TradingView's native band mechanism fixes the level value at registration time and cannot be changed per-instance. Instead, add a constant-value output column to your function and mark it with a thin (`linewidth: 1`) line style, as in the example below:
|
||||
|
||||
```python
|
||||
# In your indicator function:
|
||||
result["ob"] = 70.0 # constant overbought level
|
||||
result["os"] = 30.0 # constant oversold level
|
||||
```
|
||||
|
||||
```python
|
||||
# In output_columns metadata:
|
||||
{"name": "ob", "display_name": "OB", "plot": {"style": 0, "color": "#ef5350", "linewidth": 1}},
|
||||
{"name": "os", "display_name": "OS", "plot": {"style": 0, "color": "#26a69a", "linewidth": 1}},
|
||||
```
|
||||
|
||||
#### Complete examples
|
||||
|
||||
**Single oscillator line (volume-weighted RSI):**
|
||||
|
||||
```python
|
||||
python_write(
|
||||
category="indicator",
|
||||
name="vw_rsi",
|
||||
description="RSI weighted by relative volume.",
|
||||
code="""
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
def vw_rsi(close, volume, length=14):
|
||||
rsi = ta.rsi(close, length=length)
|
||||
vol_weight = volume / volume.rolling(length).mean()
|
||||
return (rsi * vol_weight).rolling(3).mean()
|
||||
""",
|
||||
metadata={
|
||||
"parameters": {
|
||||
"length": {"type": "int", "default": 14, "min": 2, "max": 200, "description": "RSI period"}
|
||||
},
|
||||
"input_series": ["close", "volume"],
|
||||
"output_columns": [
|
||||
{"name": "value", "display_name": "VW-RSI", "plot": {"style": 0}}
|
||||
],
|
||||
"pane": "separate"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
**Bollinger Bands with fill (upper + mid + lower, shaded between upper and lower):**
|
||||
|
||||
```python
|
||||
python_write(
|
||||
category="indicator",
|
||||
name="my_bbands",
|
||||
description="Custom Bollinger Bands.",
|
||||
code="""
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
def my_bbands(close, length=20, std=2.0):
|
||||
bb = ta.bbands(close, length=length, std=std)
|
||||
return pd.DataFrame({
|
||||
"upper": bb.iloc[:, 0],
|
||||
"mid": bb.iloc[:, 1],
|
||||
"lower": bb.iloc[:, 2],
|
||||
})
|
||||
""",
|
||||
metadata={
|
||||
"parameters": {
|
||||
"length": {"type": "int", "default": 20, "min": 5, "max": 500},
|
||||
"std": {"type": "float", "default": 2.0, "min": 0.5, "max": 5.0}
|
||||
},
|
||||
"input_series": ["close"],
|
||||
"output_columns": [
|
||||
{"name": "upper", "display_name": "Upper", "plot": {"style": 0, "color": "#2196F3"}},
|
||||
{"name": "lower", "display_name": "Lower", "plot": {"style": 0, "color": "#2196F3"}},
|
||||
{"name": "mid", "display_name": "Mid", "plot": {"style": 0, "color": "#FF9800"}}
|
||||
],
|
||||
"pane": "price",
|
||||
"filled_areas": [
|
||||
{"id": "fill", "type": "plot_plot", "series1": "upper", "series2": "lower",
|
||||
"color": "#2196F3", "opacity": 0.08}
|
||||
]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
Note: `upper` and `lower` are at positions 0 and 1 in `output_columns`, which maps to fill slot `fill_0` (the only fill slot pairing positions 0 and 1).
|
||||
|
||||
**MACD-style (line + signal + histogram):**
|
||||
|
||||
```python
|
||||
"output_columns": [
|
||||
{"name": "macd", "display_name": "MACD", "plot": {"style": 0, "color": "#2196F3"}},
|
||||
{"name": "signal", "display_name": "Signal", "plot": {"style": 0, "color": "#FF9800"}},
|
||||
{"name": "hist", "display_name": "Hist", "plot": {"style": 1, "color": "#4CAF50"}}
|
||||
],
|
||||
"pane": "separate"
|
||||
```
|
||||
|
||||
### Adding a Custom Indicator to the Workspace
|
||||
|
||||
After writing and validating, patch the workspace with **both** the standard fields and `custom_metadata` (the web client uses this to build the TradingView custom study):
|
||||
|
||||
```
|
||||
workspace_patch("indicators", [
|
||||
{
|
||||
"op": "add",
|
||||
"path": "/indicators/ind_1712345678123",
|
||||
"value": {
|
||||
"id": "ind_1712345678123",
|
||||
"pandas_ta_name": "custom_vw_rsi",
|
||||
"instance_name": "custom_vw_rsi_1712345678123",
|
||||
"parameters": { "length": 14 },
|
||||
"visible": true,
|
||||
"pane": "indicator_pane_1",
|
||||
"created_at": 1712345678,
|
||||
"custom_metadata": {
|
||||
"display_name": "Volume-Weighted RSI",
|
||||
"parameters": {
|
||||
"length": {"type": "int", "default": 14, "min": 2, "max": 200, "description": "RSI period"}
|
||||
},
|
||||
"input_series": ["close", "volume"],
|
||||
"output_columns": [
|
||||
{"name": "value", "display_name": "VW-RSI", "plot": {"style": 0}}
|
||||
],
|
||||
"pane": "separate"
|
||||
}
|
||||
}
|
||||
}
|
||||
])
|
||||
```
|
||||
|
||||
The `custom_metadata` block must match what was stored in the indicator's `metadata.json`.
|
||||
|
||||
### Validating with evaluate_indicator
|
||||
|
||||
Use `evaluate_indicator` to test any indicator (standard or custom) before adding it to the workspace. This confirms it computes correctly on real data:
|
||||
|
||||
```
|
||||
evaluate_indicator(
|
||||
symbol="BTC/USDT.BINANCE",
|
||||
from_time="30 days ago",
|
||||
to_time="now",
|
||||
period_seconds=3600,
|
||||
pandas_ta_name="custom_vw_rsi",
|
||||
parameters={"length": 14}
|
||||
)
|
||||
```
|
||||
|
||||
Returns a structured array of `{timestamp, value}` (or multiple value columns for multi-output indicators like MACD, BBands). Use the results to confirm the indicator is computing as expected before patching the workspace.
|
||||
|
||||
---
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Read first**: Always call `workspace_read("indicators")` before any modification so you know what's already on the chart.
|
||||
|
||||
2. **Check before creating custom indicators**: Before writing a new custom indicator with `python_write`, call `python_list(category="indicator")` to see what already exists. If an indicator with the same name (or a matching sanitized name) is already present, reuse or update it rather than creating a duplicate. Two indicator directories with different capitalizations (e.g. `TrendFlex` and `trendflex`) map to the same `pandas_ta_name` (`custom_trendflex`) and will conflict.
|
||||
|
||||
3. **List descriptively**: When asked what indicators are showing, include the brief description and interpretation from Section A for each — not just the name and parameters.
|
||||
|
||||
4. **Validate custom indicators**: Use `evaluate_indicator` after writing a custom indicator script to confirm it runs without errors before adding to workspace.
|
||||
|
||||
5. **Patch, don't overwrite**: Always use `workspace_patch` — never call `workspace_write` on the indicators store, as that would replace all indicators including ones the user added manually via the UI.
|
||||
|
||||
6. **Confirm changes**: After patching, briefly confirm what was added/changed/removed and what the indicator does (one sentence from Section A).
|
||||
|
||||
7. **Pane assignment**: When adding indicators, assign the correct pane type. When adding multiple momentum indicators, stack them in separate panes (`indicator_pane_1`, `indicator_pane_2`, etc.) unless the user asks otherwise.
|
||||
@@ -20,7 +20,7 @@ export interface ResearchResult {
|
||||
* Research Subagent
|
||||
*
|
||||
* Specialized agent for creating and running Python research scripts.
|
||||
* Uses category_* MCP tools to:
|
||||
* Uses python_* MCP tools to:
|
||||
* - Create/edit research scripts with DataAPI and ChartingAPI
|
||||
* - Execute scripts and capture matplotlib charts
|
||||
* - Iterate on errors with autonomous coding loop
|
||||
|
||||
@@ -14,22 +14,22 @@ Create Python scripts that:
|
||||
|
||||
You have direct access to these MCP tools:
|
||||
|
||||
- **category_write**: Create a new research script
|
||||
- Required: category="research", name, description, code
|
||||
- Optional: metadata (with conda_packages list if needed)
|
||||
- Automatically executes the script after writing
|
||||
- **python_write**: Create a new script (research, strategy, or indicator category)
|
||||
- Required: category, name, description, code
|
||||
- Optional: metadata (category-specific fields — see below)
|
||||
- For research: automatically executes the script after writing
|
||||
- Returns validation results and execution output (text + images)
|
||||
|
||||
- **category_edit**: Update an existing research script
|
||||
- Required: category="research", name
|
||||
- **python_edit**: Update an existing script
|
||||
- Required: category, name
|
||||
- Optional: code, description, metadata
|
||||
- Automatically re-executes if code is updated
|
||||
- For research: automatically re-executes if code is updated
|
||||
- Returns validation results and execution output
|
||||
|
||||
- **category_read**: Read an existing research script
|
||||
- **python_read**: Read an existing research script
|
||||
- Returns: code, metadata
|
||||
|
||||
- **category_list**: List all research scripts
|
||||
- **python_list**: List all research scripts
|
||||
- Returns: array of {name, description, metadata}
|
||||
|
||||
- **execute_research**: Manually run a research script
|
||||
@@ -186,15 +186,59 @@ Key defaults to keep in mind:
|
||||
|
||||
For multi-output indicator column extraction patterns and complete charting examples, fetch `pandas-ta-reference.md` from your knowledge base.
|
||||
|
||||
## Strategy Metadata Format
|
||||
|
||||
When writing or editing a strategy (`category="strategy"`), always include a `metadata` object with:
|
||||
|
||||
- **`data_feeds`** — list of feed descriptors the strategy requires:
|
||||
```json
|
||||
[
|
||||
{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "Primary BTC/USDT hourly feed"},
|
||||
{"symbol": "ETH/USDT.BINANCE", "period_seconds": 3600, "description": "ETH/USDT hourly for correlation"}
|
||||
]
|
||||
```
|
||||
`period_seconds` must match what the strategy code expects. Use the same values when calling `backtest_strategy`.
|
||||
|
||||
- **`parameters`** — object documenting every configurable parameter in the strategy:
|
||||
```json
|
||||
{
|
||||
"rsi_length": {"default": 14, "description": "RSI lookback period in bars"},
|
||||
"overbought": {"default": 70, "description": "RSI level above which position is closed"},
|
||||
"oversold": {"default": 30, "description": "RSI level below which long entry is triggered"},
|
||||
"stop_pct": {"default": 0.02, "description": "Stop-loss as a fraction of entry price (e.g. 0.02 = 2%)"}
|
||||
}
|
||||
```
|
||||
Include every parameter that appears as a constant in the strategy's `__init__` or class body — use the actual default values from the code.
|
||||
|
||||
Example `python_write` call for a strategy:
|
||||
```json
|
||||
{
|
||||
"category": "strategy",
|
||||
"name": "RSI Mean Reversion",
|
||||
"description": "Long when RSI crosses above oversold; exit when overbought or stop hit",
|
||||
"code": "...",
|
||||
"metadata": {
|
||||
"data_feeds": [
|
||||
{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "BTC/USDT hourly OHLCV + order flow"}
|
||||
],
|
||||
"parameters": {
|
||||
"rsi_length": {"default": 14, "description": "RSI lookback period"},
|
||||
"overbought": {"default": 70, "description": "Exit long above this RSI level"},
|
||||
"oversold": {"default": 30, "description": "Enter long below this RSI level"}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Coding Loop Pattern
|
||||
|
||||
When a user requests analysis:
|
||||
|
||||
1. **Understand the request**: What data is needed? What analysis? What visualization?
|
||||
|
||||
2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `category_write` or `category_edit`. Check first with `category_read` — if the script already exists, use `category_edit` to update it rather than creating a new one with `category_write`.
|
||||
2. **Use the provided name**: The instruction will begin with `Research script name: "<name>"`. Always use that exact name when calling `python_write` or `python_edit`. Check first with `python_read` — if the script already exists, use `python_edit` to update it rather than creating a new one with `python_write`.
|
||||
|
||||
3. **Write the script**: Use `category_write` (new) or `category_edit` (existing)
|
||||
3. **Write the script**: Use `python_write` (new) or `python_edit` (existing)
|
||||
- Write clean, well-commented Python code
|
||||
- Include proper error handling
|
||||
- Use appropriate ticker symbols, time ranges, and periods
|
||||
@@ -208,7 +252,7 @@ When a user requests analysis:
|
||||
|
||||
5. **Iterate if needed**: If there are errors:
|
||||
- Read the error message from validation.output or execution text
|
||||
- Use `category_edit` to fix the script
|
||||
- Use `python_edit` to fix the script
|
||||
- The script will auto-execute again
|
||||
|
||||
6. **Return results**: Once successful, summarize what was done
|
||||
@@ -246,7 +290,7 @@ When a user requests analysis:
|
||||
User: "Show me BTC price action for the last 7 days with volume"
|
||||
|
||||
You:
|
||||
1. Call `category_write` with:
|
||||
1. Call `python_write` with:
|
||||
- name: "BTC 7-Day Price Action"
|
||||
- description: "BTC/USDT price and volume analysis for the last 7 days"
|
||||
- code: (Python script that fetches data and creates chart)
|
||||
|
||||
30
gateway/src/harness/subagents/web-explore/config.yaml
Normal file
30
gateway/src/harness/subagents/web-explore/config.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
# Web Explore Subagent Configuration
|
||||
|
||||
name: web-explore
|
||||
description: Searches the web and academic papers, fetches content, and returns a textual summary
|
||||
|
||||
# Model configuration
|
||||
model: claude-sonnet-4-6
|
||||
temperature: 0.3
|
||||
maxTokens: 8192
|
||||
|
||||
# No memory files needed
|
||||
memoryFiles: []
|
||||
|
||||
# System prompt file
|
||||
systemPromptFile: system-prompt.md
|
||||
|
||||
# Capabilities this subagent provides
|
||||
capabilities:
|
||||
- web_search
|
||||
- page_fetch
|
||||
- academic_search
|
||||
- content_summarization
|
||||
|
||||
# Tools available to this subagent (all platform tools, no MCP needed)
|
||||
tools:
|
||||
platform:
|
||||
- web_search
|
||||
- fetch_page
|
||||
- arxiv_search
|
||||
mcp: []
|
||||
92
gateway/src/harness/subagents/web-explore/index.ts
Normal file
92
gateway/src/harness/subagents/web-explore/index.ts
Normal file
@@ -0,0 +1,92 @@
|
||||
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { SystemMessage } from '@langchain/core/messages';
|
||||
import { createReactAgent } from '@langchain/langgraph/prebuilt';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
/**
|
||||
* Web Explore Subagent
|
||||
*
|
||||
 * Accepts a research instruction, searches the web or arXiv
|
||||
* for academic queries, fetches relevant page/PDF content, and returns a
|
||||
* markdown summary with cited sources.
|
||||
*
|
||||
* No MCP client needed — operates entirely through platform tools.
|
||||
*/
|
||||
export class WebExploreSubagent extends BaseSubagent {
|
||||
constructor(
|
||||
config: SubagentConfig,
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
tools?: any[]
|
||||
) {
|
||||
super(config, model, logger, undefined, tools);
|
||||
}
|
||||
|
||||
async execute(context: SubagentContext, instruction: string): Promise<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
instruction: instruction.substring(0, 200),
|
||||
toolCount: this.tools.length,
|
||||
toolNames: this.tools.map(t => t.name),
|
||||
},
|
||||
'Web explore subagent starting'
|
||||
);
|
||||
|
||||
const initialMessages = this.buildMessages(context, instruction);
|
||||
const systemMessage = initialMessages[0];
|
||||
const humanMessage = initialMessages[initialMessages.length - 1];
|
||||
|
||||
const agent = createReactAgent({
|
||||
llm: this.model,
|
||||
tools: this.tools,
|
||||
prompt: systemMessage as SystemMessage,
|
||||
});
|
||||
|
||||
const result = await agent.invoke(
|
||||
{ messages: [humanMessage] },
|
||||
{ recursionLimit: 15 }
|
||||
);
|
||||
|
||||
const allMessages: any[] = result.messages ?? [];
|
||||
|
||||
this.logger.info({ messageCount: allMessages.length }, 'Web explore subagent graph completed');
|
||||
|
||||
const lastAI = [...allMessages].reverse().find(
|
||||
(m: any) => m.constructor?.name === 'AIMessage' || m._getType?.() === 'ai'
|
||||
);
|
||||
|
||||
const finalText = lastAI
|
||||
? (typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content))
|
||||
: 'No results found.';
|
||||
|
||||
this.logger.info({ textLength: finalText.length }, 'Web explore subagent finished');
|
||||
|
||||
return finalText;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function to create and initialize WebExploreSubagent
|
||||
*/
|
||||
export async function createWebExploreSubagent(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
basePath: string,
|
||||
tools?: any[]
|
||||
): Promise<WebExploreSubagent> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const { join } = await import('path');
|
||||
const yaml = await import('js-yaml');
|
||||
|
||||
const configPath = join(basePath, 'config.yaml');
|
||||
const configContent = await readFile(configPath, 'utf-8');
|
||||
const config = yaml.load(configContent) as SubagentConfig;
|
||||
|
||||
const subagent = new WebExploreSubagent(config, model, logger, tools);
|
||||
await subagent.initialize(basePath);
|
||||
|
||||
return subagent;
|
||||
}
|
||||
33
gateway/src/harness/subagents/web-explore/system-prompt.md
Normal file
33
gateway/src/harness/subagents/web-explore/system-prompt.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Web Explore Agent
|
||||
|
||||
You are a research assistant that searches the web and academic databases to answer questions or gather information according to the given instructions.
|
||||
|
||||
## Tools
|
||||
|
||||
You have three tools:
|
||||
|
||||
- **`web_search`** — Search the web broadly (Tavily). Returns titles, URLs, and content summaries. Best for general information, news, documentation, proprietary/niche topics, trading indicators, software papers, and anything not likely to be on arXiv.
|
||||
- **`arxiv_search`** — Search arXiv for academic preprints. Returns titles, authors, abstracts, and PDF links. Use this **only** for peer-reviewed or academic research (e.g. machine learning, statistics, finance theory). Most trading indicators, technical analysis tools, and proprietary methods are NOT on arXiv.
|
||||
- **`fetch_page`** — Fetch the full content of a URL (web page or PDF). PDFs are automatically converted to text. Use this after searching to read the complete content of a promising result.
|
||||
|
||||
## Strategy
|
||||
|
||||
1. **Choose the right search tool first:**
|
||||
- Default to `web_search` for most queries — it covers the broadest range of sources including trading indicators, technical analysis, software documentation, and niche topics
|
||||
- Use `arxiv_search` only when the instruction is explicitly academic in nature (e.g. "find papers on", "peer-reviewed research on", "academic study of")
|
||||
- If `arxiv_search` returns nothing clearly relevant after 1–2 queries → switch to `web_search` immediately
|
||||
|
||||
2. **Search, then fetch:** After getting results, call `fetch_page` on the 2–3 most promising URLs to get full content.
|
||||
|
||||
3. **Don't loop on the same query:** If a search returns results but nothing useful, change your approach — try different keywords or a different tool. Never repeat the same search query.
|
||||
|
||||
4. **Synthesize:** Write a clear, well-structured markdown summary that directly addresses the instruction. Cite sources with inline links.
|
||||
|
||||
## Output format
|
||||
|
||||
Return a markdown response with:
|
||||
- A direct answer or summary addressing the instruction
|
||||
- Key findings or takeaways
|
||||
- Sources cited inline (e.g. `[Title](url)`)
|
||||
|
||||
Keep the response focused and concise — avoid padding or restating the question.
|
||||
@@ -9,11 +9,6 @@ export {
|
||||
type WorkflowEdgeCondition,
|
||||
} from './base-workflow.js';
|
||||
|
||||
export {
|
||||
StrategyValidationWorkflow,
|
||||
createStrategyValidationWorkflow,
|
||||
} from './strategy-validation/graph.js';
|
||||
|
||||
export {
|
||||
TradingRequestWorkflow,
|
||||
createTradingRequestWorkflow,
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
# Strategy Validation Workflow Configuration
|
||||
|
||||
name: strategy-validation
|
||||
description: Validates trading strategies with code review, backtest, and risk assessment
|
||||
|
||||
# Workflow settings
|
||||
timeout: 300000 # 5 minutes
|
||||
maxRetries: 3
|
||||
requiresApproval: true
|
||||
approvalNodes:
|
||||
- human_approval
|
||||
|
||||
# Validation loop settings
|
||||
maxValidationRetries: 3 # Max times to retry fixing errors
|
||||
minBacktestScore: 0.5 # Minimum Sharpe ratio to pass
|
||||
|
||||
# Model override (optional)
|
||||
model: claude-sonnet-4-6
|
||||
temperature: 0.3
|
||||
@@ -1,138 +0,0 @@
|
||||
import { StateGraph } from '@langchain/langgraph';
|
||||
import { BaseWorkflow, type WorkflowConfig } from '../base-workflow.js';
|
||||
import { StrategyValidationState, type StrategyValidationStateType } from './state.js';
|
||||
import {
|
||||
createCodeReviewNode,
|
||||
createFixCodeNode,
|
||||
createBacktestNode,
|
||||
createRiskAssessmentNode,
|
||||
createHumanApprovalNode,
|
||||
createRecommendationNode,
|
||||
} from './nodes.js';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
|
||||
|
||||
/**
|
||||
* Strategy Validation Workflow
|
||||
*
|
||||
* Multi-step workflow with validation loop:
|
||||
* 1. Code Review (using CodeReviewerSubagent)
|
||||
* 2. If issues found → Fix Code → Loop back to Code Review
|
||||
* 3. Backtest (using user's MCP server)
|
||||
* 4. If backtest fails → Fix Code → Loop back to Code Review
|
||||
* 5. Risk Assessment
|
||||
* 6. Human Approval (pause for user input)
|
||||
* 7. Final Recommendation
|
||||
*
|
||||
* Features:
|
||||
* - Validation loop with max retries
|
||||
* - Human-in-the-loop approval gate
|
||||
* - Multi-file memory from CodeReviewerSubagent
|
||||
* - Comprehensive state tracking
|
||||
*/
|
||||
export class StrategyValidationWorkflow extends BaseWorkflow<StrategyValidationStateType> {
|
||||
constructor(
|
||||
config: WorkflowConfig,
|
||||
private model: BaseChatModel,
|
||||
private codeReviewer: CodeReviewerSubagent,
|
||||
private mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
super(config, logger);
|
||||
}
|
||||
|
||||
buildGraph(): any {
|
||||
const graph = new StateGraph(StrategyValidationState);
|
||||
|
||||
// Create nodes
|
||||
const codeReviewNode = createCodeReviewNode(this.codeReviewer, this.logger);
|
||||
const fixCodeNode = createFixCodeNode(this.model, this.logger);
|
||||
const backtestNode = createBacktestNode(this.mcpBacktestFn, this.logger);
|
||||
const riskAssessmentNode = createRiskAssessmentNode(this.model, this.logger);
|
||||
const humanApprovalNode = createHumanApprovalNode(this.logger);
|
||||
const recommendationNode = createRecommendationNode(this.model, this.logger);
|
||||
|
||||
// Add nodes to graph
|
||||
graph
|
||||
.addNode('code_review', codeReviewNode)
|
||||
.addNode('fix_code', fixCodeNode)
|
||||
.addNode('backtest', backtestNode)
|
||||
.addNode('risk_assessment', riskAssessmentNode)
|
||||
.addNode('human_approval', humanApprovalNode)
|
||||
.addNode('recommendation', recommendationNode);
|
||||
|
||||
// Define edges
|
||||
(graph as any).addEdge('__start__', 'code_review');
|
||||
|
||||
// Conditional: After code review, fix if needed or proceed to backtest
|
||||
(graph as any).addConditionalEdges('code_review', (state: any) => {
|
||||
if (state.needsFixing && state.validationRetryCount < 3) {
|
||||
return 'fix_code';
|
||||
}
|
||||
if (state.needsFixing && state.validationRetryCount >= 3) {
|
||||
return 'recommendation'; // Give up, generate rejection
|
||||
}
|
||||
return 'backtest';
|
||||
});
|
||||
|
||||
// After fixing code, loop back to code review
|
||||
(graph as any).addEdge('fix_code', 'code_review');
|
||||
|
||||
// Conditional: After backtest, fix if failed or proceed to risk assessment
|
||||
(graph as any).addConditionalEdges('backtest', (state: any) => {
|
||||
if (!state.backtestPassed && state.validationRetryCount < 3) {
|
||||
return 'fix_code';
|
||||
}
|
||||
if (!state.backtestPassed && state.validationRetryCount >= 3) {
|
||||
return 'recommendation'; // Give up
|
||||
}
|
||||
return 'risk_assessment';
|
||||
});
|
||||
|
||||
// After risk assessment, go to human approval
|
||||
(graph as any).addEdge('risk_assessment', 'human_approval');
|
||||
|
||||
// Conditional: After human approval, proceed to recommendation or reject
|
||||
(graph as any).addConditionalEdges('human_approval', (state: any) => {
|
||||
return state.humanApproved ? 'recommendation' : '__end__';
|
||||
});
|
||||
|
||||
// Final recommendation is terminal
|
||||
(graph as any).addEdge('recommendation', '__end__');
|
||||
|
||||
return graph;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function to create and compile workflow
|
||||
*/
|
||||
export async function createStrategyValidationWorkflow(
|
||||
model: BaseChatModel,
|
||||
codeReviewer: CodeReviewerSubagent,
|
||||
mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
|
||||
logger: FastifyBaseLogger,
|
||||
configPath: string
|
||||
): Promise<StrategyValidationWorkflow> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const yaml = await import('js-yaml');
|
||||
|
||||
// Load config
|
||||
const configContent = await readFile(configPath, 'utf-8');
|
||||
const config = yaml.load(configContent) as WorkflowConfig;
|
||||
|
||||
// Create workflow
|
||||
const workflow = new StrategyValidationWorkflow(
|
||||
config,
|
||||
model,
|
||||
codeReviewer,
|
||||
mcpBacktestFn,
|
||||
logger
|
||||
);
|
||||
|
||||
// Compile graph
|
||||
workflow.compile();
|
||||
|
||||
return workflow;
|
||||
}
|
||||
@@ -1,233 +0,0 @@
|
||||
import type { StrategyValidationStateType } from './state.js';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
|
||||
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
|
||||
|
||||
/**
|
||||
* Node: Code Review
|
||||
* Reviews strategy code using CodeReviewerSubagent
|
||||
*/
|
||||
export function createCodeReviewNode(
|
||||
codeReviewer: CodeReviewerSubagent,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Code review');
|
||||
|
||||
const review = await codeReviewer.execute(
|
||||
{ userContext: state.userContext },
|
||||
state.strategyCode
|
||||
);
|
||||
|
||||
// Simple issue detection (in production, parse structured output)
|
||||
const hasIssues = review.toLowerCase().includes('critical') ||
|
||||
review.toLowerCase().includes('reject');
|
||||
|
||||
return {
|
||||
codeReview: review,
|
||||
codeIssues: hasIssues ? ['Issues detected in code review'] : [],
|
||||
needsFixing: hasIssues,
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Node: Fix Code Issues
|
||||
* Uses LLM to fix issues identified in code review
|
||||
*/
|
||||
export function createFixCodeNode(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Fixing code issues');
|
||||
|
||||
const systemPrompt = `You are a trading strategy developer.
|
||||
Fix the issues identified in the code review while maintaining the strategy's logic.
|
||||
Return only the corrected code without explanation.`;
|
||||
|
||||
const userPrompt = `Original code:
|
||||
\`\`\`typescript
|
||||
${state.strategyCode}
|
||||
\`\`\`
|
||||
|
||||
Code review feedback:
|
||||
${state.codeReview}
|
||||
|
||||
Provide the corrected code:`;
|
||||
|
||||
const response = await model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
const fixedCode = (response.content as string)
|
||||
.replace(/```typescript\n?/g, '')
|
||||
.replace(/```\n?/g, '')
|
||||
.trim();
|
||||
|
||||
return {
|
||||
strategyCode: fixedCode,
|
||||
validationRetryCount: state.validationRetryCount + 1,
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Node: Backtest Strategy
|
||||
* Runs backtest using user's MCP server
|
||||
*/
|
||||
export function createBacktestNode(
|
||||
mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Running backtest');
|
||||
|
||||
try {
|
||||
const results = await mcpBacktestFn(
|
||||
state.strategyCode,
|
||||
state.ticker,
|
||||
state.timeframe
|
||||
);
|
||||
|
||||
// Check if backtest passed (simplified)
|
||||
const sharpeRatio = (results.sharpeRatio as number) || 0;
|
||||
const passed = sharpeRatio > 0.5;
|
||||
|
||||
return {
|
||||
backtestResults: results,
|
||||
backtestPassed: passed,
|
||||
needsFixing: !passed,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Backtest failed');
|
||||
return {
|
||||
backtestResults: { error: (error as Error).message },
|
||||
backtestPassed: false,
|
||||
needsFixing: true,
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Node: Risk Assessment
|
||||
* Analyzes backtest results for risk
|
||||
*/
|
||||
export function createRiskAssessmentNode(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Risk assessment');
|
||||
|
||||
const systemPrompt = `You are a risk management expert.
|
||||
Analyze the strategy and backtest results to assess risk level.
|
||||
Provide: risk level (low/medium/high) and detailed assessment.`;
|
||||
|
||||
const userPrompt = `Strategy code:
|
||||
\`\`\`typescript
|
||||
${state.strategyCode}
|
||||
\`\`\`
|
||||
|
||||
Backtest results:
|
||||
${JSON.stringify(state.backtestResults, null, 2)}
|
||||
|
||||
Provide risk assessment in format:
|
||||
RISK_LEVEL: [low/medium/high]
|
||||
ASSESSMENT: [detailed explanation]`;
|
||||
|
||||
const response = await model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
const assessment = response.content as string;
|
||||
|
||||
// Parse risk level (simplified)
|
||||
let riskLevel: 'low' | 'medium' | 'high' = 'medium';
|
||||
if (assessment.includes('RISK_LEVEL: low')) riskLevel = 'low';
|
||||
if (assessment.includes('RISK_LEVEL: high')) riskLevel = 'high';
|
||||
|
||||
return {
|
||||
riskAssessment: assessment,
|
||||
riskLevel,
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Node: Human Approval
|
||||
* Pauses workflow for human review
|
||||
*/
|
||||
export function createHumanApprovalNode(logger: FastifyBaseLogger) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Awaiting human approval');
|
||||
|
||||
// In real implementation, this would:
|
||||
// 1. Send approval request to user's channel
|
||||
// 2. Store workflow state with interrupt
|
||||
// 3. Wait for user response
|
||||
// 4. Resume with approval decision
|
||||
|
||||
// For now, auto-approve if risk is low/medium and backtest passed
|
||||
const autoApprove = state.backtestPassed &&
|
||||
(state.riskLevel === 'low' || state.riskLevel === 'medium');
|
||||
|
||||
return {
|
||||
humanApproved: autoApprove,
|
||||
approvalComment: autoApprove ? 'Auto-approved: passed validation' : 'Needs manual review',
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Node: Final Recommendation
|
||||
* Generates final recommendation based on all steps
|
||||
*/
|
||||
export function createRecommendationNode(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger
|
||||
) {
|
||||
return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
|
||||
logger.info('Strategy validation: Generating recommendation');
|
||||
|
||||
const systemPrompt = `You are the final decision maker for strategy deployment.
|
||||
Based on all validation steps, provide a clear recommendation: approve, reject, or revise.`;
|
||||
|
||||
const userPrompt = `Strategy validation summary:
|
||||
|
||||
Code Review: ${state.codeIssues.length === 0 ? 'Passed' : 'Issues found'}
|
||||
Backtest: ${state.backtestPassed ? 'Passed' : 'Failed'}
|
||||
Risk Level: ${state.riskLevel}
|
||||
Human Approved: ${state.humanApproved}
|
||||
|
||||
Backtest Results:
|
||||
${JSON.stringify(state.backtestResults, null, 2)}
|
||||
|
||||
Risk Assessment:
|
||||
${state.riskAssessment}
|
||||
|
||||
Provide final recommendation (approve/reject/revise) and reasoning:`;
|
||||
|
||||
const response = await model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
const recommendation = response.content as string;
|
||||
|
||||
// Parse recommendation (simplified)
|
||||
let decision: 'approve' | 'reject' | 'revise' = 'revise';
|
||||
if (recommendation.toLowerCase().includes('approve')) decision = 'approve';
|
||||
if (recommendation.toLowerCase().includes('reject')) decision = 'reject';
|
||||
|
||||
return {
|
||||
recommendation: decision,
|
||||
recommendationReason: recommendation,
|
||||
output: recommendation,
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
import { Annotation } from '@langchain/langgraph';
|
||||
import { BaseWorkflowState } from '../base-workflow.js';
|
||||
|
||||
/**
|
||||
* Strategy validation workflow state
|
||||
*
|
||||
* Extends base workflow state with strategy-specific fields
|
||||
*/
|
||||
export const StrategyValidationState = Annotation.Root({
|
||||
...BaseWorkflowState.spec,
|
||||
|
||||
// Input
|
||||
strategyCode: Annotation<string>(),
|
||||
ticker: Annotation<string>(),
|
||||
timeframe: Annotation<string>(),
|
||||
|
||||
// Code review step
|
||||
codeReview: Annotation<string | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
codeIssues: Annotation<string[]>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => [],
|
||||
}),
|
||||
|
||||
// Backtest step
|
||||
backtestResults: Annotation<Record<string, unknown> | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
backtestPassed: Annotation<boolean>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => false,
|
||||
}),
|
||||
|
||||
// Risk assessment step
|
||||
riskAssessment: Annotation<string | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
riskLevel: Annotation<'low' | 'medium' | 'high' | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
|
||||
// Human approval step
|
||||
humanApproved: Annotation<boolean>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => false,
|
||||
}),
|
||||
approvalComment: Annotation<string | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
|
||||
// Validation loop control
|
||||
validationRetryCount: Annotation<number>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => 0,
|
||||
}),
|
||||
needsFixing: Annotation<boolean>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => false,
|
||||
}),
|
||||
|
||||
// Final output
|
||||
recommendation: Annotation<'approve' | 'reject' | 'revise' | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
recommendationReason: Annotation<string | null>({
|
||||
value: (left, right) => right ?? left,
|
||||
default: () => null,
|
||||
}),
|
||||
});
|
||||
|
||||
export type StrategyValidationStateType = typeof StrategyValidationState.State;
|
||||
@@ -1,5 +1,5 @@
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { ChatAnthropic } from '@langchain/anthropic';
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { type ModelMiddleware, NoopMiddleware, AnthropicCachingMiddleware } from './middleware.js';
|
||||
|
||||
@@ -10,7 +10,7 @@ export { NoopMiddleware, AnthropicCachingMiddleware };
|
||||
* Supported LLM providers
|
||||
*/
|
||||
export enum LLMProvider {
|
||||
ANTHROPIC = 'anthropic',
|
||||
DEEP_INFRA = 'deepinfra',
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -47,11 +47,13 @@ export interface LicenseModelsConfig {
|
||||
* Provider configuration with API keys
|
||||
*/
|
||||
export interface ProviderConfig {
|
||||
anthropicApiKey?: string;
|
||||
deepinfraApiKey?: string;
|
||||
defaultModel?: ModelConfig;
|
||||
licenseModels?: LicenseModelsConfig;
|
||||
}
|
||||
|
||||
const DEEP_INFRA_BASE_URL = 'https://api.deepinfra.com/v1/openai';
|
||||
|
||||
/**
|
||||
* LLM Provider factory
|
||||
* Creates model instances with unified interface across providers
|
||||
@@ -75,8 +77,8 @@ export class LLMProviderFactory {
|
||||
);
|
||||
|
||||
switch (modelConfig.provider) {
|
||||
case LLMProvider.ANTHROPIC:
|
||||
return this.createAnthropicModel(modelConfig);
|
||||
case LLMProvider.DEEP_INFRA:
|
||||
return this.createDeepInfraModel(modelConfig);
|
||||
|
||||
default:
|
||||
throw new Error(`Unsupported provider: ${modelConfig.provider}`);
|
||||
@@ -84,22 +86,24 @@ export class LLMProviderFactory {
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Anthropic Claude model
|
||||
* Create Deep Infra model via OpenAI-compatible API
|
||||
*/
|
||||
private createAnthropicModel(config: ModelConfig): { model: ChatAnthropic; middleware: AnthropicCachingMiddleware } {
|
||||
if (!this.config.anthropicApiKey) {
|
||||
throw new Error('Anthropic API key not configured');
|
||||
private createDeepInfraModel(config: ModelConfig): { model: ChatOpenAI; middleware: NoopMiddleware } {
|
||||
if (!this.config.deepinfraApiKey) {
|
||||
throw new Error('Deep Infra API key not configured');
|
||||
}
|
||||
|
||||
const model = new ChatAnthropic({
|
||||
const model = new ChatOpenAI({
|
||||
model: config.model,
|
||||
temperature: config.temperature ?? 0.7,
|
||||
maxTokens: config.maxTokens ?? 4096,
|
||||
anthropicApiKey: this.config.anthropicApiKey,
|
||||
clientOptions: { defaultHeaders: { 'anthropic-beta': 'prompt-caching-2024-07-31' } },
|
||||
apiKey: this.config.deepinfraApiKey,
|
||||
configuration: {
|
||||
baseURL: DEEP_INFRA_BASE_URL,
|
||||
},
|
||||
});
|
||||
|
||||
return { model, middleware: new AnthropicCachingMiddleware() };
|
||||
return { model, middleware: new NoopMiddleware() };
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -110,13 +114,13 @@ export class LLMProviderFactory {
|
||||
return this.config.defaultModel;
|
||||
}
|
||||
|
||||
if (!this.config.anthropicApiKey) {
|
||||
throw new Error('Anthropic API key not configured');
|
||||
if (!this.config.deepinfraApiKey) {
|
||||
throw new Error('Deep Infra API key not configured');
|
||||
}
|
||||
|
||||
return {
|
||||
provider: LLMProvider.ANTHROPIC,
|
||||
model: 'claude-sonnet-4-6',
|
||||
provider: LLMProvider.DEEP_INFRA,
|
||||
model: 'zai-org/GLM-5',
|
||||
};
|
||||
}
|
||||
|
||||
@@ -132,16 +136,12 @@ export class LLMProviderFactory {
|
||||
* Predefined model configurations
|
||||
*/
|
||||
export const MODELS = {
|
||||
CLAUDE_SONNET: {
|
||||
provider: LLMProvider.ANTHROPIC,
|
||||
model: 'claude-sonnet-4-6',
|
||||
GLM_5: {
|
||||
provider: LLMProvider.DEEP_INFRA,
|
||||
model: 'zai-org/GLM-5',
|
||||
},
|
||||
CLAUDE_HAIKU: {
|
||||
provider: LLMProvider.ANTHROPIC,
|
||||
model: 'claude-haiku-4-5-20251001',
|
||||
},
|
||||
CLAUDE_OPUS: {
|
||||
provider: LLMProvider.ANTHROPIC,
|
||||
model: 'claude-opus-4-6',
|
||||
QWEN_235B: {
|
||||
provider: LLMProvider.DEEP_INFRA,
|
||||
model: 'Qwen/Qwen3-235B-A22B-Instruct-2507',
|
||||
},
|
||||
} as const satisfies Record<string, ModelConfig>;
|
||||
|
||||
@@ -113,17 +113,17 @@ export class ModelRouter {
|
||||
// Fallback to hardcoded defaults
|
||||
if (license.licenseType === 'enterprise') {
|
||||
return isComplex
|
||||
? { provider: LLMProvider.ANTHROPIC, model: 'claude-opus-4-6' }
|
||||
: { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
|
||||
? { provider: LLMProvider.DEEP_INFRA, model: 'Qwen/Qwen3-235B-A22B-Instruct-2507' }
|
||||
: { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
}
|
||||
|
||||
if (license.licenseType === 'pro') {
|
||||
return isComplex
|
||||
? { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' }
|
||||
: { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
|
||||
? { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' }
|
||||
: { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
}
|
||||
|
||||
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
|
||||
return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -141,13 +141,13 @@ export class ModelRouter {
|
||||
// Fallback to hardcoded defaults
|
||||
switch (license.licenseType) {
|
||||
case 'enterprise':
|
||||
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
|
||||
return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
|
||||
case 'pro':
|
||||
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
|
||||
return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
|
||||
case 'free':
|
||||
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
|
||||
return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
|
||||
default:
|
||||
return this.defaultModel;
|
||||
@@ -166,8 +166,8 @@ export class ModelRouter {
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: use Haiku for cost efficiency
|
||||
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
|
||||
// Fallback: use GLM-5
|
||||
return { provider: LLMProvider.DEEP_INFRA, model: 'zai-org/GLM-5' };
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -195,12 +195,12 @@ export class ModelRouter {
|
||||
|
||||
// Fallback to hardcoded defaults
|
||||
if (license.licenseType === 'free') {
|
||||
const allowedModels = ['claude-haiku-4-5-20251001'];
|
||||
const allowedModels = ['zai-org/GLM-5'];
|
||||
return allowedModels.includes(model.model);
|
||||
}
|
||||
|
||||
if (license.licenseType === 'pro') {
|
||||
const blockedModels = ['claude-opus-4-6'];
|
||||
const blockedModels = ['Qwen/Qwen3-235B-A22B-Instruct-2507'];
|
||||
return !blockedModels.includes(model.model);
|
||||
}
|
||||
|
||||
|
||||
@@ -90,31 +90,28 @@ function loadConfig() {
|
||||
|
||||
// LLM provider API keys and model configuration
|
||||
providerConfig: {
|
||||
anthropicApiKey: secretsData.llm_providers?.anthropic_api_key || process.env.ANTHROPIC_API_KEY,
|
||||
openaiApiKey: secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
|
||||
googleApiKey: secretsData.llm_providers?.google_api_key || process.env.GOOGLE_API_KEY,
|
||||
openrouterApiKey: secretsData.llm_providers?.openrouter_api_key || process.env.OPENROUTER_API_KEY,
|
||||
deepinfraApiKey: secretsData.llm_providers?.deepinfra_api_key || process.env.DEEPINFRA_API_KEY,
|
||||
defaultModel: {
|
||||
provider: configData.defaults?.model_provider || 'anthropic',
|
||||
model: configData.defaults?.model || 'claude-sonnet-4-6',
|
||||
provider: configData.defaults?.model_provider || 'deepinfra',
|
||||
model: configData.defaults?.model || 'zai-org/GLM-5',
|
||||
},
|
||||
licenseModels: {
|
||||
free: {
|
||||
default: configData.license_models?.free?.default || 'claude-haiku-4-5-20251001',
|
||||
cost_optimized: configData.license_models?.free?.cost_optimized || 'claude-haiku-4-5-20251001',
|
||||
complex: configData.license_models?.free?.complex || 'claude-haiku-4-5-20251001',
|
||||
allowed_models: configData.license_models?.free?.allowed_models || ['claude-haiku-4-5-20251001'],
|
||||
default: configData.license_models?.free?.default || 'zai-org/GLM-5',
|
||||
cost_optimized: configData.license_models?.free?.cost_optimized || 'zai-org/GLM-5',
|
||||
complex: configData.license_models?.free?.complex || 'zai-org/GLM-5',
|
||||
allowed_models: configData.license_models?.free?.allowed_models || ['zai-org/GLM-5'],
|
||||
},
|
||||
pro: {
|
||||
default: configData.license_models?.pro?.default || 'claude-sonnet-4-6',
|
||||
cost_optimized: configData.license_models?.pro?.cost_optimized || 'claude-haiku-4-5-20251001',
|
||||
complex: configData.license_models?.pro?.complex || 'claude-sonnet-4-6',
|
||||
blocked_models: configData.license_models?.pro?.blocked_models || ['claude-opus-4-6'],
|
||||
default: configData.license_models?.pro?.default || 'zai-org/GLM-5',
|
||||
cost_optimized: configData.license_models?.pro?.cost_optimized || 'zai-org/GLM-5',
|
||||
complex: configData.license_models?.pro?.complex || 'zai-org/GLM-5',
|
||||
blocked_models: configData.license_models?.pro?.blocked_models || ['Qwen/Qwen3-235B-A22B-Instruct-2507'],
|
||||
},
|
||||
enterprise: {
|
||||
default: configData.license_models?.enterprise?.default || 'claude-sonnet-4-6',
|
||||
cost_optimized: configData.license_models?.enterprise?.cost_optimized || 'claude-haiku-4-5-20251001',
|
||||
complex: configData.license_models?.enterprise?.complex || 'claude-opus-4-6',
|
||||
default: configData.license_models?.enterprise?.default || 'zai-org/GLM-5',
|
||||
cost_optimized: configData.license_models?.enterprise?.cost_optimized || 'zai-org/GLM-5',
|
||||
complex: configData.license_models?.enterprise?.complex || 'Qwen/Qwen3-235B-A22B-Instruct-2507',
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -181,6 +178,9 @@ function loadConfig() {
|
||||
storageClass: configData.kubernetes?.storage_class || process.env.SANDBOX_STORAGE_CLASS || '',
|
||||
imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always',
|
||||
},
|
||||
|
||||
// Search API keys
|
||||
tavilyApiKey: secretsData.search?.tavily_api_key || process.env.TAVILY_API_KEY,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -200,10 +200,9 @@ const app = Fastify({
|
||||
},
|
||||
});
|
||||
|
||||
// Validate at least one LLM provider is configured
|
||||
const hasAnyProvider = Object.values(config.providerConfig).some(key => !!key);
|
||||
if (!hasAnyProvider) {
|
||||
app.log.error('At least one LLM provider API key is required (ANTHROPIC_API_KEY, OPENAI_API_KEY, GOOGLE_API_KEY, or OPENROUTER_API_KEY)');
|
||||
// Validate LLM provider is configured
|
||||
if (!config.providerConfig.deepinfraApiKey) {
|
||||
app.log.error('DEEPINFRA_API_KEY is required');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -407,6 +406,8 @@ app.log.debug('Initializing auth routes...');
|
||||
const authRoutes = new AuthRoutes({
|
||||
authService,
|
||||
betterAuth,
|
||||
containerManager,
|
||||
userService,
|
||||
});
|
||||
|
||||
// Register routes
|
||||
@@ -581,6 +582,7 @@ try {
|
||||
ohlcService: () => ohlcService,
|
||||
symbolIndexService: () => symbolIndexService,
|
||||
workspaceManager: undefined, // Will be set per-session
|
||||
tavilyApiKey: config.tavilyApiKey,
|
||||
});
|
||||
|
||||
// Register agent tool configurations
|
||||
@@ -588,20 +590,27 @@ try {
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'main',
|
||||
platformTools: ['symbol_lookup', 'get_chart_data'],
|
||||
mcpTools: ['category_list'], // category_list lets the main agent see existing research scripts
|
||||
mcpTools: ['python_list', 'backtest_strategy', 'list_active_strategies'],
|
||||
});
|
||||
|
||||
// Research subagent: only MCP tools for script creation/execution
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'research',
|
||||
platformTools: [], // No platform tools (works at script level)
|
||||
mcpTools: ['category_*', 'execute_research'],
|
||||
mcpTools: ['python_*', 'execute_research'],
|
||||
});
|
||||
|
||||
// Code reviewer subagent: no tools by default
|
||||
// Indicator subagent: workspace patch + category tools + evaluate_indicator
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'code-reviewer',
|
||||
agentName: 'indicator',
|
||||
platformTools: [],
|
||||
mcpTools: ['workspace_read', 'workspace_patch', 'python_*', 'evaluate_indicator'],
|
||||
});
|
||||
|
||||
// Web explore subagent: platform search/fetch tools only (no MCP needed)
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'web-explore',
|
||||
platformTools: ['web_search', 'fetch_page', 'arxiv_search'],
|
||||
mcpTools: [],
|
||||
});
|
||||
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify';
|
||||
import type { AuthService } from '../auth/auth-service.js';
|
||||
import type { BetterAuthInstance } from '../auth/better-auth-config.js';
|
||||
import type { ContainerManager } from '../k8s/container-manager.js';
|
||||
import type { UserService } from '../db/user-service.js';
|
||||
|
||||
export interface AuthRoutesConfig {
|
||||
authService: AuthService;
|
||||
betterAuth: BetterAuthInstance;
|
||||
containerManager: ContainerManager;
|
||||
userService: UserService;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -74,6 +78,14 @@ export class AuthRoutes {
|
||||
// Ensure user has a license
|
||||
await this.config.authService.ensureUserLicense(result.userId, email);
|
||||
|
||||
// Warm up the sandbox container so it's likely ready by first login
|
||||
this.config.userService.getUserLicense(result.userId).then((license) => {
|
||||
if (license) {
|
||||
this.config.containerManager.ensureContainerRunning(result.userId, license.license, false)
|
||||
.catch((err) => app.log.warn({ err, userId: result.userId }, 'Container warmup on registration failed'));
|
||||
}
|
||||
}).catch((err) => app.log.warn({ err, userId: result.userId }, 'Failed to fetch license for container warmup'));
|
||||
|
||||
// Auto sign in after registration
|
||||
const signInResult = await this.config.authService.signIn(email, password);
|
||||
|
||||
|
||||
@@ -112,28 +112,31 @@ export class OHLCService {
|
||||
return this.formatHistoryResult(data, start_time, end_time, period_seconds, countback);
|
||||
}
|
||||
|
||||
// Step 3: Request missing data via relay
|
||||
// Step 3: Request each missing range from the relay individually so we
|
||||
// only fetch what's actually absent, not the whole requested window.
|
||||
this.logger.info({ ticker, period_seconds, missingRanges: missingRanges.length, dataCount: data.length }, 'Requesting missing OHLC data from relay');
|
||||
|
||||
try {
|
||||
const notification = await this.relayClient.requestHistoricalOHLC(
|
||||
ticker,
|
||||
period_seconds,
|
||||
start_time,
|
||||
end_time
|
||||
// countback is NOT passed as a limit — the ingestor must fetch the full range.
|
||||
// Countback is applied below after we have the complete dataset.
|
||||
);
|
||||
|
||||
this.logger.info({
|
||||
ticker,
|
||||
period_seconds,
|
||||
row_count: notification.row_count,
|
||||
status: notification.status,
|
||||
}, 'Historical data request completed');
|
||||
for (const [rangeStart, rangeEnd] of missingRanges) {
|
||||
const notification = await this.relayClient.requestHistoricalOHLC(
|
||||
ticker,
|
||||
period_seconds,
|
||||
rangeStart,
|
||||
rangeEnd
|
||||
// countback is NOT passed as a limit — the ingestor must fetch the full range.
|
||||
// Countback is applied below after we have the complete dataset.
|
||||
);
|
||||
this.logger.info({
|
||||
ticker,
|
||||
period_seconds,
|
||||
rangeStart: rangeStart.toString(),
|
||||
rangeEnd: rangeEnd.toString(),
|
||||
row_count: notification.row_count,
|
||||
status: notification.status,
|
||||
}, 'Relay range request completed');
|
||||
}
|
||||
|
||||
// Step 4: Query Iceberg again for complete dataset
|
||||
this.logger.info({ ticker, period_seconds, notification_status: notification.status, row_count: notification.row_count }, 'Relay notification received, re-querying Iceberg');
|
||||
data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
|
||||
this.logger.info({ ticker, period_seconds, dataCount: data.length }, 'Final Iceberg query complete, returning result');
|
||||
|
||||
|
||||
@@ -27,7 +27,8 @@ export function createMCPToolWrapper(
|
||||
toolInfo: MCPToolInfo,
|
||||
mcpClient: MCPClientConnector,
|
||||
logger: FastifyBaseLogger,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
onImage?: (image: { data: string; mimeType: string }) => void,
|
||||
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
|
||||
): DynamicStructuredTool {
|
||||
// Convert MCP input schema to Zod schema
|
||||
const zodSchema = mcpInputSchemaToZod(toolInfo.inputSchema);
|
||||
@@ -42,6 +43,28 @@ export function createMCPToolWrapper(
|
||||
|
||||
logger.info({ tool: toolInfo.name }, 'MCP tool call completed');
|
||||
|
||||
// Fire workspace mutation callback when workspace_patch or workspace_write succeeds.
|
||||
// The sandbox returns {"success": true, "data": <newState>} as a text content item.
|
||||
if (
|
||||
onWorkspaceMutation &&
|
||||
(toolInfo.name === 'workspace_patch' || toolInfo.name === 'workspace_write')
|
||||
) {
|
||||
const content = (result as any)?.content;
|
||||
if (Array.isArray(content)) {
|
||||
for (const item of content) {
|
||||
if (item.type === 'text' && item.text) {
|
||||
try {
|
||||
const parsed = JSON.parse(item.text);
|
||||
if (parsed?.success && parsed?.data !== undefined) {
|
||||
onWorkspaceMutation((input as any).store_name as string, parsed.data);
|
||||
}
|
||||
} catch { /* ignore parse errors */ }
|
||||
break; // only need first text item
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle different MCP result formats
|
||||
if (typeof result === 'string') {
|
||||
return result;
|
||||
@@ -180,7 +203,10 @@ export function createMCPToolWrappers(
|
||||
toolInfos: MCPToolInfo[],
|
||||
mcpClient: MCPClientConnector,
|
||||
logger: FastifyBaseLogger,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
onImage?: (image: { data: string; mimeType: string }) => void,
|
||||
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
|
||||
): DynamicStructuredTool[] {
|
||||
return toolInfos.map(toolInfo => createMCPToolWrapper(toolInfo, mcpClient, logger, onImage));
|
||||
return toolInfos.map(toolInfo =>
|
||||
createMCPToolWrapper(toolInfo, mcpClient, logger, onImage, onWorkspaceMutation)
|
||||
);
|
||||
}
|
||||
|
||||
65
gateway/src/tools/platform/arxiv-search.tool.ts
Normal file
65
gateway/src/tools/platform/arxiv-search.tool.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
/**
|
||||
* ArXiv Search Tool
|
||||
*
|
||||
* Searches arXiv for academic papers using the LangChain ArxivRetriever.
|
||||
* Free, no API key required.
|
||||
*/
|
||||
|
||||
export interface ArxivSearchToolConfig {
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
export function createArxivSearchTool(config: ArxivSearchToolConfig): DynamicStructuredTool {
|
||||
const { logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'arxiv_search',
|
||||
description: 'Search arXiv for academic papers. Returns titles, authors, abstracts, and PDF links. Use this for scientific or technical research queries instead of web_search.',
|
||||
schema: z.object({
|
||||
query: z.string().describe('The research query'),
|
||||
max_results: z.number().optional().default(5).describe('Maximum number of papers to return (default: 5)'),
|
||||
}),
|
||||
func: async ({ query, max_results }) => {
|
||||
logger.debug({ query, max_results }, 'Executing arxiv_search tool');
|
||||
|
||||
try {
|
||||
const { ArxivRetriever } = await import('@langchain/community/retrievers/arxiv');
|
||||
|
||||
const retriever = new ArxivRetriever({
|
||||
getFullDocuments: false,
|
||||
maxSearchResults: max_results,
|
||||
});
|
||||
|
||||
const docs = await retriever.invoke(query);
|
||||
|
||||
const results = docs.map(doc => {
|
||||
const meta = doc.metadata as Record<string, any>;
|
||||
// Derive PDF URL from abstract URL: /abs/ID -> /pdf/ID
|
||||
const pdfUrl = typeof meta.url === 'string'
|
||||
? meta.url.replace('/abs/', '/pdf/')
|
||||
: undefined;
|
||||
|
||||
return {
|
||||
title: meta.title,
|
||||
authors: Array.isArray(meta.authors) ? meta.authors : [],
|
||||
abstract: doc.pageContent,
|
||||
published: meta.published,
|
||||
url: meta.url,
|
||||
pdf_url: pdfUrl,
|
||||
};
|
||||
});
|
||||
|
||||
logger.info({ query, resultCount: results.length }, 'arXiv search completed');
|
||||
|
||||
return JSON.stringify({ query, results });
|
||||
} catch (error) {
|
||||
logger.error({ error, query }, 'arxiv_search tool failed');
|
||||
return JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
80
gateway/src/tools/platform/fetch-page.tool.ts
Normal file
80
gateway/src/tools/platform/fetch-page.tool.ts
Normal file
@@ -0,0 +1,80 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
const MAX_CONTENT_LENGTH = 50_000;
|
||||
|
||||
/**
|
||||
* Fetch Page Tool
|
||||
*
|
||||
* Fetches a URL and returns its content as text/markdown.
|
||||
* - PDFs are converted to text using pdf-parse
|
||||
* - HTML pages are scraped with cheerio
|
||||
* - Output is truncated to 50k characters
|
||||
*/
|
||||
|
||||
export interface FetchPageToolConfig {
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
export function createFetchPageTool(config: FetchPageToolConfig): DynamicStructuredTool {
|
||||
const { logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'fetch_page',
|
||||
description: 'Fetch a web page or PDF and return its text content. PDFs are automatically converted to markdown. Use this after web_search or arxiv_search to read the full content of a result.',
|
||||
schema: z.object({
|
||||
url: z.string().url().describe('The URL to fetch'),
|
||||
}),
|
||||
func: async ({ url }) => {
|
||||
logger.debug({ url }, 'Executing fetch_page tool');
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
headers: { 'User-Agent': 'Mozilla/5.0 (compatible; research-agent/1.0)' },
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return JSON.stringify({ error: `HTTP ${response.status}: ${response.statusText}`, url });
|
||||
}
|
||||
|
||||
const contentType = response.headers.get('content-type') ?? '';
|
||||
const isPdf = contentType.includes('pdf') || url.toLowerCase().endsWith('.pdf');
|
||||
|
||||
let content: string;
|
||||
|
||||
if (isPdf) {
|
||||
const buffer = Buffer.from(await response.arrayBuffer());
|
||||
const { PDFParse } = await import('pdf-parse');
|
||||
const arrayBuffer = buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength);
|
||||
const parser = new PDFParse({ data: arrayBuffer });
|
||||
const result = await parser.getText();
|
||||
content = result.text;
|
||||
logger.debug({ url, chars: content.length, pages: result.pages.length }, 'PDF text extracted');
|
||||
} else {
|
||||
const html = await response.text();
|
||||
const { load } = await import('cheerio');
|
||||
const $ = load(html);
|
||||
|
||||
// Remove non-content elements
|
||||
$('script, style, nav, footer, header, aside, [role="navigation"]').remove();
|
||||
|
||||
// Prefer article/main content
|
||||
const main = $('article, main, [role="main"]').first();
|
||||
content = (main.length ? main : $('body')).text().replace(/\s{3,}/g, '\n\n').trim();
|
||||
|
||||
logger.debug({ url, chars: content.length }, 'HTML page scraped');
|
||||
}
|
||||
|
||||
const truncated = content.length > MAX_CONTENT_LENGTH;
|
||||
const output = truncated ? content.slice(0, MAX_CONTENT_LENGTH) + '\n\n[content truncated]' : content;
|
||||
|
||||
return JSON.stringify({ url, content: output, truncated });
|
||||
} catch (error) {
|
||||
logger.error({ error, url }, 'fetch_page tool failed');
|
||||
return JSON.stringify({ error: error instanceof Error ? error.message : String(error), url });
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
53
gateway/src/tools/platform/indicator-agent.tool.ts
Normal file
53
gateway/src/tools/platform/indicator-agent.tool.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { IndicatorSubagent } from '../../harness/subagents/indicator/index.js';
|
||||
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
|
||||
|
||||
export interface IndicatorAgentToolConfig {
|
||||
indicatorSubagent: IndicatorSubagent;
|
||||
context: SubagentContext;
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a LangChain tool that delegates to the indicator subagent.
|
||||
* Mirrors the pattern of research-agent.tool.ts.
|
||||
*/
|
||||
export function createIndicatorAgentTool(config: IndicatorAgentToolConfig): DynamicStructuredTool {
|
||||
const { indicatorSubagent, context, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'indicator',
|
||||
description: `Delegate to the indicator subagent for all indicator-related tasks on the chart.
|
||||
|
||||
Use this tool for:
|
||||
- Reading which indicators are currently on the chart and explaining what they show
|
||||
- Adding indicators to the chart ("show RSI", "add Bollinger Bands with std=1.5")
|
||||
- Modifying indicator parameters ("change MACD fast to 8", "set RSI length to 21")
|
||||
- Removing indicators ("remove all moving averages", "clear the volume indicators")
|
||||
- Toggling indicator visibility
|
||||
- Creating custom indicators using Python scripts
|
||||
- Recommending indicators for a given strategy or analysis goal
|
||||
|
||||
ALWAYS use this tool for any request about the chart's indicators.
|
||||
NEVER modify the indicators workspace store directly.`,
|
||||
schema: z.object({
|
||||
instruction: z.string().describe(
|
||||
'The indicator task to perform. Be specific about which indicators, parameters, ' +
|
||||
'and what changes are needed. Include relevant context like the current symbol ' +
|
||||
'if the user mentioned it.'
|
||||
),
|
||||
}),
|
||||
func: async ({ instruction }: { instruction: string }): Promise<string> => {
|
||||
logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to indicator subagent');
|
||||
|
||||
try {
|
||||
return await indicatorSubagent.execute(context, instruction);
|
||||
} catch (error) {
|
||||
logger.error({ error, errorMessage: (error as Error)?.message }, 'Indicator subagent failed');
|
||||
throw error;
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
49
gateway/src/tools/platform/web-explore-agent.tool.ts
Normal file
49
gateway/src/tools/platform/web-explore-agent.tool.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { WebExploreSubagent } from '../../harness/subagents/web-explore/index.js';
|
||||
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
|
||||
|
||||
export interface WebExploreAgentToolConfig {
|
||||
webExploreSubagent: WebExploreSubagent;
|
||||
context: SubagentContext;
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a LangChain tool that delegates to the web-explore subagent.
|
||||
* The subagent decides whether to use web search or arXiv based on the instruction.
|
||||
*/
|
||||
export function createWebExploreAgentTool(config: WebExploreAgentToolConfig): DynamicStructuredTool {
|
||||
const { webExploreSubagent, context, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'web_explore',
|
||||
description: `Search the web or academic databases and return a summarized answer.
|
||||
|
||||
Use this tool when the user asks about:
|
||||
- Current events, news, or real-time information
|
||||
- Documentation, tutorials, or how-to guides
|
||||
- Academic papers, research findings, or scientific topics
|
||||
- Any topic that benefits from external sources
|
||||
|
||||
The subagent will search the web (or arXiv for academic queries), fetch relevant content, and return a markdown summary with cited sources.`,
|
||||
schema: z.object({
|
||||
instruction: z.string().describe(
|
||||
'What to search for and summarize. Be specific — include the topic, what aspects matter, ' +
|
||||
'and any context that helps narrow the search (e.g. "recent papers on momentum factor in equities" ' +
|
||||
'or "how to configure rate limiting in Fastify").'
|
||||
),
|
||||
}),
|
||||
func: async ({ instruction }: { instruction: string }): Promise<string> => {
|
||||
logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to web-explore subagent');
|
||||
|
||||
try {
|
||||
return await webExploreSubagent.execute(context, instruction);
|
||||
} catch (error) {
|
||||
logger.error({ error, errorMessage: (error as Error)?.message }, 'Web explore subagent failed');
|
||||
throw error;
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
65
gateway/src/tools/platform/web-search.tool.ts
Normal file
65
gateway/src/tools/platform/web-search.tool.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
|
||||
/**
|
||||
* Web Search Tool
|
||||
*
|
||||
* Calls the Tavily REST API directly. The config interface is intentionally
|
||||
* minimal so the underlying provider can be swapped without touching callers.
|
||||
*/
|
||||
|
||||
export interface WebSearchToolConfig {
|
||||
apiKey: string;
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
export function createWebSearchTool(config: WebSearchToolConfig): DynamicStructuredTool {
|
||||
const { apiKey, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'web_search',
|
||||
description: 'Search the web. Returns titles, URLs, and content summaries. Use this for general web searches. For academic/scientific papers, prefer arxiv_search instead.',
|
||||
schema: z.object({
|
||||
query: z.string().describe('The search query'),
|
||||
max_results: z.number().optional().default(8).describe('Maximum number of results to return (default: 8)'),
|
||||
}),
|
||||
func: async ({ query, max_results }) => {
|
||||
logger.debug({ query, max_results }, 'Executing web_search tool');
|
||||
|
||||
try {
|
||||
const response = await fetch('https://api.tavily.com/search', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
api_key: apiKey,
|
||||
query,
|
||||
max_results,
|
||||
search_depth: 'basic',
|
||||
}),
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
throw new Error(`Tavily API error ${response.status}: ${text}`);
|
||||
}
|
||||
|
||||
const data = await response.json() as { results?: Array<{ title: string; url: string; content: string }> };
|
||||
|
||||
const items = (data.results ?? []).map(r => ({
|
||||
title: r.title,
|
||||
url: r.url,
|
||||
snippet: r.content,
|
||||
}));
|
||||
|
||||
logger.info({ query, resultCount: items.length }, 'Web search completed');
|
||||
|
||||
return JSON.stringify({ query, results: items });
|
||||
} catch (error) {
|
||||
logger.error({ error, query, errorMessage: error instanceof Error ? error.message : String(error) }, 'web_search tool failed');
|
||||
return JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -6,6 +6,9 @@ import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
|
||||
import { createSymbolLookupTool } from './platform/symbol-lookup.tool.js';
|
||||
import { createGetChartDataTool } from './platform/get-chart-data.tool.js';
|
||||
import { createWebSearchTool } from './platform/web-search.tool.js';
|
||||
import { createFetchPageTool } from './platform/fetch-page.tool.js';
|
||||
import { createArxivSearchTool } from './platform/arxiv-search.tool.js';
|
||||
import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.js';
|
||||
|
||||
/**
|
||||
@@ -13,13 +16,13 @@ import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.
|
||||
* Specifies which tools are available to which agent
|
||||
*/
|
||||
export interface AgentToolConfig {
|
||||
/** Agent name (e.g., 'main', 'research', 'code-reviewer') */
|
||||
/** Agent name (e.g., 'main', 'research', 'web-explore') */
|
||||
agentName: string;
|
||||
|
||||
/** Platform tool names to include */
|
||||
platformTools: string[];
|
||||
|
||||
/** MCP tool patterns/names to include (supports wildcards like 'category_*') */
|
||||
/** MCP tool patterns/names to include (supports wildcards like 'python_*') */
|
||||
mcpTools: string[];
|
||||
}
|
||||
|
||||
@@ -31,6 +34,7 @@ export interface PlatformServices {
|
||||
ohlcService?: OHLCService | (() => OHLCService | undefined);
|
||||
symbolIndexService?: SymbolIndexService | (() => SymbolIndexService | undefined);
|
||||
workspaceManager?: WorkspaceManager | (() => WorkspaceManager | undefined);
|
||||
tavilyApiKey?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -81,7 +85,8 @@ export class ToolRegistry {
|
||||
mcpClient?: MCPClientConnector,
|
||||
availableMCPTools?: MCPToolInfo[],
|
||||
workspaceManager?: WorkspaceManager,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
onImage?: (image: { data: string; mimeType: string }) => void,
|
||||
onWorkspaceMutation?: (storeName: string, newState: unknown) => void
|
||||
): Promise<DynamicStructuredTool[]> {
|
||||
const config = this.agentToolConfigs.get(agentName);
|
||||
|
||||
@@ -105,7 +110,7 @@ export class ToolRegistry {
|
||||
// Add MCP tools (if MCP client and tools are available)
|
||||
if (mcpClient && availableMCPTools && availableMCPTools.length > 0) {
|
||||
const filteredMCPTools = this.filterMCPTools(availableMCPTools, config.mcpTools);
|
||||
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage);
|
||||
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage, onWorkspaceMutation);
|
||||
tools.push(...mcpToolInstances);
|
||||
|
||||
this.logger.debug(
|
||||
@@ -180,6 +185,25 @@ export class ToolRegistry {
|
||||
break;
|
||||
}
|
||||
|
||||
case 'web_search': {
|
||||
if (this.platformServices.tavilyApiKey) {
|
||||
tool = createWebSearchTool({ apiKey: this.platformServices.tavilyApiKey, logger: this.logger });
|
||||
} else {
|
||||
this.logger.warn('TAVILY_API_KEY not configured — web_search tool unavailable');
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'fetch_page': {
|
||||
tool = createFetchPageTool({ logger: this.logger });
|
||||
break;
|
||||
}
|
||||
|
||||
case 'arxiv_search': {
|
||||
tool = createArxivSearchTool({ logger: this.logger });
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
this.logger.warn({ tool: toolName }, 'Unknown platform tool');
|
||||
return null;
|
||||
@@ -202,7 +226,7 @@ export class ToolRegistry {
|
||||
|
||||
/**
|
||||
* Filter MCP tools based on patterns/names
|
||||
* Supports wildcards like 'category_*' or exact names like 'execute_research'
|
||||
* Supports wildcards like 'python_*' or exact names like 'execute_research'
|
||||
*/
|
||||
private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] {
|
||||
if (patterns.length === 0) {
|
||||
@@ -221,7 +245,7 @@ export class ToolRegistry {
|
||||
|
||||
/**
|
||||
* Check if a tool name matches a pattern
|
||||
* Supports wildcards: 'category_*' matches 'category_write', 'category_read', etc.
|
||||
* Supports wildcards: 'python_*' matches 'python_write', 'python_read', etc.
|
||||
*/
|
||||
private matchesPattern(toolName: string, pattern: string): boolean {
|
||||
if (pattern === toolName) {
|
||||
|
||||
@@ -11,11 +11,11 @@
|
||||
* TradingView bar format (used by web frontend)
|
||||
*/
|
||||
export interface TradingViewBar {
|
||||
time: number; // Unix timestamp in SECONDS
|
||||
open: number | null; // null for gap bars (no trades that period)
|
||||
high: number | null;
|
||||
low: number | null;
|
||||
close: number | null;
|
||||
time: number; // Unix timestamp in SECONDS
|
||||
open: number; // always non-null — ingestor forward-fills interior gaps
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume?: number | null;
|
||||
// Optional extra columns from ohlc.proto
|
||||
buy_vol?: number;
|
||||
@@ -31,13 +31,13 @@ export interface TradingViewBar {
|
||||
* Backend OHLC format (from Iceberg)
|
||||
*/
|
||||
export interface BackendOHLC {
|
||||
timestamp: bigint; // Unix timestamp in NANOSECONDS — kept as bigint to preserve precision
|
||||
ticker: string; // Nautilus format: "BTC/USDT.BINANCE"
|
||||
timestamp: bigint; // Unix timestamp in NANOSECONDS — kept as bigint to preserve precision
|
||||
ticker: string; // Nautilus format: "BTC/USDT.BINANCE"
|
||||
period_seconds: number;
|
||||
open: number | null; // null for gap bars (no trades that period)
|
||||
high: number | null;
|
||||
low: number | null;
|
||||
close: number | null;
|
||||
open: number; // always non-null — ingestor forward-fills interior gaps
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number | null;
|
||||
}
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ export const LICENSE_TIER_TEMPLATES: Record<LicenseTier, License> = {
|
||||
memoryRequest: '512Mi', memoryLimit: '2Gi',
|
||||
cpuRequest: '250m', cpuLimit: '2000m',
|
||||
storage: '10Gi', tmpSizeLimit: '256Mi',
|
||||
enableIdleShutdown: true, idleTimeoutMinutes: 60,
|
||||
enableIdleShutdown: false, idleTimeoutMinutes: 0,
|
||||
},
|
||||
},
|
||||
enterprise: {
|
||||
|
||||
@@ -55,6 +55,20 @@ export class ContainerSync {
|
||||
this.logger = logger.child({ component: 'ContainerSync' });
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a raw MCP callTool response into the tool's return value.
|
||||
* MCP tool results are wrapped as: { content: [{ type: 'text', text: '<json>' }] }
|
||||
*/
|
||||
private parseMcpResult(raw: unknown): unknown {
|
||||
const r = raw as any;
|
||||
const text = r?.content?.[0]?.text ?? r?.[0]?.text;
|
||||
if (typeof text === 'string') {
|
||||
return JSON.parse(text);
|
||||
}
|
||||
// Already unwrapped (shouldn't happen in practice)
|
||||
return raw;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a workspace store from the container.
|
||||
* Returns the stored state or indicates the store doesn't exist.
|
||||
@@ -68,7 +82,7 @@ export class ContainerSync {
|
||||
try {
|
||||
this.logger.debug({ store: storeName }, 'Loading store from container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_read', {
|
||||
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_read', {
|
||||
store_name: storeName,
|
||||
})) as { exists: boolean; data?: unknown; error?: string };
|
||||
|
||||
@@ -104,7 +118,7 @@ export class ContainerSync {
|
||||
try {
|
||||
this.logger.debug({ store: storeName }, 'Saving store to container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_write', {
|
||||
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_write', {
|
||||
store_name: storeName,
|
||||
data: state,
|
||||
})) as { success: boolean; error?: string };
|
||||
@@ -136,7 +150,7 @@ export class ContainerSync {
|
||||
try {
|
||||
this.logger.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container');
|
||||
|
||||
const result = (await this.mcpClient.callTool('workspace_patch', {
|
||||
const result = this.parseMcpResult(await this.mcpClient.callTool('workspace_patch', {
|
||||
store_name: storeName,
|
||||
patch,
|
||||
})) as { success: boolean; data?: unknown; error?: string };
|
||||
|
||||
@@ -59,12 +59,12 @@ class SyncEntry {
|
||||
|
||||
/**
|
||||
* Set state directly (used for loading from container).
|
||||
* Resets sequence to 0.
|
||||
* Sets sequence to 1 so clients at seq 0 (empty state) receive a full snapshot.
|
||||
*/
|
||||
setState(newState: unknown): void {
|
||||
this.state = deepClone(newState);
|
||||
this.lastSnapshot = deepClone(newState);
|
||||
this.seq = 0;
|
||||
this.seq = 1;
|
||||
this.history = [];
|
||||
}
|
||||
|
||||
|
||||
@@ -272,12 +272,84 @@ export interface Shape {
|
||||
*/
|
||||
export type ShapesStore = Record<string, Shape>;
|
||||
|
||||
/**
|
||||
* Parameter schema entry for a custom indicator.
|
||||
*/
|
||||
export interface CustomIndicatorParam {
|
||||
type: 'int' | 'float' | 'bool' | 'string';
|
||||
default: any;
|
||||
description?: string;
|
||||
min?: number;
|
||||
max?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-series plot configuration for a custom indicator output column.
|
||||
* style maps to LineStudyPlotStyle: 0=Line, 1=Histogram, 3=Dots/Cross,
|
||||
* 4=Area, 5=Columns, 6=Circles, 9=StepLine.
|
||||
*/
|
||||
export interface PlotConfig {
|
||||
style: number;
|
||||
color?: string;
|
||||
linewidth?: number;
|
||||
visible?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Shaded region between two plots ("plot_plot") or two bands ("hline_hline").
|
||||
*/
|
||||
export interface FilledAreaConfig {
|
||||
id: string;
|
||||
type: 'plot_plot' | 'hline_hline';
|
||||
series1: string;
|
||||
series2: string;
|
||||
color?: string;
|
||||
opacity?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Horizontal reference line (e.g. RSI overbought/oversold level).
|
||||
* linestyle: 0=solid, 1=dotted, 2=dashed.
|
||||
*/
|
||||
export interface BandConfig {
|
||||
id: string;
|
||||
value: number;
|
||||
color?: string;
|
||||
linewidth?: number;
|
||||
linestyle?: number;
|
||||
visible?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Output column descriptor for a custom indicator.
|
||||
*/
|
||||
export interface CustomIndicatorColumn {
|
||||
name: string;
|
||||
display_name?: string;
|
||||
description?: string;
|
||||
plot?: PlotConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* Metadata needed to auto-construct a TradingView custom study.
|
||||
* Populated by the indicator subagent when adding a custom_ indicator.
|
||||
*/
|
||||
export interface CustomIndicatorMetadata {
|
||||
display_name: string;
|
||||
parameters: Record<string, CustomIndicatorParam>;
|
||||
input_series: string[];
|
||||
output_columns: CustomIndicatorColumn[];
|
||||
pane: 'price' | 'separate';
|
||||
filled_areas?: FilledAreaConfig[];
|
||||
bands?: BandConfig[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicator instance on TradingView chart.
|
||||
*/
|
||||
export interface IndicatorInstance {
|
||||
id: string;
|
||||
talib_name: string;
|
||||
pandas_ta_name: string;
|
||||
instance_name: string;
|
||||
parameters: Record<string, any>;
|
||||
tv_study_id?: string;
|
||||
@@ -289,6 +361,8 @@ export interface IndicatorInstance {
|
||||
created_at?: number;
|
||||
modified_at?: number;
|
||||
original_id?: string;
|
||||
/** Populated for custom_ indicators; drives TV custom study auto-construction. */
|
||||
custom_metadata?: CustomIndicatorMetadata;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user