sandbox connected and streaming
This commit is contained in:
@@ -1,6 +1,7 @@
|
||||
import type { BetterAuthInstance } from './better-auth-config.js';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { Pool } from 'pg';
|
||||
import { LICENSE_TIER_TEMPLATES } from '../types/user.js';
|
||||
|
||||
export interface AuthServiceConfig {
|
||||
auth: BetterAuthInstance;
|
||||
@@ -202,11 +203,11 @@ export class AuthService {
|
||||
);
|
||||
|
||||
if (licenseCheck.rows.length === 0) {
|
||||
// Create default free license
|
||||
// Create default free license — copy the full tier template so every field is present
|
||||
await client.query(
|
||||
`INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url)
|
||||
VALUES ($1, $2, 'free', 'pending')`,
|
||||
[userId, email]
|
||||
`INSERT INTO user_licenses (user_id, email, license, mcp_server_url)
|
||||
VALUES ($1, $2, $3::jsonb, 'pending')`,
|
||||
[userId, email, JSON.stringify(LICENSE_TIER_TEMPLATES.free)]
|
||||
);
|
||||
|
||||
this.config.logger.info({ userId }, 'Created default free license for new user');
|
||||
|
||||
@@ -56,7 +56,7 @@ export class Authenticator {
|
||||
this.config.logger.info({ userId }, 'Ensuring user container is running');
|
||||
const { mcpEndpoint, wasCreated, isSpinningUp } = await this.config.containerManager.ensureContainerRunning(
|
||||
userId,
|
||||
license,
|
||||
license.license,
|
||||
false // Don't wait for ready
|
||||
);
|
||||
|
||||
@@ -72,9 +72,6 @@ export class Authenticator {
|
||||
);
|
||||
}
|
||||
|
||||
// Update license with actual MCP endpoint
|
||||
license.mcpServerUrl = mcpEndpoint;
|
||||
|
||||
const sessionId = `ws_${userId}_${Date.now()}`;
|
||||
|
||||
return {
|
||||
@@ -83,7 +80,8 @@ export class Authenticator {
|
||||
channelType: ChannelType.WEBSOCKET,
|
||||
channelUserId: userId, // For WebSocket, same as userId
|
||||
sessionId,
|
||||
license,
|
||||
license: license.license,
|
||||
mcpServerUrl: mcpEndpoint,
|
||||
authenticatedAt: new Date(),
|
||||
},
|
||||
isSpinningUp,
|
||||
@@ -123,7 +121,7 @@ export class Authenticator {
|
||||
this.config.logger.info({ userId }, 'Ensuring user container is running');
|
||||
const { mcpEndpoint, wasCreated } = await this.config.containerManager.ensureContainerRunning(
|
||||
userId,
|
||||
license
|
||||
license.license
|
||||
);
|
||||
|
||||
this.config.logger.info(
|
||||
@@ -131,9 +129,6 @@ export class Authenticator {
|
||||
'Container is ready'
|
||||
);
|
||||
|
||||
// Update license with actual MCP endpoint
|
||||
license.mcpServerUrl = mcpEndpoint;
|
||||
|
||||
const sessionId = `tg_${telegramUserId}_${Date.now()}`;
|
||||
|
||||
return {
|
||||
@@ -141,7 +136,8 @@ export class Authenticator {
|
||||
channelType: ChannelType.TELEGRAM,
|
||||
channelUserId: telegramUserId,
|
||||
sessionId,
|
||||
license,
|
||||
license: license.license,
|
||||
mcpServerUrl: mcpEndpoint,
|
||||
authenticatedAt: new Date(),
|
||||
};
|
||||
} catch (error) {
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify';
|
||||
import type { Authenticator } from '../auth/authenticator.js';
|
||||
import { AgentHarness } from '../harness/agent-harness.js';
|
||||
import type { AgentHarness, HarnessFactory } from '../harness/agent-harness.js';
|
||||
import type { InboundMessage } from '../types/messages.js';
|
||||
import { randomUUID } from 'crypto';
|
||||
|
||||
import type { ProviderConfig } from '../llm/provider.js';
|
||||
import type { ChannelAdapter, ChannelCapabilities } from '../workspace/index.js';
|
||||
|
||||
export interface TelegramHandlerConfig {
|
||||
authenticator: Authenticator;
|
||||
providerConfig: ProviderConfig;
|
||||
telegramBotToken: string;
|
||||
createHarness: HarnessFactory;
|
||||
}
|
||||
|
||||
interface TelegramUpdate {
|
||||
@@ -33,12 +33,18 @@ interface TelegramUpdate {
|
||||
};
|
||||
}
|
||||
|
||||
interface TelegramSession {
|
||||
harness: AgentHarness;
|
||||
lastActivity: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Telegram webhook handler
|
||||
*/
|
||||
export class TelegramHandler {
|
||||
private config: TelegramHandlerConfig;
|
||||
private sessions = new Map<string, AgentHarness>();
|
||||
private sessions = new Map<string, TelegramSession>();
|
||||
private chatIds = new Map<string, number>(); // sessionId -> chatId
|
||||
|
||||
constructor(config: TelegramHandlerConfig) {
|
||||
this.config = config;
|
||||
@@ -90,18 +96,59 @@ export class TelegramHandler {
|
||||
return;
|
||||
}
|
||||
|
||||
// Store chatId for this session
|
||||
this.chatIds.set(authContext.sessionId, chatId);
|
||||
|
||||
// Create Telegram channel adapter
|
||||
const telegramAdapter: ChannelAdapter = {
|
||||
sendSnapshot: () => {
|
||||
// Telegram doesn't support sync protocol
|
||||
},
|
||||
sendPatch: () => {
|
||||
// Telegram doesn't support sync protocol
|
||||
},
|
||||
sendText: (msg) => {
|
||||
this.sendTelegramMessage(chatId, msg.text).catch((err) => {
|
||||
logger.error({ error: err }, 'Failed to send Telegram text');
|
||||
});
|
||||
},
|
||||
sendChunk: () => {
|
||||
// Telegram doesn't support streaming; full response sent after handleMessage resolves
|
||||
},
|
||||
sendImage: (msg) => {
|
||||
this.sendTelegramPhoto(chatId, msg.data, msg.mimeType, msg.caption).catch((err) => {
|
||||
logger.error({ error: err }, 'Failed to send Telegram image');
|
||||
});
|
||||
},
|
||||
getCapabilities: (): ChannelCapabilities => ({
|
||||
supportsSync: false,
|
||||
supportsImages: true,
|
||||
supportsMarkdown: true,
|
||||
supportsStreaming: false,
|
||||
supportsTradingViewEmbed: false,
|
||||
}),
|
||||
};
|
||||
|
||||
// Get or create harness
|
||||
let harness = this.sessions.get(authContext.sessionId);
|
||||
if (!harness) {
|
||||
harness = new AgentHarness({
|
||||
let session = this.sessions.get(authContext.sessionId);
|
||||
if (!session) {
|
||||
const harness = this.config.createHarness({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
license: authContext.license,
|
||||
providerConfig: this.config.providerConfig,
|
||||
mcpServerUrl: authContext.mcpServerUrl,
|
||||
logger,
|
||||
channelAdapter: telegramAdapter,
|
||||
channelType: authContext.channelType,
|
||||
channelUserId: authContext.channelUserId,
|
||||
});
|
||||
await harness.initialize();
|
||||
this.sessions.set(authContext.sessionId, harness);
|
||||
session = { harness, lastActivity: Date.now() };
|
||||
this.sessions.set(authContext.sessionId, session);
|
||||
} else {
|
||||
// Update channel adapter and activity timestamp for existing session
|
||||
session.harness.setChannelAdapter(telegramAdapter);
|
||||
session.lastActivity = Date.now();
|
||||
}
|
||||
|
||||
// Process message
|
||||
@@ -114,7 +161,7 @@ export class TelegramHandler {
|
||||
timestamp: new Date(),
|
||||
};
|
||||
|
||||
const response = await harness.handleMessage(inboundMessage);
|
||||
const response = await session.harness.handleMessage(inboundMessage);
|
||||
|
||||
// Send response back to Telegram
|
||||
await this.sendTelegramMessage(chatId, response.content);
|
||||
@@ -127,7 +174,7 @@ export class TelegramHandler {
|
||||
}
|
||||
|
||||
/**
|
||||
* Send message to Telegram chat
|
||||
* Send text message to Telegram chat
|
||||
*/
|
||||
private async sendTelegramMessage(chatId: number, text: string): Promise<void> {
|
||||
const url = `https://api.telegram.org/bot${this.config.telegramBotToken}/sendMessage`;
|
||||
@@ -155,10 +202,80 @@ export class TelegramHandler {
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup old sessions (call periodically)
|
||||
* Send photo to Telegram chat
|
||||
* Converts base64 image data to a buffer and sends via sendPhoto API
|
||||
*/
|
||||
async cleanupSessions(_maxAgeMs = 30 * 60 * 1000): Promise<void> {
|
||||
// TODO: Track session last activity and cleanup
|
||||
// For now, sessions persist until server restart
|
||||
private async sendTelegramPhoto(
|
||||
chatId: number,
|
||||
base64Data: string,
|
||||
mimeType: string,
|
||||
caption?: string
|
||||
): Promise<void> {
|
||||
const url = `https://api.telegram.org/bot${this.config.telegramBotToken}/sendPhoto`;
|
||||
|
||||
try {
|
||||
// Convert base64 to buffer
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64');
|
||||
|
||||
// Determine filename from mimeType
|
||||
const extension = mimeType.split('/')[1] || 'png';
|
||||
const filename = `image.${extension}`;
|
||||
|
||||
// Create FormData for multipart upload
|
||||
const formData = new FormData();
|
||||
formData.append('chat_id', chatId.toString());
|
||||
formData.append('photo', new Blob([imageBuffer], { type: mimeType }), filename);
|
||||
if (caption) {
|
||||
formData.append('caption', caption);
|
||||
}
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text();
|
||||
throw new Error(`Telegram API error: ${response.statusText} - ${errorText}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Failed to send Telegram photo:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up sessions that have been idle longer than maxAgeMs.
|
||||
* Triggers Iceberg flush for each expired session via harness.cleanup().
|
||||
*/
|
||||
async cleanupSessions(maxAgeMs = 30 * 60 * 1000): Promise<void> {
|
||||
const now = Date.now();
|
||||
const expired: string[] = [];
|
||||
|
||||
for (const [sessionId, session] of this.sessions) {
|
||||
if (now - session.lastActivity > maxAgeMs) {
|
||||
expired.push(sessionId);
|
||||
}
|
||||
}
|
||||
|
||||
for (const sessionId of expired) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (session) {
|
||||
await session.harness.cleanup().catch(() => {});
|
||||
this.sessions.delete(sessionId);
|
||||
this.chatIds.delete(sessionId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush and clean up all active sessions.
|
||||
* Called during graceful shutdown.
|
||||
*/
|
||||
async endAllSessions(): Promise<void> {
|
||||
const cleanups = Array.from(this.sessions.values()).map(s => s.harness.cleanup());
|
||||
await Promise.allSettled(cleanups);
|
||||
this.sessions.clear();
|
||||
this.chatIds.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import type { FastifyInstance, FastifyRequest } from 'fastify';
|
||||
import type { WebSocket } from '@fastify/websocket';
|
||||
import type { Authenticator } from '../auth/authenticator.js';
|
||||
import { AgentHarness } from '../harness/agent-harness.js';
|
||||
import type { AgentHarness, HarnessFactory } from '../harness/agent-harness.js';
|
||||
import type { InboundMessage } from '../types/messages.js';
|
||||
import { randomUUID } from 'crypto';
|
||||
|
||||
import type { ProviderConfig } from '../llm/provider.js';
|
||||
import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js';
|
||||
import type { OHLCService } from '../services/ohlc-service.js';
|
||||
import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
@@ -29,12 +27,18 @@ function jsonStringifySafe(obj: any): string {
|
||||
);
|
||||
}
|
||||
|
||||
export type SessionStatus = 'authenticating' | 'spinning_up' | 'initializing' | 'ready' | 'error'
|
||||
|
||||
function sendStatus(socket: WebSocket, status: SessionStatus, message: string): void {
|
||||
socket.send(JSON.stringify({ type: 'status', status, message }))
|
||||
}
|
||||
|
||||
export interface WebSocketHandlerConfig {
|
||||
authenticator: Authenticator;
|
||||
containerManager: ContainerManager;
|
||||
providerConfig: ProviderConfig;
|
||||
sessionRegistry: SessionRegistry;
|
||||
eventSubscriber: EventSubscriber;
|
||||
createHarness: HarnessFactory;
|
||||
ohlcService?: OHLCService; // Optional for historical data support
|
||||
symbolIndexService?: SymbolIndexService; // Optional for symbol search
|
||||
}
|
||||
@@ -78,13 +82,7 @@ export class WebSocketHandler {
|
||||
const logger = app.log;
|
||||
|
||||
// Send initial connecting message
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'authenticating',
|
||||
message: 'Authenticating...',
|
||||
})
|
||||
);
|
||||
sendStatus(socket, 'authenticating', 'Authenticating...');
|
||||
|
||||
// Authenticate (returns immediately if container is spinning up)
|
||||
const { authContext, isSpinningUp } = await this.config.authenticator.authenticateWebSocket(request);
|
||||
@@ -105,33 +103,23 @@ export class WebSocketHandler {
|
||||
'WebSocket connection authenticated'
|
||||
);
|
||||
|
||||
// If container is spinning up, send status and start background polling
|
||||
// If container is spinning up, wait for it to be ready before continuing
|
||||
if (isSpinningUp) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'spinning_up',
|
||||
message: 'Your workspace is starting up, please wait...',
|
||||
})
|
||||
);
|
||||
sendStatus(socket, 'spinning_up', 'Your workspace is starting up, please wait...');
|
||||
|
||||
// Start background polling for container readiness
|
||||
this.pollContainerReadiness(socket, authContext, app).catch((error) => {
|
||||
logger.error({ error, userId: authContext.userId }, 'Error polling container readiness');
|
||||
});
|
||||
const ready = await this.config.containerManager.waitForContainerReady(authContext.userId, 120000);
|
||||
if (!ready) {
|
||||
logger.warn({ userId: authContext.userId }, 'Container failed to become ready within timeout');
|
||||
socket.send(JSON.stringify({ type: 'error', message: 'Workspace failed to start. Please try again later.' }));
|
||||
socket.close(1011, 'Container startup timeout');
|
||||
return;
|
||||
}
|
||||
|
||||
// Don't return - continue with session setup so we can receive messages once ready
|
||||
} else {
|
||||
// Send workspace starting message
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'initializing',
|
||||
message: 'Starting your workspace...',
|
||||
})
|
||||
);
|
||||
logger.info({ userId: authContext.userId }, 'Container is ready, proceeding with session setup');
|
||||
}
|
||||
|
||||
sendStatus(socket, 'initializing', 'Starting your workspace...');
|
||||
|
||||
// Create workspace manager for this session
|
||||
const workspace = new WorkspaceManager({
|
||||
userId: authContext.userId,
|
||||
@@ -149,6 +137,34 @@ export class WebSocketHandler {
|
||||
sendPatch: (msg: PatchMessage) => {
|
||||
socket.send(JSON.stringify(msg));
|
||||
},
|
||||
sendText: (msg) => {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'text',
|
||||
text: msg.text,
|
||||
}));
|
||||
},
|
||||
sendChunk: (content) => {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'agent_chunk',
|
||||
content,
|
||||
done: false,
|
||||
}));
|
||||
},
|
||||
sendImage: (msg) => {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'image',
|
||||
data: msg.data,
|
||||
mimeType: msg.mimeType,
|
||||
caption: msg.caption,
|
||||
}));
|
||||
},
|
||||
sendToolCall: (toolName, label) => {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'agent_tool_call',
|
||||
toolName,
|
||||
label: label ?? toolName,
|
||||
}));
|
||||
},
|
||||
getCapabilities: (): ChannelCapabilities => ({
|
||||
supportsSync: true,
|
||||
supportsImages: true,
|
||||
@@ -167,14 +183,17 @@ export class WebSocketHandler {
|
||||
workspace.setAdapter(wsAdapter);
|
||||
this.workspaces.set(authContext.sessionId, workspace);
|
||||
|
||||
// Create agent harness with workspace manager
|
||||
harness = new AgentHarness({
|
||||
// Create agent harness via factory (storage deps injected by factory)
|
||||
harness = this.config.createHarness({
|
||||
userId: authContext.userId,
|
||||
sessionId: authContext.sessionId,
|
||||
license: authContext.license,
|
||||
providerConfig: this.config.providerConfig,
|
||||
mcpServerUrl: authContext.mcpServerUrl,
|
||||
logger,
|
||||
workspaceManager: workspace,
|
||||
channelAdapter: wsAdapter,
|
||||
channelType: authContext.channelType,
|
||||
channelUserId: authContext.channelUserId,
|
||||
});
|
||||
|
||||
await harness.initialize();
|
||||
@@ -182,7 +201,7 @@ export class WebSocketHandler {
|
||||
|
||||
// Register session for event system
|
||||
// Container endpoint is derived from the MCP server URL (same container, different port)
|
||||
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.license.mcpServerUrl);
|
||||
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.mcpServerUrl);
|
||||
|
||||
const session: Session = {
|
||||
userId: authContext.userId,
|
||||
@@ -203,18 +222,16 @@ export class WebSocketHandler {
|
||||
'Session registered for events'
|
||||
);
|
||||
|
||||
// Send connected message (only if not spinning up - otherwise sent by pollContainerReadiness)
|
||||
if (!isSpinningUp) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'connected',
|
||||
sessionId: authContext.sessionId,
|
||||
userId: authContext.userId,
|
||||
licenseType: authContext.license.licenseType,
|
||||
message: 'Connected to Dexorder AI',
|
||||
})
|
||||
);
|
||||
}
|
||||
sendStatus(socket, 'ready', 'Your workspace is ready!');
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'connected',
|
||||
sessionId: authContext.sessionId,
|
||||
userId: authContext.userId,
|
||||
licenseType: authContext.license.licenseType,
|
||||
message: 'Connected to Dexorder AI',
|
||||
})
|
||||
);
|
||||
|
||||
// Handle messages
|
||||
socket.on('message', async (data: Buffer) => {
|
||||
@@ -241,19 +258,16 @@ export class WebSocketHandler {
|
||||
return;
|
||||
}
|
||||
|
||||
// Stream response chunks to client
|
||||
// Chunks are streamed via channelAdapter.sendChunk() during handleMessage
|
||||
try {
|
||||
for await (const chunk of harness.streamMessage(inboundMessage)) {
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'agent_chunk',
|
||||
content: chunk,
|
||||
done: false,
|
||||
})
|
||||
);
|
||||
}
|
||||
// Acknowledge receipt immediately so the client can show the seen indicator
|
||||
socket.send(JSON.stringify({ type: 'agent_chunk', content: '', done: false }));
|
||||
|
||||
// Send final chunk with done flag
|
||||
logger.info('Calling harness.handleMessage');
|
||||
await harness.handleMessage(inboundMessage);
|
||||
|
||||
// Send done marker after all chunks have been streamed
|
||||
logger.debug('Sending done marker to client');
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'agent_chunk',
|
||||
@@ -331,73 +345,11 @@ export class WebSocketHandler {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll for container readiness in the background
|
||||
* Sends notification to client when container is ready
|
||||
*/
|
||||
private async pollContainerReadiness(
|
||||
socket: WebSocket,
|
||||
authContext: any,
|
||||
app: FastifyInstance
|
||||
): Promise<void> {
|
||||
const logger = app.log;
|
||||
const userId = authContext.userId;
|
||||
|
||||
logger.info({ userId }, 'Starting background poll for container readiness');
|
||||
|
||||
try {
|
||||
// Wait for container to become ready (2 minute timeout)
|
||||
const ready = await this.config.containerManager.waitForContainerReady(userId, 120000);
|
||||
|
||||
if (ready) {
|
||||
logger.info({ userId }, 'Container is now ready, notifying client');
|
||||
|
||||
// Send ready notification
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'status',
|
||||
status: 'ready',
|
||||
message: 'Your workspace is ready!',
|
||||
})
|
||||
);
|
||||
|
||||
// Also send the 'connected' message
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'connected',
|
||||
sessionId: authContext.sessionId,
|
||||
userId: authContext.userId,
|
||||
licenseType: authContext.license.licenseType,
|
||||
message: 'Connected to Dexorder AI',
|
||||
})
|
||||
);
|
||||
} else {
|
||||
logger.warn({ userId }, 'Container failed to become ready within timeout');
|
||||
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
message: 'Workspace failed to start. Please try again later.',
|
||||
})
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error({ error, userId }, 'Error waiting for container readiness');
|
||||
|
||||
socket.send(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
message: 'Error starting workspace. Please try again later.',
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Derive the container's XPUB event endpoint from the MCP server URL.
|
||||
*
|
||||
* MCP URL format: http://agent-user-abc123.dexorder-agents.svc.cluster.local:3000
|
||||
* Event endpoint: tcp://agent-user-abc123.dexorder-agents.svc.cluster.local:5570
|
||||
* MCP URL format: http://sandbox-user-abc123.dexorder-sandboxes.svc.cluster.local:3000
|
||||
* Event endpoint: tcp://sandbox-user-abc123.dexorder-sandboxes.svc.cluster.local:5570
|
||||
*/
|
||||
private getContainerEventEndpoint(mcpServerUrl: string): string {
|
||||
try {
|
||||
@@ -578,4 +530,14 @@ export class WebSocketHandler {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush and clean up all active sessions.
|
||||
* Called during graceful shutdown to ensure conversations are persisted.
|
||||
*/
|
||||
async endAllSessions(): Promise<void> {
|
||||
const cleanups = Array.from(this.harnesses.values()).map(h => h.cleanup());
|
||||
await Promise.allSettled(cleanups);
|
||||
this.harnesses.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ export interface DuckDBConfig {
|
||||
s3Endpoint?: string;
|
||||
s3AccessKey?: string;
|
||||
s3SecretKey?: string;
|
||||
conversationsBucket?: string; // S3 bucket for conversation cold storage
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -40,6 +41,7 @@ export class DuckDBClient {
|
||||
accessKey?: string;
|
||||
secretKey?: string;
|
||||
};
|
||||
private conversationsBucket?: string;
|
||||
private logger: FastifyBaseLogger;
|
||||
private initialized = false;
|
||||
|
||||
@@ -49,6 +51,7 @@ export class DuckDBClient {
|
||||
this.catalogUri = config.catalogUri;
|
||||
this.ohlcCatalogUri = config.ohlcCatalogUri || config.catalogUri;
|
||||
this.ohlcNamespace = config.ohlcNamespace || 'trading';
|
||||
this.conversationsBucket = config.conversationsBucket;
|
||||
this.s3Config = {
|
||||
endpoint: config.s3Endpoint,
|
||||
accessKey: config.s3AccessKey,
|
||||
@@ -190,7 +193,23 @@ export class DuckDBClient {
|
||||
);
|
||||
|
||||
if (!tablePath) {
|
||||
this.logger.warn('Conversations table not found');
|
||||
// Fallback: scan Parquet files written directly to conversations bucket
|
||||
if (this.conversationsBucket) {
|
||||
this.logger.debug({ userId, sessionId }, 'REST catalog miss, scanning Parquet cold storage');
|
||||
const parquetPath = `s3://${this.conversationsBucket}/gateway/conversations/**/user_id=${userId}/${sessionId}.parquet`;
|
||||
const fallbackSql = `
|
||||
SELECT id, user_id, session_id, role, content, metadata, timestamp
|
||||
FROM read_parquet('${parquetPath}')
|
||||
ORDER BY timestamp ASC
|
||||
${options?.limit ? `LIMIT ${options.limit}` : ''}
|
||||
`;
|
||||
try {
|
||||
return await this.query(fallbackSql);
|
||||
} catch {
|
||||
// File may not exist yet
|
||||
}
|
||||
}
|
||||
this.logger.warn('Conversations table not found and no cold storage configured');
|
||||
return [];
|
||||
}
|
||||
|
||||
@@ -526,6 +545,65 @@ export class DuckDBClient {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Append a batch of conversation messages as a Parquet file in S3.
|
||||
* Called once per session at session end to avoid small-file fragmentation.
|
||||
*/
|
||||
async appendMessages(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
messages: Array<{
|
||||
id: string;
|
||||
user_id: string;
|
||||
session_id: string;
|
||||
role: string;
|
||||
content: string;
|
||||
metadata: string;
|
||||
timestamp: number;
|
||||
}>
|
||||
): Promise<void> {
|
||||
await this.initialize();
|
||||
|
||||
if (!this.conversationsBucket || messages.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const now = new Date();
|
||||
const year = now.getUTCFullYear();
|
||||
const month = String(now.getUTCMonth() + 1).padStart(2, '0');
|
||||
const s3Path = `s3://${this.conversationsBucket}/gateway/conversations/year=${year}/month=${month}/user_id=${userId}/${sessionId}.parquet`;
|
||||
|
||||
// Use a timestamp-based name to avoid cross-session collisions
|
||||
const tempTable = `msg_flush_${Date.now()}`;
|
||||
|
||||
try {
|
||||
await this.query(`
|
||||
CREATE TEMP TABLE ${tempTable} (
|
||||
id VARCHAR,
|
||||
user_id VARCHAR,
|
||||
session_id VARCHAR,
|
||||
role VARCHAR,
|
||||
content VARCHAR,
|
||||
metadata VARCHAR,
|
||||
timestamp BIGINT
|
||||
)
|
||||
`);
|
||||
|
||||
for (const msg of messages) {
|
||||
await this.query(
|
||||
`INSERT INTO ${tempTable} VALUES (?, ?, ?, ?, ?, ?, ?)`,
|
||||
[msg.id, msg.user_id, msg.session_id, msg.role, msg.content, msg.metadata, msg.timestamp]
|
||||
);
|
||||
}
|
||||
|
||||
await this.query(`COPY ${tempTable} TO '${s3Path}' (FORMAT PARQUET)`);
|
||||
|
||||
this.logger.info({ userId, sessionId, count: messages.length, s3Path }, 'Conversation flushed to Parquet');
|
||||
} finally {
|
||||
await this.query(`DROP TABLE IF EXISTS ${tempTable}`).catch(() => {});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the DuckDB connection
|
||||
*/
|
||||
|
||||
@@ -27,6 +27,9 @@ export interface IcebergConfig {
|
||||
// OHLC/Trading data catalog (can be same or different from conversation catalog)
|
||||
ohlcCatalogUri?: string;
|
||||
ohlcNamespace?: string;
|
||||
|
||||
// S3 bucket for conversation cold storage (Parquet flush at session end)
|
||||
conversationsBucket?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -99,6 +102,7 @@ export class IcebergClient {
|
||||
s3Endpoint: config.s3Endpoint,
|
||||
s3AccessKey: config.s3AccessKey,
|
||||
s3SecretKey: config.s3SecretKey,
|
||||
conversationsBucket: config.conversationsBucket,
|
||||
},
|
||||
logger
|
||||
);
|
||||
@@ -137,6 +141,18 @@ export class IcebergClient {
|
||||
return this.duckdb.queryCheckpoint(userId, sessionId, checkpointId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Append a batch of conversation messages as a Parquet file in S3.
|
||||
* Called once per session at session end.
|
||||
*/
|
||||
async appendMessages(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
messages: IcebergMessage[]
|
||||
): Promise<void> {
|
||||
return this.duckdb.appendMessages(userId, sessionId, messages);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get table metadata
|
||||
*/
|
||||
|
||||
@@ -41,11 +41,8 @@ export class UserService {
|
||||
`SELECT
|
||||
user_id as "userId",
|
||||
email,
|
||||
license_type as "licenseType",
|
||||
features,
|
||||
resource_limits as "resourceLimits",
|
||||
license,
|
||||
mcp_server_url as "mcpServerUrl",
|
||||
preferred_model as "preferredModel",
|
||||
expires_at as "expiresAt",
|
||||
created_at as "createdAt",
|
||||
updated_at as "updatedAt"
|
||||
@@ -65,11 +62,8 @@ export class UserService {
|
||||
return UserLicenseSchema.parse({
|
||||
userId: row.userId,
|
||||
email: row.email,
|
||||
licenseType: row.licenseType,
|
||||
features: row.features,
|
||||
resourceLimits: row.resourceLimits,
|
||||
license: row.license,
|
||||
mcpServerUrl: row.mcpServerUrl,
|
||||
preferredModel: row.preferredModel,
|
||||
expiresAt: row.expiresAt,
|
||||
createdAt: row.createdAt,
|
||||
updatedAt: row.updatedAt,
|
||||
|
||||
@@ -1,25 +1,29 @@
|
||||
# Agent Harness
|
||||
|
||||
Comprehensive agent orchestration system for Dexorder AI platform, built on LangChain.js and LangGraph.js.
|
||||
Comprehensive agent orchestration system for Dexorder AI platform, built on LangChain.js deep agents architecture.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
gateway/src/harness/
|
||||
├── memory/ # Storage layer (Redis + Iceberg + Qdrant)
|
||||
├── skills/ # Individual capabilities (markdown + TypeScript)
|
||||
├── subagents/ # Specialized agents with multi-file memory
|
||||
├── workflows/ # LangGraph state machines
|
||||
├── tools/ # Platform tools (non-MCP)
|
||||
├── config/ # Configuration files
|
||||
└── index.ts # Main exports
|
||||
gateway/src/
|
||||
├── harness/
|
||||
│ ├── memory/ # Storage layer (Redis + Iceberg + Qdrant)
|
||||
│ ├── subagents/ # Specialized agents with multi-file memory
|
||||
│ ├── workflows/ # LangGraph state machines
|
||||
│ ├── prompts/ # System prompts
|
||||
│ ├── agent-harness.ts # Main orchestrator
|
||||
│ └── index.ts # Exports
|
||||
└── tools/ # LangChain tools (platform + MCP)
|
||||
├── platform/ # Local platform tools
|
||||
├── mcp/ # Remote MCP tool wrappers
|
||||
└── tool-registry.ts # Tool-to-agent routing
|
||||
```
|
||||
|
||||
## Core Components
|
||||
|
||||
### 1. Memory Layer (`memory/`)
|
||||
|
||||
Tiered storage architecture as per [architecture discussion](/chat/harness-rag.txt):
|
||||
Tiered storage architecture:
|
||||
|
||||
- **Redis**: Hot state (active sessions, checkpoints)
|
||||
- **Iceberg**: Cold storage (durable conversations, analytics)
|
||||
@@ -32,27 +36,32 @@ Tiered storage architecture as per [architecture discussion](/chat/harness-rag.t
|
||||
- `embedding-service.ts`: Text→vector conversion
|
||||
- `session-context.ts`: User context with channel metadata
|
||||
|
||||
### 2. Skills (`skills/`)
|
||||
### 2. Tools (`../tools/`)
|
||||
|
||||
Self-contained capabilities with markdown definitions:
|
||||
Standard LangChain tools following deep agents best practices:
|
||||
|
||||
- `*.skill.md`: Human-readable documentation
|
||||
- `*.ts`: Implementation extending `BaseSkill`
|
||||
- Input validation and error handling
|
||||
- Can use LLM, MCP tools, or platform tools
|
||||
**Platform Tools** (local services):
|
||||
- `symbol_lookup`: Symbol search and metadata resolution
|
||||
- `get_chart_data`: OHLCV data with workspace defaults
|
||||
|
||||
**MCP Tools** (remote, per-user):
|
||||
- Dynamically discovered from user's MCP server
|
||||
- Wrapped as standard LangChain `DynamicStructuredTool`
|
||||
- Filtered per-agent via `ToolRegistry`
|
||||
|
||||
**Example:**
|
||||
```typescript
|
||||
import { MarketAnalysisSkill } from './skills';
|
||||
import { getToolRegistry } from '../tools';
|
||||
|
||||
const skill = new MarketAnalysisSkill(logger, model);
|
||||
const result = await skill.execute({
|
||||
context: userContext,
|
||||
parameters: { ticker: 'BTC/USDT', period: '4h' }
|
||||
});
|
||||
const toolRegistry = getToolRegistry();
|
||||
const tools = await toolRegistry.getToolsForAgent(
|
||||
'main',
|
||||
mcpClient,
|
||||
availableMCPTools
|
||||
);
|
||||
```
|
||||
|
||||
See [skills/README.md](skills/README.md) for authoring guide.
|
||||
See `../tools/tool-registry.ts` for tool configuration.
|
||||
|
||||
### 3. Subagents (`subagents/`)
|
||||
|
||||
@@ -75,11 +84,20 @@ subagents/
|
||||
- Split memory into logical files (better organization)
|
||||
- Model overrides
|
||||
- Capability tagging
|
||||
- Configurable tool access via ToolRegistry
|
||||
|
||||
**Tool Configuration** (in `config.yaml`):
|
||||
```yaml
|
||||
tools:
|
||||
platform: ['symbol_lookup'] # Platform tools
|
||||
mcp: ['category_*'] # MCP tool patterns
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```typescript
|
||||
const codeReviewer = await createCodeReviewerSubagent(model, logger, basePath);
|
||||
const review = await codeReviewer.execute({ userContext }, strategyCode);
|
||||
const tools = await toolRegistry.getToolsForAgent('research', mcpClient, availableMCPTools);
|
||||
const subagent = await createResearchSubagent(model, logger, basePath, mcpClient, tools);
|
||||
const result = await subagent.execute({ userContext }, instruction);
|
||||
```
|
||||
|
||||
### 4. Workflows (`workflows/`)
|
||||
|
||||
@@ -1,22 +1,56 @@
|
||||
|
||||
import type { BaseMessage } from '@langchain/core/messages';
|
||||
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
|
||||
import { HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { UserLicense } from '../types/user.js';
|
||||
import type { License } from '../types/user.js';
|
||||
import { ChannelType } from '../types/user.js';
|
||||
import type { ConversationStore } from './memory/conversation-store.js';
|
||||
import type { InboundMessage, OutboundMessage } from '../types/messages.js';
|
||||
import { MCPClientConnector } from './mcp-client.js';
|
||||
import { CONTEXT_URIS, type ResourceContent } from '../types/resources.js';
|
||||
import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js';
|
||||
import { ModelRouter, RoutingStrategy } from '../llm/router.js';
|
||||
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
|
||||
import type { ChannelAdapter } from '../workspace/index.js';
|
||||
import type { ResearchSubagent } from './subagents/research/index.js';
|
||||
import type { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { getToolRegistry } from '../tools/tool-registry.js';
|
||||
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
|
||||
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
|
||||
import { createUserContext } from './memory/session-context.js';
|
||||
import { readFile } from 'fs/promises';
|
||||
import { join, dirname } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
|
||||
export interface AgentHarnessConfig {
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
/**
|
||||
* Session-specific config provided by channel handlers.
|
||||
* Contains only per-connection details — no infrastructure dependencies.
|
||||
*/
|
||||
export interface HarnessSessionConfig {
|
||||
userId: string;
|
||||
sessionId: string;
|
||||
license: UserLicense;
|
||||
providerConfig: ProviderConfig;
|
||||
license: License;
|
||||
mcpServerUrl: string;
|
||||
logger: FastifyBaseLogger;
|
||||
workspaceManager?: WorkspaceManager;
|
||||
channelAdapter?: ChannelAdapter;
|
||||
channelType?: ChannelType;
|
||||
channelUserId?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function type for creating AgentHarness instances.
|
||||
* Created in main.ts with infrastructure (storage, providerConfig) captured in closure.
|
||||
* Channel handlers call this factory without knowing about Redis or Iceberg.
|
||||
*/
|
||||
export type HarnessFactory = (sessionConfig: HarnessSessionConfig) => AgentHarness;
|
||||
|
||||
export interface AgentHarnessConfig extends HarnessSessionConfig {
|
||||
providerConfig: ProviderConfig;
|
||||
conversationStore?: ConversationStore;
|
||||
historyLimit: number;
|
||||
researchSubagent?: ResearchSubagent;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -27,32 +61,59 @@ export interface AgentHarnessConfig {
|
||||
* 1. Fetches context from user's MCP resources
|
||||
* 2. Routes to appropriate LLM model
|
||||
* 3. Calls LLM with embedded context
|
||||
* 4. Routes tool calls to user's MCP or platform tools
|
||||
* 4. Routes tool calls to platform tools or user's MCP tools
|
||||
* 5. Saves messages back to user's MCP
|
||||
*/
|
||||
export class AgentHarness {
|
||||
private static systemPromptTemplate: string | null = null;
|
||||
|
||||
private config: AgentHarnessConfig;
|
||||
private modelFactory: LLMProviderFactory;
|
||||
private modelRouter: ModelRouter;
|
||||
private mcpClient: MCPClientConnector;
|
||||
private workspaceManager?: WorkspaceManager;
|
||||
private lastWorkspaceSeq: number = 0;
|
||||
private channelAdapter?: ChannelAdapter;
|
||||
private isFirstMessage: boolean = true;
|
||||
private researchSubagent?: ResearchSubagent;
|
||||
private availableMCPTools: MCPToolInfo[] = [];
|
||||
private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
|
||||
private conversationStore?: ConversationStore;
|
||||
|
||||
constructor(config: AgentHarnessConfig) {
|
||||
this.config = config;
|
||||
this.workspaceManager = config.workspaceManager;
|
||||
this.channelAdapter = config.channelAdapter;
|
||||
this.researchSubagent = config.researchSubagent;
|
||||
|
||||
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
|
||||
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
|
||||
this.conversationStore = config.conversationStore;
|
||||
|
||||
this.mcpClient = new MCPClientConnector({
|
||||
userId: config.userId,
|
||||
mcpServerUrl: config.license.mcpServerUrl,
|
||||
mcpServerUrl: config.mcpServerUrl,
|
||||
logger: config.logger,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Load system prompt template from file (cached)
|
||||
*/
|
||||
private static async loadSystemPromptTemplate(): Promise<string> {
|
||||
if (!AgentHarness.systemPromptTemplate) {
|
||||
const templatePath = join(__dirname, 'prompts', 'system-prompt.md');
|
||||
AgentHarness.systemPromptTemplate = await readFile(templatePath, 'utf-8');
|
||||
}
|
||||
return AgentHarness.systemPromptTemplate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the channel adapter (can be called after construction)
|
||||
*/
|
||||
setChannelAdapter(adapter: ChannelAdapter): void {
|
||||
this.channelAdapter = adapter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize harness and connect to user's MCP server
|
||||
*/
|
||||
@@ -64,6 +125,13 @@ export class AgentHarness {
|
||||
|
||||
try {
|
||||
await this.mcpClient.connect();
|
||||
|
||||
// Discover available MCP tools from user's server
|
||||
await this.discoverMCPTools();
|
||||
|
||||
// Initialize research subagent if not provided
|
||||
await this.initializeResearchSubagent();
|
||||
|
||||
this.config.logger.info('Agent harness initialized');
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Failed to initialize agent harness');
|
||||
@@ -71,46 +139,384 @@ export class AgentHarness {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Discover available MCP tools from user's server
|
||||
*/
|
||||
private async discoverMCPTools(): Promise<void> {
|
||||
try {
|
||||
this.config.logger.debug('Discovering MCP tools from user server');
|
||||
|
||||
// Call MCP client to list tools
|
||||
const tools = await this.mcpClient.listTools();
|
||||
|
||||
// Convert to MCPToolInfo format
|
||||
this.availableMCPTools = tools.map(tool => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
inputSchema: tool.inputSchema as any,
|
||||
}));
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: this.availableMCPTools.length,
|
||||
toolNames: this.availableMCPTools.map(t => t.name),
|
||||
},
|
||||
'MCP tools discovered'
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.warn(
|
||||
{
|
||||
error,
|
||||
errorMessage: (error as Error)?.message,
|
||||
errorName: (error as Error)?.name,
|
||||
errorCode: (error as any)?.code,
|
||||
},
|
||||
'Failed to discover MCP tools - continuing without remote tools'
|
||||
);
|
||||
// Don't throw - MCP tools are optional, agent can still work with platform tools
|
||||
this.availableMCPTools = [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize research subagent
|
||||
*/
|
||||
private async initializeResearchSubagent(): Promise<void> {
|
||||
if (this.researchSubagent) {
|
||||
this.config.logger.debug('Research subagent already provided');
|
||||
return;
|
||||
}
|
||||
|
||||
this.config.logger.debug('Creating research subagent for session');
|
||||
|
||||
try {
|
||||
const { createResearchSubagent } = await import('./subagents/research/index.js');
|
||||
|
||||
// Create a model for the research subagent
|
||||
const model = await this.modelRouter.route(
|
||||
'research analysis', // dummy query
|
||||
this.config.license,
|
||||
RoutingStrategy.COMPLEXITY,
|
||||
this.config.userId
|
||||
);
|
||||
|
||||
// Get tools for research subagent from registry
|
||||
// Images from MCP responses are captured via onImage and routed to the subagent
|
||||
const toolRegistry = getToolRegistry();
|
||||
const researchTools = await toolRegistry.getToolsForAgent(
|
||||
'research',
|
||||
this.mcpClient,
|
||||
this.availableMCPTools,
|
||||
this.workspaceManager,
|
||||
(img) => this.researchImageCapture.push(img)
|
||||
);
|
||||
|
||||
// Path resolution: use the compiled output path
|
||||
const researchSubagentPath = join(__dirname, 'subagents', 'research');
|
||||
this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
|
||||
|
||||
this.researchSubagent = await createResearchSubagent(
|
||||
model,
|
||||
this.config.logger,
|
||||
researchSubagentPath,
|
||||
this.mcpClient,
|
||||
researchTools,
|
||||
this.researchImageCapture
|
||||
);
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: researchTools.length,
|
||||
toolNames: researchTools.map(t => t.name),
|
||||
},
|
||||
'Research subagent created successfully'
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.error(
|
||||
{ error, errorMessage: (error as Error).message, stack: (error as Error).stack },
|
||||
'Failed to create research subagent'
|
||||
);
|
||||
// Don't throw - research subagent is optional
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute model with tool calling loop
|
||||
* Handles multi-turn tool calls until the model produces a final text response
|
||||
*/
|
||||
private async executeWithToolCalling(
|
||||
model: any,
|
||||
messages: BaseMessage[],
|
||||
tools: DynamicStructuredTool[],
|
||||
maxIterations: number = 2
|
||||
): Promise<string> {
|
||||
this.config.logger.info(
|
||||
{ toolCount: tools.length, maxIterations },
|
||||
'Starting tool calling loop'
|
||||
);
|
||||
|
||||
const messagesCopy = [...messages];
|
||||
let iterations = 0;
|
||||
|
||||
while (iterations < maxIterations) {
|
||||
iterations++;
|
||||
this.config.logger.info(
|
||||
{
|
||||
iteration: iterations,
|
||||
messageCount: messagesCopy.length,
|
||||
lastMessageType: messagesCopy[messagesCopy.length - 1]?.constructor.name,
|
||||
},
|
||||
'Tool calling loop iteration'
|
||||
);
|
||||
|
||||
this.config.logger.debug('Streaming model response...');
|
||||
let response: any = null;
|
||||
try {
|
||||
const stream = await model.stream(messagesCopy);
|
||||
for await (const chunk of stream) {
|
||||
if (typeof chunk.content === 'string' && chunk.content.length > 0) {
|
||||
this.channelAdapter?.sendChunk(chunk.content);
|
||||
} else if (Array.isArray(chunk.content)) {
|
||||
for (const block of chunk.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
this.channelAdapter?.sendChunk(block.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
response = response ? response.concat(chunk) : chunk;
|
||||
}
|
||||
} catch (invokeError: any) {
|
||||
this.config.logger.error(
|
||||
{
|
||||
error: invokeError,
|
||||
errorMessage: invokeError?.message,
|
||||
errorStack: invokeError?.stack,
|
||||
iteration: iterations,
|
||||
messageCount: messagesCopy.length,
|
||||
},
|
||||
'Model streaming failed in tool calling loop'
|
||||
);
|
||||
throw invokeError;
|
||||
}
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
hasContent: !!response.content,
|
||||
contentLength: typeof response.content === 'string' ? response.content.length : 0,
|
||||
hasToolCalls: !!response.tool_calls,
|
||||
toolCallCount: response.tool_calls?.length || 0,
|
||||
},
|
||||
'Model response received'
|
||||
);
|
||||
|
||||
// Check if model wants to call tools
|
||||
if (!response.tool_calls || response.tool_calls.length === 0) {
|
||||
// No tool calls - return final response
|
||||
let finalContent: string;
|
||||
if (typeof response.content === 'string') {
|
||||
finalContent = response.content;
|
||||
} else if (Array.isArray(response.content)) {
|
||||
finalContent = response.content
|
||||
.filter((block: any) => block.type === 'text')
|
||||
.map((block: any) => block.text || '')
|
||||
.join('');
|
||||
} else {
|
||||
finalContent = JSON.stringify(response.content);
|
||||
}
|
||||
this.config.logger.info(
|
||||
{ finalContentLength: finalContent.length, iterations },
|
||||
'Tool calling loop complete - no more tool calls'
|
||||
);
|
||||
return finalContent;
|
||||
}
|
||||
|
||||
this.config.logger.info(
|
||||
{ toolCalls: response.tool_calls.map((tc: any) => tc.name) },
|
||||
'Processing tool calls'
|
||||
);
|
||||
|
||||
// Add assistant message with tool calls to history
|
||||
messagesCopy.push(response);
|
||||
|
||||
// Execute each tool call
|
||||
for (const toolCall of response.tool_calls) {
|
||||
this.config.logger.info(
|
||||
{ tool: toolCall.name, args: toolCall.args },
|
||||
'Executing tool call'
|
||||
);
|
||||
|
||||
const tool = tools.find(t => t.name === toolCall.name);
|
||||
|
||||
if (!tool) {
|
||||
this.config.logger.warn({ tool: toolCall.name }, 'Tool not found');
|
||||
messagesCopy.push(
|
||||
new ToolMessage({
|
||||
content: `Error: Tool '${toolCall.name}' not found`,
|
||||
tool_call_id: toolCall.id,
|
||||
})
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
this.channelAdapter?.sendToolCall?.(toolCall.name, this.getToolLabel(toolCall.name));
|
||||
const result = await tool.func(toolCall.args);
|
||||
|
||||
// Process result to extract images and send them via channel adapter
|
||||
const processedResult = this.processToolResult(result, toolCall.name);
|
||||
|
||||
this.config.logger.debug(
|
||||
{
|
||||
tool: toolCall.name,
|
||||
originalResultLength: result.length,
|
||||
processedResultLength: processedResult.length,
|
||||
},
|
||||
'Tool result processed'
|
||||
);
|
||||
|
||||
messagesCopy.push(
|
||||
new ToolMessage({
|
||||
content: processedResult,
|
||||
tool_call_id: toolCall.id,
|
||||
})
|
||||
);
|
||||
|
||||
this.config.logger.info(
|
||||
{ tool: toolCall.name, resultLength: processedResult.length },
|
||||
'Tool execution completed'
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.error(
|
||||
{
|
||||
error,
|
||||
errorMessage: (error as Error)?.message,
|
||||
errorStack: (error as Error)?.stack,
|
||||
tool: toolCall.name,
|
||||
args: toolCall.args,
|
||||
},
|
||||
'Tool execution failed'
|
||||
);
|
||||
|
||||
messagesCopy.push(
|
||||
new ToolMessage({
|
||||
content: `Error: ${error}`,
|
||||
tool_call_id: toolCall.id,
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Max iterations reached - return what we have
|
||||
this.config.logger.warn('Max tool calling iterations reached');
|
||||
return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.';
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle incoming message from user
|
||||
*/
|
||||
async handleMessage(message: InboundMessage): Promise<OutboundMessage> {
|
||||
this.config.logger.info(
|
||||
{ messageId: message.messageId, userId: message.userId },
|
||||
{ messageId: message.messageId, userId: message.userId, content: message.content.substring(0, 100) },
|
||||
'Processing user message'
|
||||
);
|
||||
|
||||
try {
|
||||
// 1. Fetch context resources from user's MCP server
|
||||
this.config.logger.debug('Fetching context resources from MCP');
|
||||
const contextResources = await this.fetchContextResources();
|
||||
// 1. Build system prompt from template
|
||||
this.config.logger.debug('Building system prompt');
|
||||
const systemPrompt = await this.buildSystemPrompt();
|
||||
this.config.logger.debug({ systemPromptLength: systemPrompt.length }, 'System prompt built');
|
||||
|
||||
// 2. Build system prompt from resources
|
||||
const systemPrompt = this.buildSystemPrompt(contextResources);
|
||||
// 2. Load recent conversation history
|
||||
const channelKey = this.config.channelType ?? ChannelType.WEBSOCKET;
|
||||
const storedMessages = this.conversationStore
|
||||
? await this.conversationStore.getRecentMessages(
|
||||
this.config.userId, this.config.sessionId, this.config.historyLimit, channelKey
|
||||
)
|
||||
: [];
|
||||
const history = this.conversationStore
|
||||
? this.conversationStore.toLangChainMessages(storedMessages)
|
||||
: [];
|
||||
this.config.logger.debug({ historyLength: history.length }, 'Conversation history loaded');
|
||||
|
||||
// 3. Build messages with conversation context from MCP
|
||||
const messages = this.buildMessages(message, contextResources);
|
||||
|
||||
// 4. Route to appropriate model
|
||||
// 4. Get the configured model
|
||||
this.config.logger.debug('Routing to model');
|
||||
const model = await this.modelRouter.route(
|
||||
message.content,
|
||||
this.config.license,
|
||||
RoutingStrategy.COMPLEXITY
|
||||
RoutingStrategy.COMPLEXITY,
|
||||
this.config.userId
|
||||
);
|
||||
this.config.logger.info({ modelName: model.constructor.name }, 'Model selected');
|
||||
|
||||
// 5. Build LangChain messages
|
||||
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
|
||||
const langchainMessages = this.buildLangChainMessages(systemPrompt, history, message.content);
|
||||
this.config.logger.debug({ messageCount: langchainMessages.length }, 'LangChain messages built');
|
||||
|
||||
// 6. Call LLM with streaming
|
||||
this.config.logger.debug('Invoking LLM');
|
||||
const response = await model.invoke(langchainMessages);
|
||||
// 6. Get tools for main agent from registry
|
||||
const toolRegistry = getToolRegistry();
|
||||
const tools = await toolRegistry.getToolsForAgent(
|
||||
'main',
|
||||
this.mcpClient,
|
||||
this.availableMCPTools,
|
||||
this.workspaceManager // Pass session workspace manager
|
||||
);
|
||||
|
||||
// 7. Extract text response (tool handling TODO)
|
||||
const assistantMessage = response.content as string;
|
||||
// Add research subagent as a tool if available
|
||||
if (this.researchSubagent) {
|
||||
const subagentContext = {
|
||||
userContext: createUserContext({
|
||||
userId: this.config.userId,
|
||||
sessionId: this.config.sessionId,
|
||||
license: this.config.license,
|
||||
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
|
||||
channelUserId: this.config.channelUserId ?? this.config.userId,
|
||||
}),
|
||||
};
|
||||
|
||||
// TODO: Save messages to Iceberg conversation table instead of MCP
|
||||
// Should batch-insert periodically or on session end to avoid many small Parquet files
|
||||
// await icebergConversationStore.appendMessages([...]);
|
||||
tools.push(createResearchAgentTool({
|
||||
researchSubagent: this.researchSubagent,
|
||||
context: subagentContext,
|
||||
logger: this.config.logger,
|
||||
}));
|
||||
}
|
||||
|
||||
this.config.logger.info(
|
||||
{
|
||||
toolCount: tools.length,
|
||||
toolNames: tools.map(t => t.name),
|
||||
},
|
||||
'Tools loaded for main agent'
|
||||
);
|
||||
|
||||
// 7. Bind tools to model
|
||||
const modelWithTools = tools.length > 0 && model.bindTools ? model.bindTools(tools) : model;
|
||||
|
||||
if (tools.length > 0) {
|
||||
this.config.logger.info(
|
||||
{ modelType: modelWithTools.constructor.name, toolsBound: tools.length > 0 && !!model.bindTools },
|
||||
'Model bound with tools'
|
||||
);
|
||||
}
|
||||
|
||||
// 8. Call LLM with tool calling loop
|
||||
this.config.logger.info('Invoking LLM with tool support');
|
||||
const assistantMessage = await this.executeWithToolCalling(modelWithTools, langchainMessages, tools);
|
||||
|
||||
this.config.logger.info(
|
||||
{ responseLength: assistantMessage.length },
|
||||
'LLM response received'
|
||||
);
|
||||
|
||||
// Save user message and assistant response to conversation store
|
||||
if (this.conversationStore) {
|
||||
await this.conversationStore.saveMessage(
|
||||
this.config.userId, this.config.sessionId, 'user', message.content, undefined, channelKey
|
||||
);
|
||||
await this.conversationStore.saveMessage(
|
||||
this.config.userId, this.config.sessionId, 'assistant', assistantMessage, undefined, channelKey
|
||||
);
|
||||
}
|
||||
|
||||
// Mark first message as processed
|
||||
if (this.isFirstMessage) {
|
||||
@@ -129,214 +535,174 @@ export class AgentHarness {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Stream response from LLM
|
||||
*/
|
||||
async *streamMessage(message: InboundMessage): AsyncGenerator<string> {
|
||||
try {
|
||||
// Fetch context
|
||||
const contextResources = await this.fetchContextResources();
|
||||
const systemPrompt = this.buildSystemPrompt(contextResources);
|
||||
const messages = this.buildMessages(message, contextResources);
|
||||
|
||||
// Route to model
|
||||
const model = await this.modelRouter.route(
|
||||
message.content,
|
||||
this.config.license,
|
||||
RoutingStrategy.COMPLEXITY
|
||||
);
|
||||
|
||||
// Build messages
|
||||
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
|
||||
|
||||
// Stream response
|
||||
const stream = await model.stream(langchainMessages);
|
||||
|
||||
let fullResponse = '';
|
||||
for await (const chunk of stream) {
|
||||
const content = chunk.content as string;
|
||||
fullResponse += content;
|
||||
yield content;
|
||||
}
|
||||
|
||||
// TODO: Save messages to Iceberg conversation table instead of MCP
|
||||
// Should batch-insert periodically or on session end to avoid many small Parquet files
|
||||
// await icebergConversationStore.appendMessages([
|
||||
// { role: 'user', content: message.content, timestamp: message.timestamp },
|
||||
// { role: 'assistant', content: fullResponse, timestamp: new Date() }
|
||||
// ]);
|
||||
|
||||
// Mark first message as processed
|
||||
if (this.isFirstMessage) {
|
||||
this.isFirstMessage = false;
|
||||
}
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Error streaming message');
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch context resources from user's MCP server
|
||||
*/
|
||||
private async fetchContextResources(): Promise<ResourceContent[]> {
|
||||
const contextUris = [
|
||||
CONTEXT_URIS.USER_PROFILE,
|
||||
CONTEXT_URIS.CONVERSATION_SUMMARY,
|
||||
CONTEXT_URIS.WORKSPACE_STATE,
|
||||
CONTEXT_URIS.SYSTEM_PROMPT,
|
||||
];
|
||||
|
||||
const resources = await Promise.all(
|
||||
contextUris.map(async (uri) => {
|
||||
try {
|
||||
return await this.mcpClient.readResource(uri);
|
||||
} catch (error) {
|
||||
this.config.logger.warn({ error, uri }, 'Failed to fetch resource, using empty');
|
||||
return { uri, text: '' };
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
return resources;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build messages array with context from resources
|
||||
*/
|
||||
private buildMessages(
|
||||
currentMessage: InboundMessage,
|
||||
contextResources: ResourceContent[]
|
||||
): Array<{ role: string; content: string }> {
|
||||
const conversationSummary = contextResources.find(
|
||||
(r) => r.uri === CONTEXT_URIS.CONVERSATION_SUMMARY
|
||||
);
|
||||
|
||||
const messages: Array<{ role: string; content: string }> = [];
|
||||
|
||||
// Add conversation context as a system-like user message
|
||||
if (conversationSummary?.text) {
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: `[Previous Conversation Context]\n${conversationSummary.text}`,
|
||||
});
|
||||
messages.push({
|
||||
role: 'assistant',
|
||||
content: 'I understand the context from our previous conversations.',
|
||||
});
|
||||
}
|
||||
|
||||
// Add workspace delta (for subsequent turns)
|
||||
const workspaceDelta = this.buildWorkspaceDelta();
|
||||
if (workspaceDelta) {
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: workspaceDelta,
|
||||
});
|
||||
}
|
||||
|
||||
// Add current user message
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: currentMessage.content,
|
||||
});
|
||||
|
||||
return messages;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert to LangChain message format
|
||||
*/
|
||||
private buildLangChainMessages(
|
||||
systemPrompt: string,
|
||||
messages: Array<{ role: string; content: string }>
|
||||
history: BaseMessage[],
|
||||
currentUserMessage: string
|
||||
): BaseMessage[] {
|
||||
const langchainMessages: BaseMessage[] = [new SystemMessage(systemPrompt)];
|
||||
|
||||
for (const msg of messages) {
|
||||
if (msg.role === 'user') {
|
||||
langchainMessages.push(new HumanMessage(msg.content));
|
||||
} else if (msg.role === 'assistant') {
|
||||
langchainMessages.push(new AIMessage(msg.content));
|
||||
}
|
||||
}
|
||||
|
||||
return langchainMessages;
|
||||
return [
|
||||
new SystemMessage(systemPrompt),
|
||||
...history,
|
||||
new HumanMessage(currentUserMessage),
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Build system prompt from platform base + user resources
|
||||
* Build system prompt from template
|
||||
*/
|
||||
private buildSystemPrompt(contextResources: ResourceContent[]): string {
|
||||
const userProfile = contextResources.find((r) => r.uri === CONTEXT_URIS.USER_PROFILE);
|
||||
const customPrompt = contextResources.find((r) => r.uri === CONTEXT_URIS.SYSTEM_PROMPT);
|
||||
const workspaceState = contextResources.find((r) => r.uri === CONTEXT_URIS.WORKSPACE_STATE);
|
||||
|
||||
// Base platform prompt
|
||||
let prompt = `You are a helpful AI assistant for Dexorder, an AI-first trading platform.
|
||||
You help users research markets, develop indicators and strategies, and analyze trading data.
|
||||
|
||||
User license: ${this.config.license.licenseType}
|
||||
Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
|
||||
|
||||
// Add user profile context
|
||||
if (userProfile?.text) {
|
||||
prompt += `\n\n# User Profile\n${userProfile.text}`;
|
||||
}
|
||||
|
||||
// Add workspace context from MCP resource (if available)
|
||||
if (workspaceState?.text) {
|
||||
prompt += `\n\n# Current Workspace (from MCP)\n${workspaceState.text}`;
|
||||
}
|
||||
private async buildSystemPrompt(): Promise<string> {
|
||||
// Load template and populate with license info
|
||||
const template = await AgentHarness.loadSystemPromptTemplate();
|
||||
let prompt = template
|
||||
.replace('{{licenseType}}', this.config.license.licenseType)
|
||||
.replace('{{features}}', JSON.stringify(this.config.license.features, null, 2));
|
||||
|
||||
// Add full workspace state from WorkspaceManager (first message only)
|
||||
if (this.isFirstMessage && this.workspaceManager) {
|
||||
const workspaceJSON = this.workspaceManager.serializeState();
|
||||
prompt += `\n\n# Workspace State (JSON)\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
|
||||
|
||||
// Record current workspace sequence for delta tracking
|
||||
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
|
||||
}
|
||||
|
||||
// Add user's custom instructions (highest priority)
|
||||
if (customPrompt?.text) {
|
||||
prompt += `\n\n# User Instructions\n${customPrompt.text}`;
|
||||
prompt += `\n\n# Current Workspace State\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
|
||||
}
|
||||
|
||||
return prompt;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build workspace delta message for subsequent turns.
|
||||
* Returns null if no changes since last message.
|
||||
* Map tool names to user-friendly status labels.
|
||||
*/
|
||||
private buildWorkspaceDelta(): string | null {
|
||||
if (!this.workspaceManager || this.isFirstMessage) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const changes = this.workspaceManager.getChangesSince(this.lastWorkspaceSeq);
|
||||
|
||||
if (Object.keys(changes).length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Format changes as JSON
|
||||
const deltaJSON = JSON.stringify(changes, null, 2);
|
||||
|
||||
// Update sequence marker
|
||||
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
|
||||
|
||||
return `[Workspace Changes Since Last Turn]\n\`\`\`json\n${deltaJSON}\n\`\`\``;
|
||||
private getToolLabel(toolName: string): string {
|
||||
const labels: Record<string, string> = {
|
||||
research_agent: 'Researching...',
|
||||
get_chart_data: 'Fetching chart data...',
|
||||
symbol_lookup: 'Looking up symbol...',
|
||||
};
|
||||
return labels[toolName] ?? `Running ${toolName}...`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Process tool result to extract images and send via channel adapter.
|
||||
* Returns text-only version for LLM context (no base64 image data).
|
||||
*/
|
||||
private processToolResult(result: string, toolName: string): string {
|
||||
// Most tools return plain strings - only process JSON results
|
||||
if (!result || typeof result !== 'string') {
|
||||
return String(result || '');
|
||||
}
|
||||
|
||||
// Try to parse as JSON
|
||||
let parsedResult: any;
|
||||
try {
|
||||
parsedResult = JSON.parse(result);
|
||||
} catch {
|
||||
// Not JSON, return as-is
|
||||
return result;
|
||||
}
|
||||
|
||||
// Check if result has images array (from ResearchSubagent)
|
||||
if (parsedResult && Array.isArray(parsedResult.images) && parsedResult.images.length > 0) {
|
||||
this.config.logger.info(
|
||||
{ tool: toolName, imageCount: parsedResult.images.length },
|
||||
'Extracting images from tool result'
|
||||
);
|
||||
|
||||
// Send each image via channel adapter
|
||||
for (const image of parsedResult.images) {
|
||||
if (image.data && image.mimeType) {
|
||||
if (this.channelAdapter) {
|
||||
this.config.logger.debug({ mimeType: image.mimeType }, 'Sending image to channel');
|
||||
this.channelAdapter.sendImage({
|
||||
data: image.data,
|
||||
mimeType: image.mimeType,
|
||||
caption: undefined,
|
||||
});
|
||||
} else {
|
||||
this.config.logger.warn('No channel adapter set, cannot send image');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create text-only version for LLM
|
||||
const textOnlyResult = {
|
||||
...parsedResult,
|
||||
images: undefined,
|
||||
imageCount: parsedResult.images.length,
|
||||
};
|
||||
|
||||
// Clean up undefined values
|
||||
Object.keys(textOnlyResult).forEach(key => {
|
||||
if (textOnlyResult[key] === undefined) {
|
||||
delete textOnlyResult[key];
|
||||
}
|
||||
});
|
||||
|
||||
return JSON.stringify(textOnlyResult);
|
||||
}
|
||||
|
||||
// Check for nested chart_images object
|
||||
if (parsedResult && parsedResult.chart_images && typeof parsedResult.chart_images === 'object') {
|
||||
this.config.logger.info(
|
||||
{ tool: toolName, chartCount: Object.keys(parsedResult.chart_images).length },
|
||||
'Extracting chart images from tool result'
|
||||
);
|
||||
|
||||
// Send each chart image via channel adapter
|
||||
for (const [chartId, chartData] of Object.entries(parsedResult.chart_images)) {
|
||||
const chart = chartData as any;
|
||||
if (chart.type === 'image' && chart.data) {
|
||||
if (this.channelAdapter) {
|
||||
this.config.logger.debug({ chartId }, 'Sending chart image to channel');
|
||||
this.channelAdapter.sendImage({
|
||||
data: chart.data,
|
||||
mimeType: 'image/png',
|
||||
caption: undefined,
|
||||
});
|
||||
} else {
|
||||
this.config.logger.warn('No channel adapter set, cannot send chart image');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create text-only version for LLM
|
||||
const textOnlyResult = {
|
||||
...parsedResult,
|
||||
chart_images: undefined,
|
||||
chartCount: Object.keys(parsedResult.chart_images).length,
|
||||
};
|
||||
|
||||
// Clean up undefined values
|
||||
Object.keys(textOnlyResult).forEach(key => {
|
||||
if (textOnlyResult[key] === undefined) {
|
||||
delete textOnlyResult[key];
|
||||
}
|
||||
});
|
||||
|
||||
return JSON.stringify(textOnlyResult);
|
||||
}
|
||||
|
||||
// No images found, return stringified result
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources
|
||||
* End the session: flush conversation to cold storage, then release resources.
|
||||
* Called by channel handlers on disconnect, session expiry, or graceful shutdown.
|
||||
*/
|
||||
async cleanup(): Promise<void> {
|
||||
this.config.logger.info('Cleaning up agent harness');
|
||||
|
||||
if (this.conversationStore) {
|
||||
const channelKey = this.config.channelType ?? ChannelType.WEBSOCKET;
|
||||
try {
|
||||
await this.conversationStore.flushToIceberg(
|
||||
this.config.userId, this.config.sessionId, this.config.historyLimit, channelKey
|
||||
);
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Failed to flush conversation to Iceberg during cleanup');
|
||||
}
|
||||
}
|
||||
|
||||
await this.mcpClient.disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,9 +3,6 @@
|
||||
// Memory
|
||||
export * from './memory/index.js';
|
||||
|
||||
// Skills
|
||||
export * from './skills/index.js';
|
||||
|
||||
// Subagents
|
||||
export * from './subagents/index.js';
|
||||
|
||||
|
||||
@@ -88,7 +88,7 @@ export class MCPClientConnector {
|
||||
|
||||
/**
|
||||
* List available tools from user's MCP server
|
||||
* Filters to only return tools marked as agent_accessible
|
||||
* Returns all available tools from the MCP server
|
||||
*/
|
||||
async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> {
|
||||
if (!this.client || !this.connected) {
|
||||
@@ -96,36 +96,54 @@ export class MCPClientConnector {
|
||||
}
|
||||
|
||||
try {
|
||||
this.config.logger.debug('Requesting tool list from MCP server');
|
||||
const response = await this.client.listTools();
|
||||
|
||||
// Filter tools to only include agent-accessible ones
|
||||
const tools = response.tools
|
||||
.filter((tool: any) => {
|
||||
// Check if tool has agent_accessible annotation
|
||||
const annotations = tool.annotations || {};
|
||||
return annotations.agent_accessible === true;
|
||||
})
|
||||
.map((tool: any) => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
inputSchema: tool.inputSchema,
|
||||
}));
|
||||
this.config.logger.debug(
|
||||
{
|
||||
hasTools: !!response.tools,
|
||||
toolCount: response.tools?.length || 0,
|
||||
},
|
||||
'Received tool list response'
|
||||
);
|
||||
|
||||
// Handle case where response.tools might be undefined
|
||||
if (!response.tools || !Array.isArray(response.tools)) {
|
||||
this.config.logger.warn('MCP server returned no tools array');
|
||||
return [];
|
||||
}
|
||||
|
||||
// Return all tools - agent-to-tool binding is handled by the tool registry
|
||||
const tools = response.tools.map((tool: any) => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
inputSchema: tool.inputSchema,
|
||||
}));
|
||||
|
||||
this.config.logger.debug(
|
||||
{ totalTools: response.tools.length, agentAccessibleTools: tools.length },
|
||||
'Listed MCP tools with filtering'
|
||||
{ toolCount: tools.length },
|
||||
'Listed MCP tools'
|
||||
);
|
||||
|
||||
return tools;
|
||||
} catch (error) {
|
||||
this.config.logger.error({ error }, 'Failed to list MCP tools');
|
||||
this.config.logger.error(
|
||||
{
|
||||
error,
|
||||
errorMessage: (error as Error)?.message,
|
||||
errorName: (error as Error)?.name,
|
||||
errorCode: (error as any)?.code,
|
||||
errorStack: (error as Error)?.stack,
|
||||
},
|
||||
'Failed to list MCP tools'
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List available resources from user's MCP server
|
||||
* Filters to only return resources marked as agent_accessible
|
||||
* Returns all available resources from the MCP server
|
||||
*/
|
||||
async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> {
|
||||
if (!this.client || !this.connected) {
|
||||
@@ -135,23 +153,17 @@ export class MCPClientConnector {
|
||||
try {
|
||||
const response = await this.client.listResources();
|
||||
|
||||
// Filter resources to only include agent-accessible ones
|
||||
const resources = response.resources
|
||||
.filter((resource: any) => {
|
||||
// Check if resource has agent_accessible annotation
|
||||
const annotations = resource.annotations || {};
|
||||
return annotations.agent_accessible === true;
|
||||
})
|
||||
.map((resource: any) => ({
|
||||
uri: resource.uri,
|
||||
name: resource.name,
|
||||
description: resource.description,
|
||||
mimeType: resource.mimeType,
|
||||
}));
|
||||
// Return all resources - agent-to-resource binding is handled by the tool registry
|
||||
const resources = response.resources.map((resource: any) => ({
|
||||
uri: resource.uri,
|
||||
name: resource.name,
|
||||
description: resource.description,
|
||||
mimeType: resource.mimeType,
|
||||
}));
|
||||
|
||||
this.config.logger.debug(
|
||||
{ totalResources: response.resources.length, agentAccessibleResources: resources.length },
|
||||
'Listed MCP resources with filtering'
|
||||
{ resourceCount: resources.length },
|
||||
'Listed MCP resources'
|
||||
);
|
||||
|
||||
return resources;
|
||||
|
||||
@@ -2,6 +2,7 @@ import type Redis from 'ioredis';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { BaseMessage } from '@langchain/core/messages';
|
||||
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
|
||||
import type { IcebergClient } from '../../clients/iceberg-client.js';
|
||||
|
||||
/**
|
||||
* Message record for storage
|
||||
@@ -17,36 +18,36 @@ export interface StoredMessage {
|
||||
}
|
||||
|
||||
/**
|
||||
* Conversation store: Redis (hot) + Iceberg (cold)
|
||||
* Conversation store: Redis (hot) + Iceberg/Parquet (cold)
|
||||
*
|
||||
* Hot path: Recent messages in Redis for fast access
|
||||
* Cold path: Full history in Iceberg for durability and analytics
|
||||
* Hot path: Recent messages in Redis for fast context loading
|
||||
* Cold path: Full session flushed as a single Parquet file at session end
|
||||
*
|
||||
* Architecture:
|
||||
* - Redis stores last N messages per session with TTL
|
||||
* - Iceberg stores all messages partitioned by user_id, session_id
|
||||
* - Supports time-travel queries for debugging and analysis
|
||||
* - Parquet file written to S3 at session close (one file per session)
|
||||
* - Cold read falls back to Parquet scan when Redis TTL has expired
|
||||
*/
|
||||
export class ConversationStore {
|
||||
private readonly HOT_MESSAGE_LIMIT = 50; // Keep last 50 messages in Redis
|
||||
private readonly HOT_MESSAGE_LIMIT = 50; // Redis buffer ceiling
|
||||
private readonly HOT_TTL_SECONDS = 3600; // 1 hour
|
||||
|
||||
constructor(
|
||||
private redis: Redis,
|
||||
private logger: FastifyBaseLogger
|
||||
// TODO: Add Iceberg catalog
|
||||
// private iceberg: IcebergCatalog
|
||||
private logger: FastifyBaseLogger,
|
||||
private icebergClient?: IcebergClient
|
||||
) {}
|
||||
|
||||
/**
|
||||
* Save a message to both Redis and Iceberg
|
||||
* Save a message to Redis hot path
|
||||
*/
|
||||
async saveMessage(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
role: 'user' | 'assistant' | 'system',
|
||||
content: string,
|
||||
metadata?: Record<string, unknown>
|
||||
metadata?: Record<string, unknown>,
|
||||
channelType?: string
|
||||
): Promise<void> {
|
||||
const message: StoredMessage = {
|
||||
id: `${userId}:${sessionId}:${Date.now()}`,
|
||||
@@ -60,20 +61,10 @@ export class ConversationStore {
|
||||
|
||||
this.logger.debug({ userId, sessionId, role }, 'Saving message');
|
||||
|
||||
// Hot: Add to Redis list (LPUSH for newest first)
|
||||
const key = this.getRedisKey(userId, sessionId);
|
||||
const key = this.getRedisKey(userId, sessionId, channelType);
|
||||
await this.redis.lpush(key, JSON.stringify(message));
|
||||
|
||||
// Trim to keep only recent messages
|
||||
await this.redis.ltrim(key, 0, this.HOT_MESSAGE_LIMIT - 1);
|
||||
|
||||
// Set TTL
|
||||
await this.redis.expire(key, this.HOT_TTL_SECONDS);
|
||||
|
||||
// Cold: Async append to Iceberg
|
||||
this.appendToIceberg(message).catch((error) => {
|
||||
this.logger.error({ error, userId, sessionId }, 'Failed to append message to Iceberg');
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -82,9 +73,10 @@ export class ConversationStore {
|
||||
async getRecentMessages(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
limit: number = 20
|
||||
limit: number,
|
||||
channelType?: string
|
||||
): Promise<StoredMessage[]> {
|
||||
const key = this.getRedisKey(userId, sessionId);
|
||||
const key = this.getRedisKey(userId, sessionId, channelType);
|
||||
const messages = await this.redis.lrange(key, 0, limit - 1);
|
||||
|
||||
return messages
|
||||
@@ -101,37 +93,70 @@ export class ConversationStore {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get full conversation history from Iceberg (cold path)
|
||||
* Get full conversation history — Redis first, falls back to Iceberg cold path
|
||||
*/
|
||||
async getFullHistory(
|
||||
userId: string,
|
||||
sessionId: string,
|
||||
limit: number,
|
||||
channelType?: string,
|
||||
timeRange?: { start: number; end: number }
|
||||
): Promise<StoredMessage[]> {
|
||||
this.logger.debug({ userId, sessionId, timeRange }, 'Loading full history from Iceberg');
|
||||
this.logger.debug({ userId, sessionId }, 'Loading full history');
|
||||
|
||||
// TODO: Implement Iceberg query
|
||||
// const table = this.iceberg.loadTable('gateway.conversations');
|
||||
// const filters = [
|
||||
// EqualTo('user_id', userId),
|
||||
// EqualTo('session_id', sessionId),
|
||||
// ];
|
||||
//
|
||||
// if (timeRange) {
|
||||
// filters.push(GreaterThanOrEqual('timestamp', timeRange.start));
|
||||
// filters.push(LessThanOrEqual('timestamp', timeRange.end));
|
||||
// }
|
||||
//
|
||||
// const df = await table.scan({
|
||||
// row_filter: And(...filters)
|
||||
// }).to_pandas();
|
||||
//
|
||||
// if (!df.empty) {
|
||||
// return df.sort_values('timestamp').to_dict('records');
|
||||
// }
|
||||
// Try Redis hot path first
|
||||
const hot = await this.getRecentMessages(userId, sessionId, limit, channelType);
|
||||
if (hot.length > 0) {
|
||||
return hot;
|
||||
}
|
||||
|
||||
// Fallback to Redis if Iceberg not available
|
||||
return await this.getRecentMessages(userId, sessionId, 1000);
|
||||
// Fall back to Iceberg cold path (post-TTL recovery)
|
||||
if (this.icebergClient) {
|
||||
this.logger.debug({ userId, sessionId }, 'Redis miss, querying Iceberg cold path');
|
||||
const coldMessages = await this.icebergClient.queryMessages(userId, sessionId, {
|
||||
startTime: timeRange?.start,
|
||||
endTime: timeRange?.end,
|
||||
limit,
|
||||
});
|
||||
return coldMessages.map((m) => ({
|
||||
id: m.id,
|
||||
userId: m.user_id,
|
||||
sessionId: m.session_id,
|
||||
role: m.role as StoredMessage['role'],
|
||||
content: m.content,
|
||||
timestamp: m.timestamp,
|
||||
}));
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush the full session from Redis to Iceberg as a single Parquet file.
|
||||
* Called once at session end — prevents small-file fragmentation.
|
||||
*/
|
||||
async flushToIceberg(userId: string, sessionId: string, limit: number, channelType?: string): Promise<void> {
|
||||
if (!this.icebergClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const messages = await this.getRecentMessages(userId, sessionId, limit, channelType);
|
||||
if (messages.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const icebergMessages = messages.map((m) => ({
|
||||
id: m.id,
|
||||
user_id: m.userId,
|
||||
session_id: m.sessionId,
|
||||
role: m.role,
|
||||
content: m.content,
|
||||
metadata: JSON.stringify(m.metadata || {}),
|
||||
timestamp: m.timestamp,
|
||||
}));
|
||||
|
||||
await this.icebergClient.appendMessages(userId, sessionId, icebergMessages);
|
||||
this.logger.info({ userId, sessionId, count: icebergMessages.length }, 'Conversation flushed to Iceberg');
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -155,9 +180,9 @@ export class ConversationStore {
|
||||
/**
|
||||
* Delete all messages for a session (Redis only, Iceberg handled separately)
|
||||
*/
|
||||
async deleteSession(userId: string, sessionId: string): Promise<void> {
|
||||
async deleteSession(userId: string, sessionId: string, channelType?: string): Promise<void> {
|
||||
this.logger.info({ userId, sessionId }, 'Deleting session from Redis');
|
||||
const key = this.getRedisKey(userId, sessionId);
|
||||
const key = this.getRedisKey(userId, sessionId, channelType);
|
||||
await this.redis.del(key);
|
||||
}
|
||||
|
||||
@@ -167,62 +192,22 @@ export class ConversationStore {
|
||||
async deleteUserData(userId: string): Promise<void> {
|
||||
this.logger.info({ userId }, 'Deleting all user messages for GDPR compliance');
|
||||
|
||||
// Delete from Redis
|
||||
const pattern = `conv:${userId}:*`;
|
||||
const keys = await this.redis.keys(pattern);
|
||||
if (keys.length > 0) {
|
||||
await this.redis.del(...keys);
|
||||
}
|
||||
|
||||
// Delete from Iceberg
|
||||
// Note: For GDPR compliance, need to:
|
||||
// 1. Send delete command via Kafka OR
|
||||
// 2. Use Iceberg REST API to delete rows (if supported) OR
|
||||
// 3. Coordinate with Flink job to handle deletes
|
||||
//
|
||||
// Iceberg delete flow:
|
||||
// - Mark rows for deletion (equality delete files)
|
||||
// - Run compaction to physically remove
|
||||
// - Expire old snapshots
|
||||
|
||||
this.logger.info({ userId }, 'User messages deleted from Redis - Iceberg GDPR delete not yet implemented');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Redis key for conversation
|
||||
* Get Redis key for conversation, namespaced by channel type
|
||||
*/
|
||||
private getRedisKey(userId: string, sessionId: string): string {
|
||||
return `conv:${userId}:${sessionId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Append message to Iceberg for durable storage
|
||||
*
|
||||
* Note: For production, send to Kafka topic that Flink consumes:
|
||||
* - Topic: gateway_conversations
|
||||
* - Flink job writes to gateway.conversations Iceberg table
|
||||
* - Ensures consistent write pattern with rest of system
|
||||
*/
|
||||
private async appendToIceberg(message: StoredMessage): Promise<void> {
|
||||
// TODO: Send to Kafka topic for Flink processing
|
||||
// const kafkaMessage = {
|
||||
// id: message.id,
|
||||
// user_id: message.userId,
|
||||
// session_id: message.sessionId,
|
||||
// role: message.role,
|
||||
// content: message.content,
|
||||
// metadata: JSON.stringify(message.metadata || {}),
|
||||
// timestamp: message.timestamp,
|
||||
// };
|
||||
// await this.kafkaProducer.send({
|
||||
// topic: 'gateway_conversations',
|
||||
// messages: [{ value: JSON.stringify(kafkaMessage) }]
|
||||
// });
|
||||
|
||||
this.logger.debug(
|
||||
{ messageId: message.id, userId: message.userId, sessionId: message.sessionId },
|
||||
'Message append to Iceberg (via Kafka) not yet implemented'
|
||||
);
|
||||
private getRedisKey(userId: string, sessionId: string, channelType?: string): string {
|
||||
return channelType
|
||||
? `conv:${channelType}:${userId}:${sessionId}`
|
||||
: `conv:${userId}:${sessionId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -241,7 +226,7 @@ export class ConversationStore {
|
||||
}
|
||||
|
||||
const messages = await this.getRecentMessages(userId, sessionId, count);
|
||||
const timestamps = messages.map((m) => m.timestamp / 1000); // Convert to milliseconds
|
||||
const timestamps = messages.map((m) => m.timestamp / 1000);
|
||||
|
||||
return {
|
||||
messageCount: count,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { UserLicense, ChannelType } from '../../types/user.js';
|
||||
import type { License, ChannelType } from '../../types/user.js';
|
||||
import type { BaseMessage } from '@langchain/core/messages';
|
||||
|
||||
/**
|
||||
@@ -62,7 +62,7 @@ export interface UserContext {
|
||||
// Identity
|
||||
userId: string;
|
||||
sessionId: string;
|
||||
license: UserLicense;
|
||||
license: License;
|
||||
|
||||
// Channel context (for multi-channel routing)
|
||||
activeChannel: ActiveChannel;
|
||||
@@ -146,7 +146,7 @@ export function getDefaultCapabilities(channelType: ChannelType): ChannelCapabil
|
||||
export function createUserContext(params: {
|
||||
userId: string;
|
||||
sessionId: string;
|
||||
license: UserLicense;
|
||||
license: License;
|
||||
channelType: ChannelType;
|
||||
channelUserId: string;
|
||||
channelCapabilities?: Partial<ChannelCapabilities>;
|
||||
|
||||
99
gateway/src/harness/prompts/system-prompt.md
Normal file
99
gateway/src/harness/prompts/system-prompt.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# Dexorder AI Assistant System Prompt
|
||||
|
||||
You are a helpful AI assistant for Dexorder, an AI-first trading platform.
|
||||
You help users research markets, develop indicators and strategies, and analyze trading data.
|
||||
|
||||
**User License:** {{licenseType}}
|
||||
|
||||
**Available Features:**
|
||||
{{features}}
|
||||
|
||||
---
|
||||
|
||||
# Important Instructions
|
||||
|
||||
## Task Delegation
|
||||
- For ANY research questions, deep analysis, statistical analysis, charting requests, plotting, ML tasks, or market data queries that require computation, you MUST use the 'research' tool
|
||||
- The research tool creates and runs Python scripts that generate charts and perform analysis
|
||||
- Use 'research' for anything involving: plotting, statistics, calculations, correlations, patterns, volume analysis, technical indicators, or any non-trivial data processing
|
||||
- NEVER write Python code directly in your responses to the user
|
||||
- NEVER show code to the user - delegate to the research tool instead
|
||||
- NEVER attempt to do analysis yourself - let the research subagent handle it
|
||||
|
||||
## Available Tools
|
||||
You have access to the following tools:
|
||||
|
||||
### research
|
||||
**This is your PRIMARY tool for any analysis, computation, charting, or plotting tasks.**
|
||||
|
||||
Creates and runs Python research scripts via a specialized research subagent.
|
||||
The subagent autonomously writes code, executes it, handles errors, and generates charts.
|
||||
|
||||
**ALWAYS use research for:**
|
||||
- Any plotting, charting, or visualization requests
|
||||
- Price action analysis and correlations
|
||||
- Technical indicators and overlays
|
||||
- Statistical analysis of market data
|
||||
- Volume analysis and patterns
|
||||
- Machine learning or predictive modeling
|
||||
- Any data-intensive computations
|
||||
- Multi-symbol comparisons
|
||||
- Custom calculations or transformations
|
||||
- Deep analysis requiring Python libraries (pandas, numpy, scipy, matplotlib, etc.)
|
||||
|
||||
**NEVER attempt to do analysis yourself in the chat.**
|
||||
Let the research subagent write and execute the Python code.
|
||||
|
||||
**Examples of when to use research:**
|
||||
- "Plot BTC with volume overlay" → use research
|
||||
- "Calculate correlation between ETH and BTC" → use research
|
||||
- "Show me RSI divergences" → use research
|
||||
- "Analyze Monday price patterns" → use research
|
||||
- "Does volume predict price movement?" → use research
|
||||
|
||||
Parameters:
|
||||
- instruction: Natural language description of the analysis to perform (be specific!)
|
||||
- name: A unique name for the research script (e.g., "BTC Weekly Analysis")
|
||||
|
||||
Example usage:
|
||||
- User: "Does Friday price action correlate with Monday?"
|
||||
- You: Call research tool with instruction="Analyze correlation between Friday and Monday price action during NY trading hours (9:30-4:00 ET)", name="Friday-Monday Correlation"
|
||||
|
||||
### symbol-lookup
|
||||
Look up trading symbols and get metadata.
|
||||
Use this when users mention tickers or need symbol information.
|
||||
|
||||
### get-chart-data
|
||||
**IMPORTANT: This is for QUICK, CASUAL information ONLY. This tool just returns raw data - it does NOT create charts or plots.**
|
||||
|
||||
Use ONLY when the user wants to:
|
||||
- Quickly glance at recent price data
|
||||
- Get a rough sense of current market conditions
|
||||
- Check basic OHLC values
|
||||
- Retrieve raw data without any processing
|
||||
|
||||
**DO NOT use get-chart-data for:**
|
||||
- Plotting, charting, or any visualization
|
||||
- Statistical analysis or correlations
|
||||
- Calculations or data transformations
|
||||
- Multi-symbol comparisons
|
||||
- Volume analysis or patterns
|
||||
- Any non-trivial computation
|
||||
- Technical indicators or overlays
|
||||
|
||||
**For anything beyond casual data retrieval, use the 'research' tool instead.**
|
||||
The research tool can create proper analysis with charts, statistics, and computations.
|
||||
|
||||
**Time Parameters:** Both from_time and to_time accept:
|
||||
- Unix timestamps as numbers (e.g., 1774126800)
|
||||
- Unix timestamps as strings (e.g., "1774126800")
|
||||
- Date strings (e.g., "2 days ago", "2024-01-01", "yesterday")
|
||||
|
||||
## Workspace Tools (MCP)
|
||||
You also have access to workspace persistence tools via MCP:
|
||||
|
||||
- **workspace_read(store_name)**: Read a workspace store (returns JSON object)
|
||||
- **workspace_write(store_name, data)**: Write/overwrite a workspace store
|
||||
- **workspace_patch(store_name, patch)**: Apply JSON patch to a workspace store
|
||||
|
||||
These are useful for persisting user preferences, analysis results, and custom data across sessions.
|
||||
@@ -1,146 +0,0 @@
|
||||
# Skills
|
||||
|
||||
Skills are individual capabilities that the agent can use to accomplish tasks. Each skill is a self-contained unit with:
|
||||
|
||||
- A markdown definition file (`*.skill.md`)
|
||||
- A TypeScript implementation extending `BaseSkill`
|
||||
- Clear input/output contracts
|
||||
- Parameter validation
|
||||
- Error handling
|
||||
|
||||
## Skill Structure
|
||||
|
||||
```
|
||||
skills/
|
||||
├── base-skill.ts # Base class
|
||||
├── {skill-name}.skill.md # Definition
|
||||
├── {skill-name}.ts # Implementation
|
||||
└── README.md # This file
|
||||
```
|
||||
|
||||
## Creating a New Skill
|
||||
|
||||
### 1. Create the Definition File
|
||||
|
||||
Create `{skill-name}.skill.md`:
|
||||
|
||||
```markdown
|
||||
# My Skill
|
||||
|
||||
**Version:** 1.0.0
|
||||
**Author:** Your Name
|
||||
**Tags:** category1, category2
|
||||
|
||||
## Description
|
||||
What does this skill do?
|
||||
|
||||
## Inputs
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| param1 | string | Yes | What it does |
|
||||
|
||||
## Outputs
|
||||
What does it return?
|
||||
|
||||
## Example Usage
|
||||
Show code example
|
||||
```
|
||||
|
||||
### 2. Create the Implementation
|
||||
|
||||
Create `{skill-name}.ts`:
|
||||
|
||||
```typescript
|
||||
import { BaseSkill, SkillInput, SkillResult, SkillMetadata } from './base-skill.js';
|
||||
|
||||
export class MySkill extends BaseSkill {
|
||||
getMetadata(): SkillMetadata {
|
||||
return {
|
||||
name: 'my-skill',
|
||||
description: 'What it does',
|
||||
version: '1.0.0',
|
||||
};
|
||||
}
|
||||
|
||||
getParametersSchema(): Record<string, unknown> {
|
||||
return {
|
||||
type: 'object',
|
||||
required: ['param1'],
|
||||
properties: {
|
||||
param1: { type: 'string' },
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
validateInput(parameters: Record<string, unknown>): boolean {
|
||||
return typeof parameters.param1 === 'string';
|
||||
}
|
||||
|
||||
async execute(input: SkillInput): Promise<SkillResult> {
|
||||
this.logStart(input);
|
||||
|
||||
try {
|
||||
// Your implementation here
|
||||
const result = this.success({ data: 'result' });
|
||||
this.logEnd(result);
|
||||
return result;
|
||||
} catch (error) {
|
||||
return this.error(error as Error);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Register the Skill
|
||||
|
||||
Add to `index.ts`:
|
||||
|
||||
```typescript
|
||||
export { MySkill } from './my-skill.js';
|
||||
```
|
||||
|
||||
## Using Skills in Workflows
|
||||
|
||||
Skills can be used in LangGraph workflows:
|
||||
|
||||
```typescript
|
||||
import { MarketAnalysisSkill } from '../skills/market-analysis.js';
|
||||
|
||||
const analyzeNode = async (state) => {
|
||||
const skill = new MarketAnalysisSkill(logger, model);
|
||||
const result = await skill.execute({
|
||||
context: state.userContext,
|
||||
parameters: {
|
||||
ticker: state.ticker,
|
||||
period: '4h',
|
||||
},
|
||||
});
|
||||
|
||||
return {
|
||||
analysis: result.data,
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Single Responsibility**: Each skill should do one thing well
|
||||
2. **Validation**: Always validate inputs thoroughly
|
||||
3. **Error Handling**: Use try/catch and return meaningful errors
|
||||
4. **Logging**: Use `logStart()` and `logEnd()` helpers
|
||||
5. **Documentation**: Keep the `.skill.md` file up to date
|
||||
6. **Testing**: Write unit tests for skill logic
|
||||
7. **Idempotency**: Skills should be safe to retry
|
||||
|
||||
## Available Skills
|
||||
|
||||
- **market-analysis**: Analyze market conditions and trends
|
||||
- *(Add more as you build them)*
|
||||
|
||||
## Skill Categories
|
||||
|
||||
- **Market Data**: Query and analyze market information
|
||||
- **Trading**: Execute trades, manage positions
|
||||
- **Analysis**: Technical and fundamental analysis
|
||||
- **Risk**: Risk assessment and management
|
||||
- **Utilities**: Helper functions and utilities
|
||||
@@ -1,128 +0,0 @@
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { UserContext } from '../memory/session-context.js';
|
||||
|
||||
/**
|
||||
* Skill metadata
|
||||
*/
|
||||
export interface SkillMetadata {
|
||||
name: string;
|
||||
description: string;
|
||||
version: string;
|
||||
author?: string;
|
||||
tags?: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Skill input parameters
|
||||
*/
|
||||
export interface SkillInput {
|
||||
context: UserContext;
|
||||
parameters: Record<string, unknown>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Skill execution result
|
||||
*/
|
||||
export interface SkillResult {
|
||||
success: boolean;
|
||||
data?: unknown;
|
||||
error?: string;
|
||||
metadata?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Base skill interface
|
||||
*
|
||||
* Skills are individual capabilities that the agent can use.
|
||||
* Each skill is defined by:
|
||||
* - A markdown file (*.skill.md) describing purpose, inputs, outputs
|
||||
* - A TypeScript implementation extending BaseSkill
|
||||
*
|
||||
* Skills can use:
|
||||
* - LLM calls for reasoning
|
||||
* - User's MCP server tools
|
||||
* - Platform tools (market data, charts, etc.)
|
||||
*/
|
||||
export abstract class BaseSkill {
|
||||
protected logger: FastifyBaseLogger;
|
||||
protected model?: BaseChatModel;
|
||||
|
||||
constructor(logger: FastifyBaseLogger, model?: BaseChatModel) {
|
||||
this.logger = logger;
|
||||
this.model = model;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get skill metadata
|
||||
*/
|
||||
abstract getMetadata(): SkillMetadata;
|
||||
|
||||
/**
|
||||
* Validate input parameters
|
||||
*/
|
||||
abstract validateInput(parameters: Record<string, unknown>): boolean;
|
||||
|
||||
/**
|
||||
* Execute the skill
|
||||
*/
|
||||
abstract execute(input: SkillInput): Promise<SkillResult>;
|
||||
|
||||
/**
|
||||
* Get required parameters schema (JSON Schema format)
|
||||
*/
|
||||
abstract getParametersSchema(): Record<string, unknown>;
|
||||
|
||||
/**
|
||||
* Helper: Log skill execution start
|
||||
*/
|
||||
protected logStart(input: SkillInput): void {
|
||||
const metadata = this.getMetadata();
|
||||
this.logger.info(
|
||||
{
|
||||
skill: metadata.name,
|
||||
userId: input.context.userId,
|
||||
sessionId: input.context.sessionId,
|
||||
parameters: input.parameters,
|
||||
},
|
||||
'Starting skill execution'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: Log skill execution end
|
||||
*/
|
||||
protected logEnd(result: SkillResult): void {
|
||||
const metadata = this.getMetadata();
|
||||
this.logger.info(
|
||||
{
|
||||
skill: metadata.name,
|
||||
success: result.success,
|
||||
error: result.error,
|
||||
},
|
||||
'Skill execution completed'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: Create success result
|
||||
*/
|
||||
protected success(data: unknown, metadata?: Record<string, unknown>): SkillResult {
|
||||
return {
|
||||
success: true,
|
||||
data,
|
||||
metadata,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: Create error result
|
||||
*/
|
||||
protected error(error: string | Error, metadata?: Record<string, unknown>): SkillResult {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : error,
|
||||
metadata,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
// Skills exports
|
||||
|
||||
export {
|
||||
BaseSkill,
|
||||
type SkillMetadata,
|
||||
type SkillInput,
|
||||
type SkillResult,
|
||||
} from './base-skill.js';
|
||||
|
||||
export { MarketAnalysisSkill } from './market-analysis.js';
|
||||
@@ -1,78 +0,0 @@
|
||||
# Market Analysis Skill
|
||||
|
||||
**Version:** 1.0.0
|
||||
**Author:** Dexorder AI Platform
|
||||
**Tags:** market-data, analysis, trading
|
||||
|
||||
## Description
|
||||
|
||||
Analyzes market conditions for a given ticker and timeframe. Provides insights on:
|
||||
- Price trends and patterns
|
||||
- Volume analysis
|
||||
- Support and resistance levels
|
||||
- Market sentiment indicators
|
||||
|
||||
## Inputs
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `ticker` | string | Yes | Market identifier (e.g., "BINANCE:BTC/USDT") |
|
||||
| `period` | string | Yes | Analysis period ("1h", "4h", "1d", "1w") |
|
||||
| `startTime` | number | No | Start timestamp (microseconds), defaults to 7 days ago |
|
||||
| `endTime` | number | No | End timestamp (microseconds), defaults to now |
|
||||
| `indicators` | string[] | No | Additional indicators to include (e.g., ["RSI", "MACD"]) |
|
||||
|
||||
## Outputs
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: true,
|
||||
data: {
|
||||
ticker: string,
|
||||
period: string,
|
||||
timeRange: { start: number, end: number },
|
||||
trend: "bullish" | "bearish" | "neutral",
|
||||
priceChange: number,
|
||||
volumeProfile: {
|
||||
average: number,
|
||||
recent: number,
|
||||
trend: "increasing" | "decreasing" | "stable"
|
||||
},
|
||||
supportLevels: number[],
|
||||
resistanceLevels: number[],
|
||||
indicators: Record<string, unknown>,
|
||||
analysis: string // LLM-generated natural language analysis
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Example Usage
|
||||
|
||||
```typescript
|
||||
const skill = new MarketAnalysisSkill(logger, model);
|
||||
|
||||
const result = await skill.execute({
|
||||
context: userContext,
|
||||
parameters: {
|
||||
ticker: "BINANCE:BTC/USDT",
|
||||
period: "4h",
|
||||
indicators: ["RSI", "MACD"]
|
||||
}
|
||||
});
|
||||
|
||||
console.log(result.data.analysis);
|
||||
// "Bitcoin is showing bullish momentum with RSI at 65 and MACD crossing above signal line..."
|
||||
```
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
- Queries OHLC data from Iceberg warehouse
|
||||
- Uses LLM for natural language analysis
|
||||
- Caches results for 5 minutes to reduce computation
|
||||
- Falls back to reduced analysis if Iceberg unavailable
|
||||
|
||||
## Dependencies
|
||||
|
||||
- Iceberg client (market data)
|
||||
- LLM model (analysis generation)
|
||||
- User's MCP server (optional custom indicators)
|
||||
@@ -1,198 +0,0 @@
|
||||
import { BaseSkill, type SkillInput, type SkillResult, type SkillMetadata } from './base-skill.js';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
|
||||
|
||||
/**
|
||||
* Market analysis skill implementation
|
||||
*
|
||||
* See market-analysis.skill.md for full documentation
|
||||
*/
|
||||
export class MarketAnalysisSkill extends BaseSkill {
|
||||
constructor(logger: FastifyBaseLogger, model?: BaseChatModel) {
|
||||
super(logger, model);
|
||||
}
|
||||
|
||||
getMetadata(): SkillMetadata {
|
||||
return {
|
||||
name: 'market-analysis',
|
||||
description: 'Analyze market conditions for a given ticker and timeframe',
|
||||
version: '1.0.0',
|
||||
author: 'Dexorder AI Platform',
|
||||
tags: ['market-data', 'analysis', 'trading'],
|
||||
};
|
||||
}
|
||||
|
||||
getParametersSchema(): Record<string, unknown> {
|
||||
return {
|
||||
type: 'object',
|
||||
required: ['ticker', 'period'],
|
||||
properties: {
|
||||
ticker: {
|
||||
type: 'string',
|
||||
description: 'Market identifier (e.g., "BINANCE:BTC/USDT")',
|
||||
},
|
||||
period: {
|
||||
type: 'string',
|
||||
enum: ['1h', '4h', '1d', '1w'],
|
||||
description: 'Analysis period',
|
||||
},
|
||||
startTime: {
|
||||
type: 'number',
|
||||
description: 'Start timestamp in microseconds',
|
||||
},
|
||||
endTime: {
|
||||
type: 'number',
|
||||
description: 'End timestamp in microseconds',
|
||||
},
|
||||
indicators: {
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
description: 'Additional indicators to include',
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
validateInput(parameters: Record<string, unknown>): boolean {
|
||||
if (!parameters.ticker || typeof parameters.ticker !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (!parameters.period || typeof parameters.period !== 'string') {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
async execute(input: SkillInput): Promise<SkillResult> {
|
||||
this.logStart(input);
|
||||
|
||||
if (!this.validateInput(input.parameters)) {
|
||||
return this.error('Invalid parameters: ticker and period are required');
|
||||
}
|
||||
|
||||
try {
|
||||
const ticker = input.parameters.ticker as string;
|
||||
const period = input.parameters.period as string;
|
||||
const indicators = (input.parameters.indicators as string[]) || [];
|
||||
|
||||
// 1. Fetch OHLC data from Iceberg
|
||||
// TODO: Implement Iceberg query
|
||||
// const ohlcData = await this.fetchOHLCData(ticker, period, startTime, endTime);
|
||||
const ohlcData = this.getMockOHLCData(); // Placeholder
|
||||
|
||||
// 2. Calculate technical indicators
|
||||
const analysis = this.calculateAnalysis(ohlcData, indicators);
|
||||
|
||||
// 3. Generate natural language analysis using LLM
|
||||
let narrativeAnalysis = '';
|
||||
if (this.model) {
|
||||
narrativeAnalysis = await this.generateNarrativeAnalysis(
|
||||
ticker,
|
||||
period,
|
||||
analysis
|
||||
);
|
||||
}
|
||||
|
||||
const result = this.success({
|
||||
ticker,
|
||||
period,
|
||||
timeRange: {
|
||||
start: ohlcData.startTime,
|
||||
end: ohlcData.endTime,
|
||||
},
|
||||
trend: analysis.trend,
|
||||
priceChange: analysis.priceChange,
|
||||
volumeProfile: analysis.volumeProfile,
|
||||
supportLevels: analysis.supportLevels,
|
||||
resistanceLevels: analysis.resistanceLevels,
|
||||
indicators: analysis.indicators,
|
||||
analysis: narrativeAnalysis,
|
||||
});
|
||||
|
||||
this.logEnd(result);
|
||||
return result;
|
||||
} catch (error) {
|
||||
const result = this.error(error as Error);
|
||||
this.logEnd(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate technical analysis from OHLC data
|
||||
*/
|
||||
private calculateAnalysis(
|
||||
ohlcData: any,
|
||||
_requestedIndicators: string[]
|
||||
): any {
|
||||
// TODO: Implement proper technical analysis
|
||||
// This is a simplified placeholder
|
||||
|
||||
const priceChange = ((ohlcData.close - ohlcData.open) / ohlcData.open) * 100;
|
||||
const trend = priceChange > 1 ? 'bullish' : priceChange < -1 ? 'bearish' : 'neutral';
|
||||
|
||||
return {
|
||||
trend,
|
||||
priceChange,
|
||||
volumeProfile: {
|
||||
average: ohlcData.avgVolume,
|
||||
recent: ohlcData.currentVolume,
|
||||
trend: ohlcData.currentVolume > ohlcData.avgVolume ? 'increasing' : 'decreasing',
|
||||
},
|
||||
supportLevels: [ohlcData.low * 0.98, ohlcData.low * 0.95],
|
||||
resistanceLevels: [ohlcData.high * 1.02, ohlcData.high * 1.05],
|
||||
indicators: {},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate natural language analysis using LLM
|
||||
*/
|
||||
private async generateNarrativeAnalysis(
|
||||
ticker: string,
|
||||
period: string,
|
||||
analysis: any
|
||||
): Promise<string> {
|
||||
if (!this.model) {
|
||||
return 'LLM not available for narrative analysis';
|
||||
}
|
||||
|
||||
const systemPrompt = `You are a professional market analyst.
|
||||
Provide concise, actionable market analysis based on technical data.
|
||||
Focus on key insights and avoid jargon.`;
|
||||
|
||||
const userPrompt = `Analyze the following market data for ${ticker} (${period}):
|
||||
|
||||
Trend: ${analysis.trend}
|
||||
Price Change: ${analysis.priceChange.toFixed(2)}%
|
||||
Volume: ${analysis.volumeProfile.trend}
|
||||
Support Levels: ${analysis.supportLevels.join(', ')}
|
||||
Resistance Levels: ${analysis.resistanceLevels.join(', ')}
|
||||
|
||||
Provide a 2-3 sentence analysis suitable for a trading decision.`;
|
||||
|
||||
const response = await this.model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
return response.content as string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Mock OHLC data (placeholder until Iceberg integration)
|
||||
*/
|
||||
private getMockOHLCData(): any {
|
||||
return {
|
||||
startTime: Date.now() - 7 * 24 * 60 * 60 * 1000,
|
||||
endTime: Date.now(),
|
||||
open: 50000,
|
||||
high: 52000,
|
||||
low: 49000,
|
||||
close: 51500,
|
||||
avgVolume: 1000000,
|
||||
currentVolume: 1200000,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,8 @@ import type { BaseMessage } from '@langchain/core/messages';
|
||||
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { UserContext } from '../memory/session-context.js';
|
||||
import type { MCPClientConnector } from '../mcp-client.js';
|
||||
import type { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { readFile } from 'fs/promises';
|
||||
import { join } from 'path';
|
||||
|
||||
@@ -17,6 +19,10 @@ export interface SubagentConfig {
|
||||
memoryFiles: string[]; // Memory files to load from memory/ directory
|
||||
capabilities: string[];
|
||||
systemPromptFile?: string; // Path to system-prompt.md
|
||||
tools?: {
|
||||
platform?: string[]; // Platform tool names
|
||||
mcp?: string[]; // MCP tool patterns/names
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -52,15 +58,21 @@ export abstract class BaseSubagent {
|
||||
protected config: SubagentConfig;
|
||||
protected systemPrompt?: string;
|
||||
protected memoryContext: string[] = [];
|
||||
protected mcpClient?: MCPClientConnector;
|
||||
protected tools: DynamicStructuredTool[] = [];
|
||||
|
||||
constructor(
|
||||
config: SubagentConfig,
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger
|
||||
logger: FastifyBaseLogger,
|
||||
mcpClient?: MCPClientConnector,
|
||||
tools?: DynamicStructuredTool[]
|
||||
) {
|
||||
this.config = config;
|
||||
this.model = model;
|
||||
this.logger = logger;
|
||||
this.mcpClient = mcpClient;
|
||||
this.tools = tools || [];
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -176,4 +188,56 @@ export abstract class BaseSubagent {
|
||||
/** True when this subagent's configuration declares the given capability. */
hasCapability(capability: string): boolean {
  const { capabilities } = this.config;
  return capabilities.includes(capability);
}
|
||||
|
||||
/**
|
||||
* Call a tool on the user's MCP server
|
||||
*
|
||||
* @param name Tool name
|
||||
* @param args Tool arguments
|
||||
* @returns Tool result
|
||||
* @throws Error if MCP client not available or tool call fails
|
||||
*/
|
||||
protected async callMCPTool(name: string, args: Record<string, unknown>): Promise<unknown> {
|
||||
if (!this.mcpClient) {
|
||||
throw new Error('MCP client not available for this subagent');
|
||||
}
|
||||
|
||||
try {
|
||||
this.logger.debug({ tool: name, args }, 'Calling MCP tool from subagent');
|
||||
const result = await this.mcpClient.callTool(name, args);
|
||||
return result;
|
||||
} catch (error) {
|
||||
this.logger.error({ error, tool: name }, 'MCP tool call failed');
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if MCP client is available
|
||||
*/
|
||||
protected hasMCPClient(): boolean {
|
||||
return this.mcpClient !== undefined;
|
||||
}
|
||||
|
||||
/** Tools currently available to this subagent (may be empty). */
getTools(): DynamicStructuredTool[] {
  return this.tools;
}
|
||||
|
||||
/**
|
||||
* Set tools for this subagent (used during initialization)
|
||||
*/
|
||||
setTools(tools: DynamicStructuredTool[]): void {
|
||||
this.tools = tools;
|
||||
this.logger.debug(
|
||||
{
|
||||
subagent: this.config.name,
|
||||
toolCount: tools.length,
|
||||
toolNames: tools.map(t => t.name),
|
||||
},
|
||||
'Tools set for subagent'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,8 +19,8 @@ import type { FastifyBaseLogger } from 'fastify';
|
||||
* - best-practices.md: Industry standards
|
||||
*/
|
||||
export class CodeReviewerSubagent extends BaseSubagent {
|
||||
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger) {
|
||||
super(config, model, logger);
|
||||
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger, mcpClient?: any, tools?: any[]) {
|
||||
super(config, model, logger, mcpClient, tools);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -72,7 +72,9 @@ export class CodeReviewerSubagent extends BaseSubagent {
|
||||
export async function createCodeReviewerSubagent(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
basePath: string
|
||||
basePath: string,
|
||||
mcpClient?: any,
|
||||
tools?: any[]
|
||||
): Promise<CodeReviewerSubagent> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const { join } = await import('path');
|
||||
@@ -84,7 +86,7 @@ export async function createCodeReviewerSubagent(
|
||||
const config = yaml.load(configContent) as SubagentConfig;
|
||||
|
||||
// Create and initialize subagent
|
||||
const subagent = new CodeReviewerSubagent(config, model, logger);
|
||||
const subagent = new CodeReviewerSubagent(config, model, logger, mcpClient, tools);
|
||||
await subagent.initialize(basePath);
|
||||
|
||||
return subagent;
|
||||
|
||||
@@ -10,3 +10,9 @@ export {
|
||||
CodeReviewerSubagent,
|
||||
createCodeReviewerSubagent,
|
||||
} from './code-reviewer/index.js';
|
||||
|
||||
export {
|
||||
ResearchSubagent,
|
||||
createResearchSubagent,
|
||||
type ResearchResult,
|
||||
} from './research/index.js';
|
||||
|
||||
2
gateway/src/harness/subagents/research/.gitignore
vendored
Normal file
2
gateway/src/harness/subagents/research/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# Auto-generated at build time by bin/build
|
||||
api-source/
|
||||
31
gateway/src/harness/subagents/research/config.yaml
Normal file
31
gateway/src/harness/subagents/research/config.yaml
Normal file
@@ -0,0 +1,31 @@
|
||||
# Research Subagent Configuration
|
||||
|
||||
name: research
|
||||
description: Creates and runs Python research scripts for market analysis, charting, and statistical analysis
|
||||
|
||||
# Model configuration
|
||||
model: claude-sonnet-4-6
|
||||
temperature: 0.3
|
||||
maxTokens: 8192
|
||||
|
||||
# Memory files to load from memory/ directory
|
||||
memoryFiles:
|
||||
- api-reference.md
|
||||
- usage-examples.md
|
||||
|
||||
# System prompt file
|
||||
systemPromptFile: system-prompt.md
|
||||
|
||||
# Capabilities this subagent provides
|
||||
capabilities:
|
||||
- research_scripting
|
||||
- data_analysis
|
||||
- charting
|
||||
- statistical_analysis
|
||||
|
||||
# Tools available to this subagent
|
||||
tools:
|
||||
platform: [] # No platform tools needed (works at script level)
|
||||
mcp:
|
||||
- category_* # All category_ tools (write, edit, read, list)
|
||||
- execute_research # Script execution tool
|
||||
209
gateway/src/harness/subagents/research/index.ts
Normal file
209
gateway/src/harness/subagents/research/index.ts
Normal file
@@ -0,0 +1,209 @@
|
||||
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { SystemMessage } from '@langchain/core/messages';
|
||||
import { createReactAgent } from '@langchain/langgraph/prebuilt';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { MCPClientConnector } from '../../mcp-client.js';
|
||||
|
||||
/**
|
||||
* Result from research subagent execution
|
||||
*/
|
||||
export interface ResearchResult {
|
||||
text: string;
|
||||
images: Array<{
|
||||
data: string;
|
||||
mimeType: string;
|
||||
}>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Research Subagent
|
||||
*
|
||||
* Specialized agent for creating and running Python research scripts.
|
||||
* Uses category_* MCP tools to:
|
||||
* - Create/edit research scripts with DataAPI and ChartingAPI
|
||||
* - Execute scripts and capture matplotlib charts
|
||||
* - Iterate on errors with autonomous coding loop
|
||||
*
|
||||
* The subagent has direct access to MCP tools and handles the full
|
||||
* coding loop without requiring skill-level orchestration.
|
||||
*
|
||||
* Images from script execution are extracted and returned separately
|
||||
* but are NOT loaded into the LLM context (pass-through only).
|
||||
*/
|
||||
export class ResearchSubagent extends BaseSubagent {
|
||||
private lastImages: Array<{data: string; mimeType: string}> = [];
|
||||
// Shared with the MCP tool wrappers — populated as tools run, cleared per execution
|
||||
private imageCapture: Array<{data: string; mimeType: string}> = [];
|
||||
|
||||
constructor(
|
||||
config: SubagentConfig,
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
mcpClient?: MCPClientConnector,
|
||||
tools?: any[]
|
||||
) {
|
||||
super(config, model, logger, mcpClient, tools);
|
||||
}
|
||||
|
||||
setImageCapture(capture: Array<{data: string; mimeType: string}>): void {
|
||||
this.imageCapture = capture;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute research request using LangGraph's createReactAgent.
|
||||
* This is the standard LangChain pattern for agents with tool access —
|
||||
* createReactAgent handles the tool calling loop automatically.
|
||||
*/
|
||||
async execute(context: SubagentContext, instruction: string): Promise<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
instruction: instruction.substring(0, 200),
|
||||
toolCount: this.tools.length,
|
||||
toolNames: this.tools.map(t => t.name),
|
||||
},
|
||||
'Research subagent starting'
|
||||
);
|
||||
|
||||
if (!this.hasMCPClient()) {
|
||||
throw new Error('MCP client not available for research subagent');
|
||||
}
|
||||
|
||||
if (this.tools.length === 0) {
|
||||
this.logger.warn('Research subagent has no tools — cannot write or execute scripts');
|
||||
}
|
||||
|
||||
// Clear previous images (in-place so tool wrappers keep the same array reference)
|
||||
this.imageCapture.length = 0;
|
||||
this.lastImages = [];
|
||||
|
||||
// Build system prompt (with memory context appended)
|
||||
const initialMessages = this.buildMessages(context, instruction);
|
||||
// buildMessages returns [SystemMessage, ...history, HumanMessage]
|
||||
// Extract system content for createReactAgent's prompt parameter
|
||||
const systemMessage = initialMessages[0];
|
||||
const humanMessage = initialMessages[initialMessages.length - 1];
|
||||
|
||||
// createReactAgent is the standard LangChain/LangGraph pattern for tool-using agents.
|
||||
// It manages the tool calling loop, message accumulation, and termination automatically.
|
||||
const agent = createReactAgent({
|
||||
llm: this.model,
|
||||
tools: this.tools,
|
||||
prompt: systemMessage as SystemMessage,
|
||||
});
|
||||
|
||||
const result = await agent.invoke(
|
||||
{ messages: [humanMessage] },
|
||||
{ recursionLimit: 20 }
|
||||
);
|
||||
|
||||
// The final message in the graph output is the agent's last AIMessage
|
||||
const allMessages: any[] = result.messages ?? [];
|
||||
|
||||
this.logger.info(
|
||||
{ messageCount: allMessages.length },
|
||||
'Research subagent graph completed'
|
||||
);
|
||||
|
||||
// Images were captured in real-time by the MCP tool wrappers into this.imageCapture
|
||||
this.lastImages = [...this.imageCapture];
|
||||
|
||||
// Return the final AI response
|
||||
const lastAI = [...allMessages].reverse().find(
|
||||
(m: any) => m.constructor?.name === 'AIMessage' || m._getType?.() === 'ai'
|
||||
);
|
||||
|
||||
const finalText = lastAI
|
||||
? (typeof lastAI.content === 'string' ? lastAI.content : JSON.stringify(lastAI.content))
|
||||
: 'Research completed.';
|
||||
|
||||
this.logger.info(
|
||||
{ textLength: finalText.length, imageCount: this.lastImages.length },
|
||||
'Research subagent finished'
|
||||
);
|
||||
|
||||
return finalText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute with full result including images
|
||||
* This is the method that ResearchSkill should use
|
||||
*/
|
||||
async executeWithImages(context: SubagentContext, instruction: string): Promise<ResearchResult> {
|
||||
const text = await this.execute(context, instruction);
|
||||
return {
|
||||
text,
|
||||
images: this.lastImages,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get images from last execution
|
||||
*/
|
||||
getLastImages(): Array<{data: string; mimeType: string}> {
|
||||
return this.lastImages;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stream research execution
|
||||
*/
|
||||
async *stream(context: SubagentContext, instruction: string): AsyncGenerator<string> {
|
||||
this.logger.info(
|
||||
{
|
||||
subagent: this.getName(),
|
||||
userId: context.userContext.userId,
|
||||
},
|
||||
'Streaming research request'
|
||||
);
|
||||
|
||||
if (!this.hasMCPClient()) {
|
||||
throw new Error('MCP client not available for research subagent');
|
||||
}
|
||||
|
||||
// Clear previous images
|
||||
this.lastImages = [];
|
||||
|
||||
const messages = this.buildMessages(context, instruction);
|
||||
|
||||
const stream = await this.model.stream(messages);
|
||||
|
||||
for await (const chunk of stream) {
|
||||
if (typeof chunk.content === 'string') {
|
||||
yield chunk.content;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory function to create and initialize ResearchSubagent
|
||||
*/
|
||||
export async function createResearchSubagent(
|
||||
model: BaseChatModel,
|
||||
logger: FastifyBaseLogger,
|
||||
basePath: string,
|
||||
mcpClient?: MCPClientConnector,
|
||||
tools?: any[],
|
||||
imageCapture?: Array<{data: string; mimeType: string}>
|
||||
): Promise<ResearchSubagent> {
|
||||
const { readFile } = await import('fs/promises');
|
||||
const { join } = await import('path');
|
||||
const yaml = await import('js-yaml');
|
||||
|
||||
// Load config
|
||||
const configPath = join(basePath, 'config.yaml');
|
||||
const configContent = await readFile(configPath, 'utf-8');
|
||||
const config = yaml.load(configContent) as SubagentConfig;
|
||||
|
||||
// Create and initialize subagent
|
||||
const subagent = new ResearchSubagent(config, model, logger, mcpClient, tools);
|
||||
if (imageCapture !== undefined) {
|
||||
subagent.setImageCapture(imageCapture);
|
||||
}
|
||||
await subagent.initialize(basePath);
|
||||
|
||||
return subagent;
|
||||
}
|
||||
480
gateway/src/harness/subagents/research/memory/api-reference.md
Normal file
480
gateway/src/harness/subagents/research/memory/api-reference.md
Normal file
@@ -0,0 +1,480 @@
|
||||
# Dexorder Research API Reference
|
||||
|
||||
This file contains the complete Python API source code with full docstrings.
|
||||
These files are copied verbatim from `sandbox/dexorder/api/`.
|
||||
|
||||
The API provides access to market data and charting capabilities for research scripts.
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Research scripts access the API via:
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
api = get_api()
|
||||
```
|
||||
|
||||
The API instance provides:
|
||||
- `api.data` - DataAPI for fetching OHLC market data
|
||||
- `api.charting` - ChartingAPI for creating financial charts
|
||||
|
||||
---
|
||||
|
||||
## Complete API Source Code
|
||||
|
||||
The following sections contain the verbatim Python source files with complete
|
||||
type hints, docstrings, and examples.
|
||||
|
||||
|
||||
### api.py
|
||||
```python
|
||||
"""
|
||||
Main DexOrder API - provides access to market data and charting.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from .charting_api import ChartingAPI
|
||||
from .data_api import DataAPI
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class API:
|
||||
"""
|
||||
Main API for accessing market data and creating charts.
|
||||
|
||||
This is the primary interface for research scripts and trading strategies.
|
||||
Access this via get_api() in research scripts.
|
||||
|
||||
Attributes:
|
||||
data: DataAPI for fetching historical and current market data
|
||||
charting: ChartingAPI for creating candlestick charts and visualizations
|
||||
|
||||
Example:
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
|
||||
api = get_api()
|
||||
|
||||
# Fetch data
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21"
|
||||
))
|
||||
|
||||
# Create chart
|
||||
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT 1H")
|
||||
"""
|
||||
|
||||
def __init__(self, charting: ChartingAPI, data: DataAPI):
|
||||
self.charting: ChartingAPI = charting
|
||||
self.data: DataAPI = data
|
||||
```
|
||||
|
||||
|
||||
### data_api.py
|
||||
```python
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, List
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from dexorder.utils import TimestampInput
|
||||
|
||||
|
||||
class DataAPI(ABC):
|
||||
"""
|
||||
API for accessing market data.
|
||||
|
||||
Provides methods to query OHLC (Open, High, Low, Close) candlestick data
|
||||
for cryptocurrency markets.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
async def historical_ohlc(
|
||||
self,
|
||||
ticker: str,
|
||||
period_seconds: int,
|
||||
start_time: TimestampInput,
|
||||
end_time: TimestampInput,
|
||||
extra_columns: Optional[List[str]] = None,
|
||||
) -> pd.DataFrame:
|
||||
"""
|
||||
Fetch historical OHLC candlestick data for a market.
|
||||
|
||||
Args:
|
||||
ticker: Market identifier in format "EXCHANGE:SYMBOL"
|
||||
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
|
||||
period_seconds: Candle period in seconds
|
||||
Common values:
|
||||
- 60 (1 minute)
|
||||
- 300 (5 minutes)
|
||||
- 900 (15 minutes)
|
||||
- 3600 (1 hour)
|
||||
- 86400 (1 day)
|
||||
- 604800 (1 week)
|
||||
start_time: Start of time range. Accepts:
|
||||
- Unix timestamp in seconds (int/float): 1640000000
|
||||
- Date string: "2021-12-20" or "2021-12-20 12:00:00"
|
||||
- datetime object: datetime(2021, 12, 20)
|
||||
- pandas Timestamp: pd.Timestamp("2021-12-20")
|
||||
end_time: End of time range. Same formats as start_time.
|
||||
extra_columns: Optional additional columns to include beyond the standard
|
||||
OHLC columns. Available options:
|
||||
- "volume" - Total volume (decimal float)
|
||||
- "buy_vol" - Buy-side volume (decimal float)
|
||||
- "sell_vol" - Sell-side volume (decimal float)
|
||||
- "open_time", "high_time", "low_time", "close_time" (timestamps)
|
||||
- "open_interest" (for futures markets)
|
||||
- "ticker", "period_seconds"
|
||||
|
||||
Returns:
|
||||
DataFrame with candlestick data sorted by timestamp (ascending).
|
||||
Standard columns (always included):
|
||||
- timestamp: Period start time in microseconds
|
||||
- open: Opening price (decimal float)
|
||||
- high: Highest price (decimal float)
|
||||
- low: Lowest price (decimal float)
|
||||
- close: Closing price (decimal float)
|
||||
|
||||
Plus any columns specified in extra_columns.
|
||||
|
||||
All prices and volumes are automatically converted to decimal floats
|
||||
using market metadata. No manual conversion is needed.
|
||||
|
||||
Returns empty DataFrame if no data is available.
|
||||
|
||||
Examples:
|
||||
# Basic OHLC with Unix timestamp
|
||||
df = await api.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time=1640000000,
|
||||
end_time=1640086400
|
||||
)
|
||||
|
||||
# Using date strings with volume
|
||||
df = await api.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21",
|
||||
extra_columns=["volume"]
|
||||
)
|
||||
|
||||
# Using datetime objects
|
||||
from datetime import datetime
|
||||
df = await api.historical_ohlc(
|
||||
ticker="COINBASE:ETH/USD",
|
||||
period_seconds=300,
|
||||
start_time=datetime(2021, 12, 20, 9, 30),
|
||||
end_time=datetime(2021, 12, 20, 16, 30),
|
||||
extra_columns=["volume", "buy_vol", "sell_vol"]
|
||||
)
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def latest_ohlc(
|
||||
self,
|
||||
ticker: str,
|
||||
period_seconds: int,
|
||||
length: int = 1,
|
||||
extra_columns: Optional[List[str]] = None,
|
||||
) -> pd.DataFrame:
|
||||
"""
|
||||
Query the most recent OHLC candles for a ticker.
|
||||
|
||||
This method fetches the latest N completed candles without needing to
|
||||
specify exact timestamps. Useful for real-time analysis and indicators.
|
||||
|
||||
Args:
|
||||
ticker: Market identifier in format "EXCHANGE:SYMBOL"
|
||||
Examples: "BINANCE:BTC/USDT", "COINBASE:ETH/USD"
|
||||
period_seconds: OHLC candle period in seconds
|
||||
Common values: 60 (1m), 300 (5m), 900 (15m), 3600 (1h),
|
||||
86400 (1d), 604800 (1w)
|
||||
length: Number of most recent candles to return (default: 1)
|
||||
extra_columns: Optional list of additional column names to include.
|
||||
Same column options as historical_ohlc:
|
||||
- "volume", "buy_vol", "sell_vol"
|
||||
- "open_time", "high_time", "low_time", "close_time"
|
||||
- "open_interest", "ticker", "period_seconds"
|
||||
|
||||
Returns:
|
||||
Pandas DataFrame with the same column structure as historical_ohlc,
|
||||
containing the N most recent completed candles sorted by timestamp.
|
||||
Returns empty DataFrame if no data is available.
|
||||
|
||||
Examples:
|
||||
# Get the last candle
|
||||
df = await api.latest_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600
|
||||
)
|
||||
# Returns: timestamp, open, high, low, close
|
||||
|
||||
# Get the last 50 5-minute candles with volume
|
||||
df = await api.latest_ohlc(
|
||||
ticker="COINBASE:ETH/USD",
|
||||
period_seconds=300,
|
||||
length=50,
|
||||
extra_columns=["volume", "buy_vol", "sell_vol"]
|
||||
)
|
||||
|
||||
# Get recent candles with all timing data
|
||||
df = await api.latest_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=60,
|
||||
length=100,
|
||||
extra_columns=["open_time", "high_time", "low_time", "close_time"]
|
||||
)
|
||||
|
||||
Note:
|
||||
This method returns only completed candles. The current (incomplete)
|
||||
candle is not included.
|
||||
"""
|
||||
pass
|
||||
|
||||
```
|
||||
|
||||
|
||||
### charting_api.py
|
||||
```python
|
||||
import logging
|
||||
from abc import abstractmethod, ABC
|
||||
from typing import Optional, Tuple, List
|
||||
|
||||
import pandas as pd
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.figure import Figure
|
||||
|
||||
|
||||
class ChartingAPI(ABC):
|
||||
"""
|
||||
API for creating financial charts and visualizations.
|
||||
|
||||
Provides methods to create candlestick charts, add technical indicator panels,
|
||||
and build custom visualizations. All figures are automatically captured and
|
||||
returned to the client as images.
|
||||
|
||||
Basic workflow:
|
||||
1. Create a chart with plot_ohlc() → returns Figure and Axes
|
||||
2. Optionally overlay indicators on the main axes (e.g., moving averages)
|
||||
3. Optionally add indicator panels below with add_indicator_panel()
|
||||
4. Figures are automatically captured (no need to save manually)
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def plot_ohlc(
|
||||
self,
|
||||
df: pd.DataFrame,
|
||||
title: Optional[str] = None,
|
||||
volume: bool = False,
|
||||
style: str = "charles",
|
||||
figsize: Tuple[int, int] = (12, 8),
|
||||
**kwargs
|
||||
) -> Tuple[Figure, plt.Axes]:
|
||||
"""
|
||||
Create a candlestick chart from OHLC data.
|
||||
|
||||
Args:
|
||||
df: DataFrame with OHLC data. Required columns: open, high, low, close.
|
||||
Column names are case-insensitive.
|
||||
title: Chart title (optional)
|
||||
volume: If True, shows volume bars below the candlesticks (requires 'volume' column)
|
||||
style: Visual style for the chart. Available styles:
|
||||
"charles" (default), "binance", "blueskies", "brasil", "checkers",
|
||||
"classic", "mike", "nightclouds", "sas", "starsandstripes", "yahoo"
|
||||
figsize: Figure size as (width, height) in inches. Default: (12, 8)
|
||||
**kwargs: Additional styling arguments
|
||||
|
||||
Returns:
|
||||
Tuple of (Figure, Axes):
|
||||
- Figure: matplotlib Figure object
|
||||
- Axes: Main candlestick axes (use for overlaying indicators)
|
||||
|
||||
Examples:
|
||||
# Basic chart
|
||||
fig, ax = api.plot_ohlc(df)
|
||||
|
||||
# With volume and title
|
||||
fig, ax = api.plot_ohlc(
|
||||
df,
|
||||
title="BTC/USDT 1H",
|
||||
volume=True,
|
||||
style="binance"
|
||||
)
|
||||
|
||||
# Overlay moving average
|
||||
fig, ax = api.plot_ohlc(df)
|
||||
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue")
|
||||
ax.legend()
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add_indicator_panel(
|
||||
self,
|
||||
fig: Figure,
|
||||
df: pd.DataFrame,
|
||||
columns: Optional[List[str]] = None,
|
||||
ylabel: Optional[str] = None,
|
||||
height_ratio: float = 0.3,
|
||||
ylim: Optional[Tuple[float, float]] = None,
|
||||
**kwargs
|
||||
) -> plt.Axes:
|
||||
"""
|
||||
Add an indicator panel below the chart with time-aligned x-axis.
|
||||
|
||||
Use this to display indicators that should be shown separately from the
|
||||
price chart (e.g., RSI, MACD, volume).
|
||||
|
||||
Args:
|
||||
fig: Figure object from plot_ohlc()
|
||||
df: DataFrame with indicator data (must have same index as OHLC data)
|
||||
columns: Column names to plot. If None, plots all numeric columns.
|
||||
ylabel: Y-axis label (e.g., "RSI", "MACD")
|
||||
height_ratio: Panel height relative to main chart (default: 0.3 = 30%)
|
||||
ylim: Y-axis limits as (min, max). If None, auto-scales.
|
||||
**kwargs: Line styling options (color, linewidth, linestyle, alpha)
|
||||
|
||||
Returns:
|
||||
Axes object for the new panel (use for further customization)
|
||||
|
||||
Examples:
|
||||
# Add RSI panel with reference lines
|
||||
fig, ax = api.plot_ohlc(df)
|
||||
rsi_ax = api.add_indicator_panel(
|
||||
fig, df,
|
||||
columns=["rsi"],
|
||||
ylabel="RSI",
|
||||
ylim=(0, 100)
|
||||
)
|
||||
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
|
||||
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
|
||||
|
||||
# Add MACD panel
|
||||
fig, ax = api.plot_ohlc(df)
|
||||
api.add_indicator_panel(
|
||||
fig, df,
|
||||
columns=["macd", "macd_signal"],
|
||||
ylabel="MACD"
|
||||
)
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def create_figure(
|
||||
self,
|
||||
figsize: Tuple[int, int] = (12, 8),
|
||||
style: str = "charles"
|
||||
) -> Tuple[Figure, plt.Axes]:
|
||||
"""
|
||||
Create a styled figure for custom visualizations.
|
||||
|
||||
Use this when you want to create charts other than candlesticks
|
||||
(e.g., histograms, scatter plots, heatmaps).
|
||||
|
||||
Args:
|
||||
figsize: Figure size as (width, height) in inches. Default: (12, 8)
|
||||
style: Style name for consistent theming. Default: "charles"
|
||||
|
||||
Returns:
|
||||
Tuple of (Figure, Axes) ready for plotting
|
||||
|
||||
Examples:
|
||||
# Histogram
|
||||
fig, ax = api.create_figure()
|
||||
ax.hist(returns, bins=50)
|
||||
ax.set_title("Return Distribution")
|
||||
|
||||
# Heatmap
|
||||
fig, ax = api.create_figure(figsize=(10, 10))
|
||||
import seaborn as sns
|
||||
sns.heatmap(correlation_matrix, ax=ax)
|
||||
ax.set_title("Correlation Matrix")
|
||||
"""
|
||||
pass
|
||||
```
|
||||
|
||||
|
||||
### __init__.py
|
||||
```python
|
||||
"""
|
||||
DexOrder API - market data and charting for research and trading.
|
||||
|
||||
For research scripts, import and use get_api() to access the API:
|
||||
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
|
||||
api = get_api()
|
||||
df = asyncio.run(api.data.historical_ohlc(...))
|
||||
fig, ax = api.charting.plot_ohlc(df)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from dexorder.api.api import API
|
||||
from dexorder.api.charting_api import ChartingAPI
|
||||
from dexorder.api.data_api import DataAPI
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Global API instance - managed by main.py
|
||||
_global_api: Optional[API] = None
|
||||
|
||||
|
||||
def get_api() -> API:
|
||||
"""
|
||||
Get the global API instance for accessing market data and charts.
|
||||
|
||||
Use this in research scripts to access the data and charting APIs.
|
||||
|
||||
Returns:
|
||||
API instance with data and charting capabilities
|
||||
|
||||
Raises:
|
||||
RuntimeError: If called before API initialization (should not happen in research scripts)
|
||||
|
||||
Example:
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
|
||||
api = get_api()
|
||||
|
||||
# Fetch data
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21"
|
||||
))
|
||||
|
||||
# Create chart
|
||||
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
|
||||
"""
|
||||
if _global_api is None:
|
||||
raise RuntimeError("API not initialized")
|
||||
return _global_api
|
||||
|
||||
|
||||
def set_api(api: API) -> None:
|
||||
"""Set the global API instance. Internal use only."""
|
||||
global _global_api
|
||||
_global_api = api
|
||||
|
||||
|
||||
__all__ = ['API', 'ChartingAPI', 'DataAPI', 'get_api', 'set_api']
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
For practical usage patterns and complete working examples, see `usage-examples.md`.
|
||||
221
gateway/src/harness/subagents/research/memory/usage-examples.md
Normal file
221
gateway/src/harness/subagents/research/memory/usage-examples.md
Normal file
@@ -0,0 +1,221 @@
|
||||
# Research Script API Usage
|
||||
|
||||
Research scripts executed via the `execute_research` MCP tool have access to the global API instance, which provides both data fetching and charting capabilities.
|
||||
|
||||
## Accessing the API
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
|
||||
# Get the global API instance
|
||||
api = get_api()
|
||||
```
|
||||
|
||||
## Using the Data API
|
||||
|
||||
The data API provides access to historical OHLC (Open, High, Low, Close) market data with smart caching via Iceberg.
|
||||
|
||||
### Fetching Historical Data
|
||||
|
||||
The API accepts flexible timestamp formats for convenience:
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
from datetime import datetime
|
||||
|
||||
api = get_api()
|
||||
|
||||
# Method 1: Using Unix timestamps (seconds)
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600, # 1 hour candles
|
||||
start_time=1640000000, # Unix timestamp in seconds
|
||||
end_time=1640086400,
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
# Method 2: Using date strings
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20", # Simple date string
|
||||
end_time="2021-12-21",
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
# Method 3: Using date strings with time
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20 00:00:00",
|
||||
end_time="2021-12-20 23:59:59",
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
# Method 4: Using datetime objects
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time=datetime(2021, 12, 20),
|
||||
end_time=datetime(2021, 12, 21),
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
print(f"Loaded {len(df)} candles")
|
||||
print(df.head())
|
||||
```
|
||||
|
||||
### Available Extra Columns
|
||||
|
||||
- `"volume"` - Total volume
|
||||
- `"buy_vol"` - Buy-side volume
|
||||
- `"sell_vol"` - Sell-side volume
|
||||
- `"open_time"`, `"high_time"`, `"low_time"`, `"close_time"` - Timestamps for each price point
|
||||
- `"open_interest"` - Open interest (for futures)
|
||||
- `"ticker"` - Market identifier
|
||||
- `"period_seconds"` - Period in seconds
|
||||
|
||||
## Using the Charting API
|
||||
|
||||
The charting API provides styled financial charts with OHLC candlesticks and technical indicators.
|
||||
|
||||
### Creating a Basic Candlestick Chart
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
from datetime import datetime
|
||||
|
||||
api = get_api()
|
||||
|
||||
# Fetch data
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21",
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
# Create candlestick chart (synchronous)
|
||||
fig, ax = api.charting.plot_ohlc(
|
||||
df,
|
||||
title="BTC/USDT 1H",
|
||||
volume=True, # Show volume bars
|
||||
style="charles" # Chart style
|
||||
)
|
||||
|
||||
# The figure is automatically captured and returned to the MCP client
|
||||
```
|
||||
|
||||
### Adding Indicator Panels
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
import pandas as pd
|
||||
|
||||
api = get_api()
|
||||
|
||||
# Fetch data
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600,
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21"
|
||||
))
|
||||
|
||||
# Calculate a simple moving average
|
||||
df['sma_20'] = df['close'].rolling(window=20).mean()
|
||||
|
||||
# Create chart
|
||||
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT with SMA")
|
||||
|
||||
# Overlay the SMA on the price chart
|
||||
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=2)
|
||||
ax.legend()
|
||||
|
||||
# Add RSI indicator panel below
|
||||
df['rsi'] = calculate_rsi(df['close'], 14) # Your RSI calculation
|
||||
rsi_ax = api.charting.add_indicator_panel(
|
||||
fig, df,
|
||||
columns=["rsi"],
|
||||
ylabel="RSI",
|
||||
ylim=(0, 100)
|
||||
)
|
||||
rsi_ax.axhline(70, color='red', linestyle='--', alpha=0.5)
|
||||
rsi_ax.axhline(30, color='green', linestyle='--', alpha=0.5)
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
import pandas as pd
|
||||
|
||||
# Get API instance
|
||||
api = get_api()
|
||||
|
||||
# Fetch historical data using date strings (easiest for research)
|
||||
df = asyncio.run(api.data.historical_ohlc(
|
||||
ticker="BINANCE:BTC/USDT",
|
||||
period_seconds=3600, # 1 hour
|
||||
start_time="2021-12-20",
|
||||
end_time="2021-12-21",
|
||||
extra_columns=["volume"]
|
||||
))
|
||||
|
||||
# Add some analysis
|
||||
df['sma_20'] = df['close'].rolling(window=20).mean()
|
||||
df['sma_50'] = df['close'].rolling(window=50).mean()
|
||||
|
||||
# Create chart with volume
|
||||
fig, ax = api.charting.plot_ohlc(
|
||||
df,
|
||||
title="BTC/USDT Analysis",
|
||||
volume=True,
|
||||
style="charles"
|
||||
)
|
||||
|
||||
# Overlay moving averages
|
||||
ax.plot(df.index, df['sma_20'], label="SMA 20", color="blue", linewidth=1.5)
|
||||
ax.plot(df.index, df['sma_50'], label="SMA 50", color="red", linewidth=1.5)
|
||||
ax.legend()
|
||||
|
||||
# Print summary statistics
|
||||
print(f"Period: {len(df)} candles")
|
||||
print(f"High: {df['high'].max()}")
|
||||
print(f"Low: {df['low'].min()}")
|
||||
print(f"Mean Volume: {df['volume'].mean():.2f}")
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- **Async vs Sync**: Data API methods are async and require `asyncio.run()`. Charting API methods are synchronous.
|
||||
- **Figure Capture**: All matplotlib figures created during script execution are automatically captured and returned as PNG images.
|
||||
- **Print Statements**: All `print()` output is captured and returned as text content.
|
||||
- **Errors**: Exceptions are caught and reported in the execution results.
|
||||
- **Timestamps**: The API accepts flexible timestamp formats:
|
||||
- Unix timestamps in **seconds** (int or float) - e.g., `1640000000`
|
||||
- Date strings - e.g., `"2021-12-20"` or `"2021-12-20 12:00:00"`
|
||||
- datetime objects - e.g., `datetime(2021, 12, 20)`
|
||||
- pandas Timestamp objects
|
||||
- Internally, the system uses microseconds since epoch, but you don't need to worry about this conversion.
|
||||
- **Price/Volume Values**: All prices and volumes are returned as decimal floats, automatically converted from internal storage format using market metadata. No manual conversion is needed.
|
||||
|
||||
## Available Chart Styles
|
||||
|
||||
- `"charles"` (default)
|
||||
- `"binance"`
|
||||
- `"blueskies"`
|
||||
- `"brasil"`
|
||||
- `"checkers"`
|
||||
- `"classic"`
|
||||
- `"mike"`
|
||||
- `"nightclouds"`
|
||||
- `"sas"`
|
||||
- `"starsandstripes"`
|
||||
- `"yahoo"`
|
||||
138
gateway/src/harness/subagents/research/system-prompt.md
Normal file
138
gateway/src/harness/subagents/research/system-prompt.md
Normal file
@@ -0,0 +1,138 @@
|
||||
# Research Script Assistant
|
||||
|
||||
You are a specialized assistant that creates Python research scripts for market data analysis and visualization.
|
||||
|
||||
## Your Purpose
|
||||
|
||||
Create Python scripts that:
|
||||
- Fetch historical market data using the Dexorder DataAPI
|
||||
- Perform statistical analysis and calculations
|
||||
- Generate professional charts using matplotlib via the ChartingAPI
|
||||
- All matplotlib figures are automatically captured and sent to the user as images
|
||||
|
||||
## Available Tools
|
||||
|
||||
You have direct access to these MCP tools:
|
||||
|
||||
- **category_write**: Create a new research script
|
||||
- Required: category="research", name, description, code
|
||||
- Optional: metadata (with conda_packages list if needed)
|
||||
- Automatically executes the script after writing
|
||||
- Returns validation results and execution output (text + images)
|
||||
|
||||
- **category_edit**: Update an existing research script
|
||||
- Required: category="research", name
|
||||
- Optional: code, description, metadata
|
||||
- Automatically re-executes if code is updated
|
||||
- Returns validation results and execution output
|
||||
|
||||
- **category_read**: Read an existing research script
|
||||
- Returns: code, metadata
|
||||
|
||||
- **category_list**: List all research scripts
|
||||
- Returns: array of {name, description, metadata}
|
||||
|
||||
- **execute_research**: Manually run a research script
|
||||
- Note: Usually not needed since write/edit auto-execute
|
||||
- Returns: text output and images
|
||||
|
||||
## Research Script API
|
||||
|
||||
All research scripts have access to the Dexorder API via:
|
||||
|
||||
```python
|
||||
from dexorder.api import get_api
|
||||
import asyncio
|
||||
|
||||
api = get_api()
|
||||
```
|
||||
|
||||
The API provides two main components:
|
||||
- `api.data` - DataAPI for fetching OHLC market data
|
||||
- `api.charting` - ChartingAPI for creating financial charts
|
||||
|
||||
See your knowledge base for complete API documentation and examples.
|
||||
|
||||
## Coding Loop Pattern
|
||||
|
||||
When a user requests analysis:
|
||||
|
||||
1. **Understand the request**: What data is needed? What analysis? What visualization?
|
||||
|
||||
2. **Check for existing scripts**: Use `category_list` to see if a similar script exists
|
||||
- If exists and suitable: use `category_read` to review it
|
||||
- Consider editing existing script vs creating new one
|
||||
|
||||
3. **Write the script**: Use `category_write` (or `category_edit`)
|
||||
- Write clean, well-commented Python code
|
||||
- Include proper error handling
|
||||
- Use appropriate ticker symbols, time ranges, and periods
|
||||
- The script will auto-execute after writing
|
||||
|
||||
4. **Check execution results**: The tool returns:
|
||||
- `validation.success`: Whether script ran without errors
|
||||
- `validation.output`: Any stdout/stderr text output
|
||||
- `execution.content`: Array of text and image results
|
||||
- Note: Images are NOT included in your context - only text output is visible to you
|
||||
|
||||
5. **Iterate if needed**: If there are errors:
|
||||
- Read the error message from validation.output or execution text
|
||||
- Use `category_edit` to fix the script
|
||||
- The script will auto-execute again
|
||||
|
||||
6. **Return results**: Once successful, summarize what was done
|
||||
- The user will receive both your text response AND the chart images
|
||||
- Don't try to describe the images in detail - the user can see them
|
||||
|
||||
## Important Guidelines
|
||||
|
||||
- **Images are pass-through only**: Chart images go directly to the user. You only see text output (print statements, errors). Don't try to analyze or describe images you can't see.
|
||||
|
||||
- **Async data fetching**: All `api.data` methods are async. Always use `asyncio.run()`:
|
||||
```python
|
||||
df = asyncio.run(api.data.historical_ohlc(...))
|
||||
```
|
||||
|
||||
- **Charting is sync**: All `api.charting` methods are synchronous:
|
||||
```python
|
||||
fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
|
||||
```
|
||||
|
||||
- **Automatic figure capture**: All matplotlib figures are automatically captured. Don't save manually.
|
||||
|
||||
- **Print for debugging**: Use `print()` statements for debugging - you'll see this output.
|
||||
|
||||
- **Package management**: If script needs packages beyond base environment (pandas, numpy, matplotlib):
|
||||
- Add `conda_packages: ["package-name"]` to metadata
|
||||
- Packages are auto-installed during validation
|
||||
|
||||
- **Script naming**: Choose descriptive, unique names. Examples:
|
||||
- "BTC Weekly Analysis"
|
||||
- "ETH Volume Profile"
|
||||
- "Market Correlation Heatmap"
|
||||
|
||||
- **Error handling**: Wrap data fetching in try/except to provide helpful error messages
|
||||
|
||||
## Example Workflow
|
||||
|
||||
User: "Show me BTC price action for the last 7 days with volume"
|
||||
|
||||
You:
|
||||
1. Call `category_write` with:
|
||||
- name: "BTC 7-Day Price Action"
|
||||
- description: "BTC/USDT price and volume analysis for the last 7 days"
|
||||
- code: (Python script that fetches data and creates chart)
|
||||
2. Check execution results
|
||||
3. If successful, respond: "I've created a 7-day BTC price chart with volume analysis. The chart shows [brief summary of what the script does]."
|
||||
4. User receives: Your text response + the actual chart image
|
||||
|
||||
## Response Format
|
||||
|
||||
When reporting results:
|
||||
- Be concise and factual
|
||||
- Mention what data was fetched and what analysis was performed
|
||||
- Don't try to interpret the charts (user can see them)
|
||||
- If errors occurred and you fixed them, briefly mention the resolution
|
||||
- Always confirm the script name for future reference
|
||||
|
||||
Remember: You're creating tools for the user, not just answering questions. Each research script becomes a reusable analysis tool.
|
||||
@@ -4,6 +4,7 @@ import * as yaml from 'js-yaml';
|
||||
import * as fs from 'fs/promises';
|
||||
import * as path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import type { K8sResources } from '../types/user.js';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
@@ -18,14 +19,15 @@ export interface K8sClientConfig {
|
||||
export interface DeploymentSpec {
|
||||
userId: string;
|
||||
licenseType: 'free' | 'pro' | 'enterprise';
|
||||
agentImage: string;
|
||||
k8sResources: K8sResources;
|
||||
sandboxImage: string;
|
||||
sidecarImage: string;
|
||||
storageClass: string;
|
||||
imagePullPolicy?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Kubernetes client wrapper for managing agent deployments
|
||||
* Kubernetes client wrapper for managing sandbox deployments
|
||||
*/
|
||||
export class KubernetesClient {
|
||||
private config: K8sClientConfig;
|
||||
@@ -59,7 +61,7 @@ export class KubernetesClient {
|
||||
static getDeploymentName(userId: string): string {
|
||||
// Sanitize userId to be k8s-compliant (lowercase alphanumeric + hyphens)
|
||||
const sanitized = userId.toLowerCase().replace(/[^a-z0-9-]/g, '-');
|
||||
return `agent-${sanitized}`;
|
||||
return `sandbox-${sanitized}`;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -104,7 +106,7 @@ export class KubernetesClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Create agent deployment from template
|
||||
* Create sandbox deployment from template
|
||||
*/
|
||||
async createAgentDeployment(spec: DeploymentSpec): Promise<void> {
|
||||
const deploymentName = KubernetesClient.getDeploymentName(spec.userId);
|
||||
@@ -113,28 +115,31 @@ export class KubernetesClient {
|
||||
|
||||
this.config.logger.info(
|
||||
{ userId: spec.userId, licenseType: spec.licenseType, deploymentName },
|
||||
'Creating agent deployment'
|
||||
);
|
||||
|
||||
// Load template based on license type
|
||||
const templatePath = path.join(
|
||||
__dirname,
|
||||
'templates',
|
||||
`${spec.licenseType}-tier.yaml`
|
||||
'Creating sandbox deployment'
|
||||
);
|
||||
|
||||
const templatePath = path.join(__dirname, 'templates', 'sandbox.yaml');
|
||||
const templateContent = await fs.readFile(templatePath, 'utf-8');
|
||||
|
||||
// Substitute variables
|
||||
const r = spec.k8sResources;
|
||||
const rendered = templateContent
|
||||
.replace(/\{\{userId\}\}/g, spec.userId)
|
||||
.replace(/\{\{deploymentName\}\}/g, deploymentName)
|
||||
.replace(/\{\{serviceName\}\}/g, serviceName)
|
||||
.replace(/\{\{pvcName\}\}/g, pvcName)
|
||||
.replace(/\{\{agentImage\}\}/g, spec.agentImage)
|
||||
.replace(/\{\{sandboxImage\}\}/g, spec.sandboxImage)
|
||||
.replace(/\{\{sidecarImage\}\}/g, spec.sidecarImage)
|
||||
.replace(/\{\{storageClass\}\}/g, spec.storageClass)
|
||||
.replace(/\{\{imagePullPolicy\}\}/g, spec.imagePullPolicy || 'Always');
|
||||
.replace(/\{\{imagePullPolicy\}\}/g, spec.imagePullPolicy || 'Always')
|
||||
.replace(/\{\{licenseType\}\}/g, spec.licenseType)
|
||||
.replace(/\{\{memoryRequest\}\}/g, r.memoryRequest)
|
||||
.replace(/\{\{memoryLimit\}\}/g, r.memoryLimit)
|
||||
.replace(/\{\{cpuRequest\}\}/g, r.cpuRequest)
|
||||
.replace(/\{\{cpuLimit\}\}/g, r.cpuLimit)
|
||||
.replace(/\{\{storage\}\}/g, r.storage)
|
||||
.replace(/\{\{tmpSizeLimit\}\}/g, r.tmpSizeLimit)
|
||||
.replace(/\{\{enableIdleShutdown\}\}/g, String(r.enableIdleShutdown))
|
||||
.replace(/\{\{idleTimeoutMinutes\}\}/g, String(r.idleTimeoutMinutes));
|
||||
|
||||
// Parse YAML documents (deployment, pvc, service)
|
||||
const documents = yaml.loadAll(rendered) as any[];
|
||||
@@ -186,7 +191,7 @@ export class KubernetesClient {
|
||||
}
|
||||
}
|
||||
|
||||
this.config.logger.info({ deploymentName }, 'Agent deployment created successfully');
|
||||
this.config.logger.info({ deploymentName }, 'Sandbox deployment created successfully');
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -302,7 +307,7 @@ export class KubernetesClient {
|
||||
const serviceName = KubernetesClient.getServiceName(userId);
|
||||
const pvcName = KubernetesClient.getPvcName(userId);
|
||||
|
||||
this.config.logger.info({ userId, deploymentName }, 'Deleting agent deployment');
|
||||
this.config.logger.info({ userId, deploymentName }, 'Deleting sandbox deployment');
|
||||
|
||||
// Delete deployment
|
||||
try {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { KubernetesClient, type DeploymentSpec } from './client.js';
|
||||
import type { UserLicense } from '../types/user.js';
|
||||
import type { License } from '../types/user.js';
|
||||
|
||||
export interface ContainerManagerConfig {
|
||||
k8sClient: KubernetesClient;
|
||||
agentImage: string;
|
||||
sandboxImage: string;
|
||||
sidecarImage: string;
|
||||
storageClass: string;
|
||||
imagePullPolicy?: string;
|
||||
@@ -25,7 +25,7 @@ export interface EnsureContainerResult {
|
||||
}
|
||||
|
||||
/**
|
||||
* Container manager orchestrates agent container lifecycle
|
||||
* Container manager orchestrates sandbox container lifecycle
|
||||
*/
|
||||
export class ContainerManager {
|
||||
private config: ContainerManagerConfig;
|
||||
@@ -41,7 +41,7 @@ export class ContainerManager {
|
||||
*/
|
||||
async ensureContainerRunning(
|
||||
userId: string,
|
||||
license: UserLicense,
|
||||
license: License,
|
||||
waitForReady: boolean = true
|
||||
): Promise<EnsureContainerResult> {
|
||||
const deploymentName = KubernetesClient.getDeploymentName(userId);
|
||||
@@ -80,7 +80,8 @@ export class ContainerManager {
|
||||
const spec: DeploymentSpec = {
|
||||
userId,
|
||||
licenseType: license.licenseType,
|
||||
agentImage: this.config.agentImage,
|
||||
k8sResources: license.k8sResources,
|
||||
sandboxImage: this.config.sandboxImage,
|
||||
sidecarImage: this.config.sidecarImage,
|
||||
storageClass: this.config.storageClass,
|
||||
imagePullPolicy: this.config.imagePullPolicy,
|
||||
|
||||
@@ -1,207 +0,0 @@
|
||||
# Enterprise tier agent deployment template
|
||||
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}}
|
||||
# Enterprise: No idle shutdown, larger resources
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{deploymentName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
app.kubernetes.io/name: agent
|
||||
app.kubernetes.io/component: user-agent
|
||||
dexorder.io/component: agent
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: enterprise
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
dexorder.io/component: agent
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: enterprise
|
||||
spec:
|
||||
serviceAccountName: agent-lifecycle
|
||||
shareProcessNamespace: true
|
||||
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
|
||||
containers:
|
||||
- name: agent
|
||||
image: {{agentImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "4000m"
|
||||
|
||||
env:
|
||||
- name: USER_ID
|
||||
value: {{userId}}
|
||||
- name: IDLE_TIMEOUT_MINUTES
|
||||
value: "0"
|
||||
- name: IDLE_CHECK_INTERVAL_SECONDS
|
||||
value: "60"
|
||||
- name: ENABLE_IDLE_SHUTDOWN
|
||||
value: "false"
|
||||
- name: MCP_SERVER_PORT
|
||||
value: "3000"
|
||||
- name: ZMQ_CONTROL_PORT
|
||||
value: "5555"
|
||||
- name: ZMQ_GATEWAY_ENDPOINT
|
||||
value: "tcp://gateway.default.svc.cluster.local:5571"
|
||||
|
||||
ports:
|
||||
- name: mcp
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
- name: zmq-control
|
||||
containerPort: 5555
|
||||
protocol: TCP
|
||||
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: mcp
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: mcp
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
|
||||
- name: lifecycle-sidecar
|
||||
image: {{sidecarImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
limits:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: DEPLOYMENT_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels['dexorder.io/deployment']
|
||||
- name: USER_TYPE
|
||||
value: "enterprise"
|
||||
- name: MAIN_CONTAINER_PID
|
||||
value: "1"
|
||||
|
||||
volumeMounts:
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
readOnly: true
|
||||
|
||||
volumes:
|
||||
- name: agent-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 512Mi
|
||||
- name: shared-run
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 1Mi
|
||||
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{pvcName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: enterprise
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
storageClassName: {{storageClass}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{serviceName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: enterprise
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
ports:
|
||||
- name: mcp
|
||||
port: 3000
|
||||
targetPort: mcp
|
||||
protocol: TCP
|
||||
- name: zmq-control
|
||||
port: 5555
|
||||
targetPort: zmq-control
|
||||
protocol: TCP
|
||||
@@ -1,206 +0,0 @@
|
||||
# Free tier agent deployment template
|
||||
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{deploymentName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
app.kubernetes.io/name: agent
|
||||
app.kubernetes.io/component: user-agent
|
||||
dexorder.io/component: agent
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: free
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
dexorder.io/component: agent
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: free
|
||||
spec:
|
||||
serviceAccountName: agent-lifecycle
|
||||
shareProcessNamespace: true
|
||||
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
|
||||
containers:
|
||||
- name: agent
|
||||
image: {{agentImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
|
||||
env:
|
||||
- name: USER_ID
|
||||
value: {{userId}}
|
||||
- name: IDLE_TIMEOUT_MINUTES
|
||||
value: "15"
|
||||
- name: IDLE_CHECK_INTERVAL_SECONDS
|
||||
value: "60"
|
||||
- name: ENABLE_IDLE_SHUTDOWN
|
||||
value: "true"
|
||||
- name: MCP_SERVER_PORT
|
||||
value: "3000"
|
||||
- name: ZMQ_CONTROL_PORT
|
||||
value: "5555"
|
||||
- name: ZMQ_GATEWAY_ENDPOINT
|
||||
value: "tcp://gateway.default.svc.cluster.local:5571"
|
||||
|
||||
ports:
|
||||
- name: mcp
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
- name: zmq-control
|
||||
containerPort: 5555
|
||||
protocol: TCP
|
||||
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: mcp
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: mcp
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
|
||||
- name: lifecycle-sidecar
|
||||
image: {{sidecarImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
limits:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: DEPLOYMENT_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels['dexorder.io/deployment']
|
||||
- name: USER_TYPE
|
||||
value: "free"
|
||||
- name: MAIN_CONTAINER_PID
|
||||
value: "1"
|
||||
|
||||
volumeMounts:
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
readOnly: true
|
||||
|
||||
volumes:
|
||||
- name: agent-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 128Mi
|
||||
- name: shared-run
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 1Mi
|
||||
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{pvcName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: free
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
storageClassName: {{storageClass}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{serviceName}}
|
||||
namespace: dexorder-agents
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: free
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
ports:
|
||||
- name: mcp
|
||||
port: 3000
|
||||
targetPort: mcp
|
||||
protocol: TCP
|
||||
- name: zmq-control
|
||||
port: 5555
|
||||
targetPort: zmq-control
|
||||
protocol: TCP
|
||||
@@ -1,18 +1,23 @@
|
||||
# Pro tier agent deployment template
|
||||
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}}
|
||||
# Sandbox deployment template — variables are populated from the user's License k8sResources.
|
||||
# Variables: {{userId}}, {{deploymentName}}, {{pvcName}}, {{serviceName}},
|
||||
# {{sandboxImage}}, {{sidecarImage}}, {{imagePullPolicy}}, {{storageClass}},
|
||||
# {{licenseType}},
|
||||
# {{memoryRequest}}, {{memoryLimit}}, {{cpuRequest}}, {{cpuLimit}},
|
||||
# {{storage}}, {{tmpSizeLimit}},
|
||||
# {{enableIdleShutdown}}, {{idleTimeoutMinutes}}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{deploymentName}}
|
||||
namespace: dexorder-agents
|
||||
namespace: dexorder-sandboxes
|
||||
labels:
|
||||
app.kubernetes.io/name: agent
|
||||
app.kubernetes.io/component: user-agent
|
||||
dexorder.io/component: agent
|
||||
app.kubernetes.io/name: sandbox
|
||||
app.kubernetes.io/component: user-sandbox
|
||||
dexorder.io/component: sandbox
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: pro
|
||||
dexorder.io/license-tier: {{licenseType}}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
@@ -21,26 +26,26 @@ spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
dexorder.io/component: agent
|
||||
dexorder.io/component: sandbox
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/deployment: {{deploymentName}}
|
||||
dexorder.io/license-tier: pro
|
||||
dexorder.io/license-tier: {{licenseType}}
|
||||
spec:
|
||||
serviceAccountName: agent-lifecycle
|
||||
serviceAccountName: sandbox-lifecycle
|
||||
shareProcessNamespace: true
|
||||
|
||||
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
fsGroup: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
|
||||
|
||||
containers:
|
||||
- name: agent
|
||||
image: {{agentImage}}
|
||||
- name: sandbox
|
||||
image: {{sandboxImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
@@ -49,31 +54,39 @@ spec:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
memory: "{{memoryRequest}}"
|
||||
cpu: "{{cpuRequest}}"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "2000m"
|
||||
|
||||
memory: "{{memoryLimit}}"
|
||||
cpu: "{{cpuLimit}}"
|
||||
|
||||
env:
|
||||
- name: USER_ID
|
||||
value: {{userId}}
|
||||
- name: IDLE_TIMEOUT_MINUTES
|
||||
value: "60"
|
||||
value: "{{idleTimeoutMinutes}}"
|
||||
- name: IDLE_CHECK_INTERVAL_SECONDS
|
||||
value: "60"
|
||||
- name: ENABLE_IDLE_SHUTDOWN
|
||||
value: "true"
|
||||
value: "{{enableIdleShutdown}}"
|
||||
- name: MCP_SERVER_PORT
|
||||
value: "3000"
|
||||
- name: ZMQ_CONTROL_PORT
|
||||
value: "5555"
|
||||
- name: ZMQ_GATEWAY_ENDPOINT
|
||||
value: "tcp://gateway.default.svc.cluster.local:5571"
|
||||
|
||||
- name: ICEBERG_CATALOG_URI
|
||||
value: "http://iceberg-catalog.default.svc.cluster.local:8181"
|
||||
- name: ICEBERG_NAMESPACE
|
||||
value: "trading"
|
||||
- name: S3_ENDPOINT
|
||||
value: "http://minio.default.svc.cluster.local:9000"
|
||||
- name: RELAY_ENDPOINT
|
||||
value: "tcp://relay.default.svc.cluster.local:5559"
|
||||
|
||||
ports:
|
||||
- name: mcp
|
||||
containerPort: 3000
|
||||
@@ -81,17 +94,17 @@ spec:
|
||||
- name: zmq-control
|
||||
containerPort: 5555
|
||||
protocol: TCP
|
||||
|
||||
|
||||
volumeMounts:
|
||||
- name: agent-data
|
||||
- name: sandbox-data
|
||||
mountPath: /app/data
|
||||
- name: agent-config
|
||||
- name: sandbox-config
|
||||
mountPath: /app/config
|
||||
readOnly: true
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
mountPath: /var/run/sandbox
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -111,7 +124,7 @@ spec:
|
||||
- name: lifecycle-sidecar
|
||||
image: {{sidecarImage}}
|
||||
imagePullPolicy: {{imagePullPolicy}}
|
||||
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
runAsNonRoot: true
|
||||
@@ -120,7 +133,7 @@ spec:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
@@ -128,7 +141,7 @@ spec:
|
||||
limits:
|
||||
memory: "64Mi"
|
||||
cpu: "50m"
|
||||
|
||||
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
@@ -139,26 +152,30 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels['dexorder.io/deployment']
|
||||
- name: USER_TYPE
|
||||
value: "pro"
|
||||
value: "{{licenseType}}"
|
||||
- name: MAIN_CONTAINER_PID
|
||||
value: "1"
|
||||
|
||||
|
||||
volumeMounts:
|
||||
- name: shared-run
|
||||
mountPath: /var/run/agent
|
||||
mountPath: /var/run/sandbox
|
||||
readOnly: true
|
||||
|
||||
|
||||
volumes:
|
||||
- name: agent-data
|
||||
- name: sandbox-data
|
||||
persistentVolumeClaim:
|
||||
claimName: {{pvcName}}
|
||||
- name: agent-config
|
||||
configMap:
|
||||
name: agent-config
|
||||
- name: sandbox-config
|
||||
projected:
|
||||
sources:
|
||||
- configMap:
|
||||
name: sandbox-config
|
||||
- secret:
|
||||
name: sandbox-secrets
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 256Mi
|
||||
sizeLimit: {{tmpSizeLimit}}
|
||||
- name: shared-run
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
@@ -171,26 +188,26 @@ apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{pvcName}}
|
||||
namespace: dexorder-agents
|
||||
namespace: dexorder-sandboxes
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: pro
|
||||
dexorder.io/license-tier: {{licenseType}}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
storage: {{storage}}
|
||||
storageClassName: {{storageClass}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{serviceName}}
|
||||
namespace: dexorder-agents
|
||||
namespace: dexorder-sandboxes
|
||||
labels:
|
||||
dexorder.io/user-id: {{userId}}
|
||||
dexorder.io/license-tier: pro
|
||||
dexorder.io/license-tier: {{licenseType}}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import { LLMProviderFactory, type ModelConfig, LLMProvider, type LicenseModelsConfig } from './provider.js';
|
||||
import type { UserLicense } from '../types/user.js';
|
||||
import type { License } from '../types/user.js';
|
||||
|
||||
/**
|
||||
* Model routing strategies
|
||||
@@ -39,8 +39,9 @@ export class ModelRouter {
|
||||
*/
|
||||
async route(
|
||||
message: string,
|
||||
license: UserLicense,
|
||||
strategy: RoutingStrategy = RoutingStrategy.USER_PREFERENCE
|
||||
license: License,
|
||||
strategy: RoutingStrategy = RoutingStrategy.USER_PREFERENCE,
|
||||
userId?: string
|
||||
): Promise<BaseChatModel> {
|
||||
let modelConfig: ModelConfig;
|
||||
|
||||
@@ -67,7 +68,7 @@ export class ModelRouter {
|
||||
|
||||
this.logger.info(
|
||||
{
|
||||
userId: license.userId,
|
||||
userId,
|
||||
strategy,
|
||||
provider: modelConfig.provider,
|
||||
model: modelConfig.model,
|
||||
@@ -81,9 +82,9 @@ export class ModelRouter {
|
||||
/**
|
||||
* Route based on user's preferred model (if set in license)
|
||||
*/
|
||||
private routeByUserPreference(license: UserLicense): ModelConfig {
|
||||
private routeByUserPreference(license: License): ModelConfig {
|
||||
// Check if user has custom model preference
|
||||
const preferredModel = (license as any).preferredModel as ModelConfig | undefined;
|
||||
const preferredModel = license.preferredModel as ModelConfig | undefined;
|
||||
|
||||
if (preferredModel && this.isModelAllowed(preferredModel, license)) {
|
||||
return preferredModel;
|
||||
@@ -96,7 +97,7 @@ export class ModelRouter {
|
||||
/**
|
||||
* Route based on query complexity
|
||||
*/
|
||||
private routeByComplexity(message: string, license: UserLicense): ModelConfig {
|
||||
private routeByComplexity(message: string, license: License): ModelConfig {
|
||||
const isComplex = this.isComplexQuery(message);
|
||||
|
||||
// Use configuration if available
|
||||
@@ -127,7 +128,7 @@ export class ModelRouter {
|
||||
/**
|
||||
* Route based on license tier
|
||||
*/
|
||||
private routeByLicenseTier(license: UserLicense): ModelConfig {
|
||||
private routeByLicenseTier(license: License): ModelConfig {
|
||||
// Use configuration if available
|
||||
if (this.licenseModels) {
|
||||
const tierConfig = this.licenseModels[license.licenseType];
|
||||
@@ -155,7 +156,7 @@ export class ModelRouter {
|
||||
/**
|
||||
* Route to cheapest available model
|
||||
*/
|
||||
private routeByCost(license: UserLicense): ModelConfig {
|
||||
private routeByCost(license: License): ModelConfig {
|
||||
// Use configuration if available
|
||||
if (this.licenseModels) {
|
||||
const tierConfig = this.licenseModels[license.licenseType];
|
||||
@@ -171,7 +172,7 @@ export class ModelRouter {
|
||||
/**
|
||||
* Check if model is allowed for user's license
|
||||
*/
|
||||
private isModelAllowed(model: ModelConfig, license: UserLicense): boolean {
|
||||
private isModelAllowed(model: ModelConfig, license: License): boolean {
|
||||
// Use configuration if available
|
||||
if (this.licenseModels) {
|
||||
const tierConfig = this.licenseModels[license.licenseType];
|
||||
|
||||
@@ -15,6 +15,8 @@ import { KubernetesClient } from './k8s/client.js';
|
||||
import { ContainerManager } from './k8s/container-manager.js';
|
||||
import { ZMQRelayClient } from './clients/zmq-relay-client.js';
|
||||
import { IcebergClient } from './clients/iceberg-client.js';
|
||||
import { ConversationStore } from './harness/memory/conversation-store.js';
|
||||
import { AgentHarness, type HarnessSessionConfig } from './harness/agent-harness.js';
|
||||
import { OHLCService } from './services/ohlc-service.js';
|
||||
import { SymbolIndexService } from './services/symbol-index-service.js';
|
||||
import { SymbolRoutes } from './routes/symbol-routes.js';
|
||||
@@ -38,6 +40,7 @@ import {
|
||||
} from './events/index.js';
|
||||
import { QdrantClient } from './clients/qdrant-client.js';
|
||||
import { EmbeddingService, RAGRetriever, DocumentLoader } from './harness/memory/index.js';
|
||||
import { initializeToolRegistry } from './tools/tool-registry.js';
|
||||
import { join } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { dirname } from 'path';
|
||||
@@ -131,6 +134,9 @@ function loadConfig() {
|
||||
// Redis configuration (for harness memory layer)
|
||||
redisUrl: configData.redis?.url || process.env.REDIS_URL || 'redis://localhost:6379',
|
||||
|
||||
// Conversation history limit: number of prior turns loaded as LLM context and flushed to Iceberg
|
||||
conversationHistoryLimit: configData.agent?.conversation_history_limit || parseInt(process.env.CONVERSATION_HISTORY_LIMIT || '20'),
|
||||
|
||||
// Qdrant configuration (for RAG)
|
||||
qdrant: {
|
||||
url: configData.qdrant?.url || process.env.QDRANT_URL || 'http://localhost:6333',
|
||||
@@ -147,6 +153,7 @@ function loadConfig() {
|
||||
s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT,
|
||||
s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY,
|
||||
s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY,
|
||||
conversationsBucket: configData.iceberg?.conversations_bucket || process.env.CONVERSATIONS_S3_BUCKET,
|
||||
},
|
||||
|
||||
// Relay configuration (for historical data)
|
||||
@@ -165,12 +172,12 @@ function loadConfig() {
|
||||
|
||||
// Kubernetes configuration
|
||||
kubernetes: {
|
||||
namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'dexorder-agents',
|
||||
namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'dexorder-sandboxes',
|
||||
inCluster: configData.kubernetes?.in_cluster ?? (process.env.KUBERNETES_IN_CLUSTER === 'true'),
|
||||
context: configData.kubernetes?.context || process.env.KUBERNETES_CONTEXT,
|
||||
agentImage: configData.kubernetes?.agent_image || process.env.AGENT_IMAGE || 'ghcr.io/dexorder/agent:latest',
|
||||
sandboxImage: configData.kubernetes?.sandbox_image || process.env.SANDBOX_IMAGE || 'ghcr.io/dexorder/sandbox:latest',
|
||||
sidecarImage: configData.kubernetes?.sidecar_image || process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest',
|
||||
storageClass: configData.kubernetes?.storage_class || process.env.AGENT_STORAGE_CLASS || 'standard',
|
||||
storageClass: configData.kubernetes?.storage_class || process.env.SANDBOX_STORAGE_CLASS || 'standard',
|
||||
imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always',
|
||||
},
|
||||
};
|
||||
@@ -261,11 +268,25 @@ const qdrantClient = new QdrantClient(config.qdrant, app.log);
|
||||
// Initialize Iceberg client (for durable storage)
|
||||
// const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
|
||||
// Create metadata update callback that will be wired up when SymbolIndexService initializes
|
||||
// This ensures we don't miss notifications sent before the service is ready
|
||||
let symbolIndexService: SymbolIndexService | undefined;
|
||||
const onMetadataUpdate = async () => {
|
||||
if (symbolIndexService) {
|
||||
app.log.info('Reloading symbol metadata from Iceberg');
|
||||
await symbolIndexService.initialize();
|
||||
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol metadata reloaded');
|
||||
} else {
|
||||
app.log.warn('Received METADATA_UPDATE before SymbolIndexService initialized, ignoring');
|
||||
}
|
||||
};
|
||||
|
||||
// Initialize ZMQ Relay client (for historical data)
|
||||
// Note: onMetadataUpdate callback will be set after symbolIndexService is initialized
|
||||
// Pass onMetadataUpdate callback so it's registered before connection
|
||||
const zmqRelayClient = new ZMQRelayClient({
|
||||
relayRequestEndpoint: config.relay.requestEndpoint,
|
||||
relayNotificationEndpoint: config.relay.notificationEndpoint,
|
||||
onMetadataUpdate,
|
||||
}, app.log);
|
||||
|
||||
app.log.info({
|
||||
@@ -286,7 +307,7 @@ const k8sClient = new KubernetesClient({
|
||||
|
||||
const containerManager = new ContainerManager({
|
||||
k8sClient,
|
||||
agentImage: config.kubernetes.agentImage,
|
||||
sandboxImage: config.kubernetes.sandboxImage,
|
||||
sidecarImage: config.kubernetes.sidecarImage,
|
||||
storageClass: config.kubernetes.storageClass,
|
||||
imagePullPolicy: config.kubernetes.imagePullPolicy,
|
||||
@@ -326,10 +347,13 @@ const eventRouter = new EventRouter({
|
||||
});
|
||||
app.log.debug('Event router initialized');
|
||||
|
||||
// Initialize shared Iceberg client (used by both OHLC service and conversation store)
|
||||
const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
app.log.debug('Iceberg client initialized');
|
||||
|
||||
// Initialize OHLC service (optional - only if relay is available)
|
||||
let ohlcService: OHLCService | undefined;
|
||||
try {
|
||||
const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
ohlcService = new OHLCService({
|
||||
icebergClient,
|
||||
relayClient: zmqRelayClient,
|
||||
@@ -340,16 +364,30 @@ try {
|
||||
app.log.warn({ error }, 'Failed to initialize OHLC service - historical data will not be available');
|
||||
}
|
||||
|
||||
// Initialize Symbol Index Service (deferred to after server starts)
|
||||
let symbolIndexService: SymbolIndexService | undefined;
|
||||
// Initialize conversation store (Redis hot path + Iceberg cold path)
|
||||
const conversationStore = new ConversationStore(redis, app.log, icebergClient);
|
||||
app.log.debug('Conversation store initialized');
|
||||
|
||||
// Harness factory: captures infrastructure deps; channel handlers stay infrastructure-free
|
||||
function createHarness(sessionConfig: HarnessSessionConfig): AgentHarness {
|
||||
return new AgentHarness({
|
||||
...sessionConfig,
|
||||
providerConfig: config.providerConfig,
|
||||
conversationStore,
|
||||
historyLimit: config.conversationHistoryLimit,
|
||||
});
|
||||
}
|
||||
|
||||
// Symbol Index Service will be initialized after server starts
|
||||
// (declared above near ZMQ client initialization)
|
||||
|
||||
// Initialize channel handlers
|
||||
const websocketHandler = new WebSocketHandler({
|
||||
authenticator,
|
||||
containerManager,
|
||||
providerConfig: config.providerConfig,
|
||||
sessionRegistry,
|
||||
eventSubscriber,
|
||||
createHarness,
|
||||
ohlcService, // Optional
|
||||
symbolIndexService, // Optional
|
||||
});
|
||||
@@ -357,8 +395,8 @@ app.log.debug('WebSocket handler initialized');
|
||||
|
||||
const telegramHandler = new TelegramHandler({
|
||||
authenticator,
|
||||
providerConfig: config.providerConfig,
|
||||
telegramBotToken: config.telegramBotToken,
|
||||
createHarness,
|
||||
});
|
||||
app.log.debug('Telegram handler initialized');
|
||||
|
||||
@@ -477,6 +515,10 @@ app.get('/admin/knowledge-stats', async (_request, reply) => {
|
||||
const shutdown = async () => {
|
||||
app.log.info('Shutting down gracefully...');
|
||||
try {
|
||||
// Flush all active sessions to Iceberg before shutdown
|
||||
await websocketHandler.endAllSessions();
|
||||
await telegramHandler.endAllSessions();
|
||||
|
||||
// Stop event system first
|
||||
await eventSubscriber.stop();
|
||||
await eventRouter.stop();
|
||||
@@ -529,6 +571,53 @@ try {
|
||||
app.log.warn({ error }, 'Qdrant initialization failed - RAG will not be available');
|
||||
}
|
||||
|
||||
// Initialize tool registry
|
||||
app.log.debug('Initializing tool registry...');
|
||||
try {
|
||||
const toolRegistry = initializeToolRegistry(app.log, {
|
||||
// Use getter functions to support lazy initialization
|
||||
ohlcService: () => ohlcService,
|
||||
symbolIndexService: () => symbolIndexService,
|
||||
workspaceManager: undefined, // Will be set per-session
|
||||
});
|
||||
|
||||
// Register agent tool configurations
|
||||
// Main agent: platform tools + user's general MCP tools
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'main',
|
||||
platformTools: ['symbol_lookup', 'get_chart_data'],
|
||||
mcpTools: [], // No MCP tools for main agent by default (can be extended later)
|
||||
});
|
||||
|
||||
// Research subagent: only MCP tools for script creation/execution
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'research',
|
||||
platformTools: [], // No platform tools (works at script level)
|
||||
mcpTools: ['category_*', 'execute_research'],
|
||||
});
|
||||
|
||||
// Code reviewer subagent: no tools by default
|
||||
toolRegistry.registerAgentTools({
|
||||
agentName: 'code-reviewer',
|
||||
platformTools: [],
|
||||
mcpTools: [],
|
||||
});
|
||||
|
||||
app.log.info(
|
||||
{
|
||||
agents: toolRegistry.getRegisteredAgents(),
|
||||
configs: toolRegistry.getRegisteredAgents().map(name => ({
|
||||
name,
|
||||
config: toolRegistry.getAgentToolConfig(name),
|
||||
})),
|
||||
},
|
||||
'Tool registry initialized'
|
||||
);
|
||||
} catch (error) {
|
||||
app.log.error({ error }, 'Failed to initialize tool registry');
|
||||
// Non-fatal - continue without tools
|
||||
}
|
||||
|
||||
// Initialize RAG system and load global knowledge
|
||||
app.log.debug('Initializing RAG system...');
|
||||
try {
|
||||
@@ -586,6 +675,7 @@ try {
|
||||
|
||||
// Initialize Symbol Index Service (after server is running)
|
||||
// This is done asynchronously to not block server startup
|
||||
// The onMetadataUpdate callback is already registered with zmqRelayClient
|
||||
(async () => {
|
||||
try {
|
||||
const icebergClient = new IcebergClient(config.iceberg, app.log);
|
||||
@@ -594,18 +684,13 @@ try {
|
||||
logger: app.log,
|
||||
});
|
||||
await indexService.initialize();
|
||||
|
||||
// Assign to module-level variable so onMetadataUpdate callback can use it
|
||||
symbolIndexService = indexService;
|
||||
|
||||
// Update websocket handler's config so it can use the service
|
||||
(websocketHandler as any).config.symbolIndexService = indexService;
|
||||
|
||||
// Configure ZMQ relay to reload symbol metadata on updates
|
||||
(zmqRelayClient as any).config.onMetadataUpdate = async () => {
|
||||
app.log.info('Reloading symbol metadata from Iceberg');
|
||||
await indexService.initialize();
|
||||
app.log.info({ stats: indexService.getStats() }, 'Symbol metadata reloaded');
|
||||
};
|
||||
|
||||
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol index service initialized');
|
||||
} catch (error) {
|
||||
app.log.warn({ error }, 'Failed to initialize symbol index service - symbol search will not be available');
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/**
|
||||
* OHLC Service - High-level API for historical market data
|
||||
*
|
||||
* Workflow (mirroring client-py/dexorder/ohlc_client.py):
|
||||
* Workflow (mirroring sandbox/dexorder/ohlc_client.py):
|
||||
* 1. Check Iceberg for existing data
|
||||
* 2. Identify missing ranges
|
||||
* 3. If complete, return immediately
|
||||
|
||||
11
gateway/src/tools/index.ts
Normal file
11
gateway/src/tools/index.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
// Tools exports
|
||||
|
||||
export * from './platform/index.js';
|
||||
export * from './mcp/index.js';
|
||||
export {
|
||||
ToolRegistry,
|
||||
initializeToolRegistry,
|
||||
getToolRegistry,
|
||||
type AgentToolConfig,
|
||||
type PlatformServices,
|
||||
} from './tool-registry.js';
|
||||
7
gateway/src/tools/mcp/index.ts
Normal file
7
gateway/src/tools/mcp/index.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
// MCP tool wrappers exports
|
||||
|
||||
export {
|
||||
createMCPToolWrapper,
|
||||
createMCPToolWrappers,
|
||||
type MCPToolInfo,
|
||||
} from './mcp-tool-wrapper.js';
|
||||
186
gateway/src/tools/mcp/mcp-tool-wrapper.ts
Normal file
186
gateway/src/tools/mcp/mcp-tool-wrapper.ts
Normal file
@@ -0,0 +1,186 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { MCPClientConnector } from '../../harness/mcp-client.js';
|
||||
|
||||
/**
|
||||
* MCP Tool Wrapper
|
||||
*
|
||||
* Wraps remote MCP server tools as standard LangChain tools.
|
||||
* Provides dynamic tool creation based on MCP tool definitions.
|
||||
*/
|
||||
|
||||
export interface MCPToolInfo {
|
||||
name: string;
|
||||
description?: string;
|
||||
inputSchema?: {
|
||||
type: string;
|
||||
properties?: Record<string, any>;
|
||||
required?: string[];
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a LangChain tool from an MCP tool definition
|
||||
*/
|
||||
export function createMCPToolWrapper(
|
||||
toolInfo: MCPToolInfo,
|
||||
mcpClient: MCPClientConnector,
|
||||
logger: FastifyBaseLogger,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
): DynamicStructuredTool {
|
||||
// Convert MCP input schema to Zod schema
|
||||
const zodSchema = mcpInputSchemaToZod(toolInfo.inputSchema);
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: toolInfo.name,
|
||||
description: toolInfo.description || `MCP tool: ${toolInfo.name}`,
|
||||
schema: zodSchema,
|
||||
func: async (input: Record<string, unknown>) => {
|
||||
try {
|
||||
const result = await mcpClient.callTool(toolInfo.name, input);
|
||||
|
||||
logger.info({ tool: toolInfo.name }, 'MCP tool call completed');
|
||||
|
||||
// Handle different MCP result formats
|
||||
if (typeof result === 'string') {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Handle structured MCP responses with content arrays
|
||||
if (result && typeof result === 'object') {
|
||||
// Extract text content from MCP response
|
||||
const textParts: string[] = [];
|
||||
|
||||
// Check for content array (standard MCP format)
|
||||
if (Array.isArray((result as any).content)) {
|
||||
logger.debug({ tool: toolInfo.name, itemCount: (result as any).content.length }, 'Processing MCP content array');
|
||||
for (const item of (result as any).content) {
|
||||
if (item.type === 'text' && item.text) {
|
||||
textParts.push(item.text);
|
||||
} else if (item.type === 'image' && item.data && item.mimeType) {
|
||||
logger.info({ tool: toolInfo.name, mimeType: item.mimeType }, 'Capturing image from MCP response');
|
||||
onImage?.({ data: item.data, mimeType: item.mimeType });
|
||||
}
|
||||
}
|
||||
if (textParts.length > 0) {
|
||||
return textParts.join('\n\n');
|
||||
}
|
||||
}
|
||||
|
||||
// Check for nested execution.content
|
||||
if ((result as any).execution && Array.isArray((result as any).execution.content)) {
|
||||
for (const item of (result as any).execution.content) {
|
||||
if (item.type === 'text' && item.text) {
|
||||
textParts.push(item.text);
|
||||
} else if (item.type === 'image' && item.data && item.mimeType) {
|
||||
onImage?.({ data: item.data, mimeType: item.mimeType });
|
||||
}
|
||||
}
|
||||
if (textParts.length > 0) {
|
||||
return textParts.join('\n\n');
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: stringify the result
|
||||
return JSON.stringify(result, null, 2);
|
||||
}
|
||||
|
||||
return String(result || '');
|
||||
} catch (error) {
|
||||
logger.error({ error, tool: toolInfo.name, input }, 'MCP tool call failed');
|
||||
return `Error calling MCP tool ${toolInfo.name}: ${error instanceof Error ? error.message : String(error)}`;
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert MCP input schema to Zod schema
|
||||
*/
|
||||
function mcpInputSchemaToZod(inputSchema?: MCPToolInfo['inputSchema']): z.ZodObject<any> {
|
||||
if (!inputSchema || !inputSchema.properties) {
|
||||
// Generic schema that accepts any properties
|
||||
return z.object({}).passthrough();
|
||||
}
|
||||
|
||||
const properties = inputSchema.properties;
|
||||
const required = inputSchema.required || [];
|
||||
|
||||
const zodFields: Record<string, z.ZodTypeAny> = {};
|
||||
|
||||
for (const [key, prop] of Object.entries(properties)) {
|
||||
let zodType: z.ZodTypeAny;
|
||||
|
||||
// Map JSON Schema types to Zod types
|
||||
switch (prop.type) {
|
||||
case 'string':
|
||||
zodType = z.string().describe(prop.description || '');
|
||||
break;
|
||||
case 'number':
|
||||
zodType = z.number().describe(prop.description || '');
|
||||
break;
|
||||
case 'integer':
|
||||
zodType = z.number().int().describe(prop.description || '');
|
||||
break;
|
||||
case 'boolean':
|
||||
zodType = z.boolean().describe(prop.description || '');
|
||||
break;
|
||||
case 'array':
|
||||
// Handle array items
|
||||
if (prop.items) {
|
||||
const itemType = getZodTypeForProperty(prop.items);
|
||||
zodType = z.array(itemType).describe(prop.description || '');
|
||||
} else {
|
||||
zodType = z.array(z.any()).describe(prop.description || '');
|
||||
}
|
||||
break;
|
||||
case 'object':
|
||||
zodType = z.object({}).passthrough().describe(prop.description || '');
|
||||
break;
|
||||
default:
|
||||
zodType = z.any().describe(prop.description || '');
|
||||
}
|
||||
|
||||
// Make optional if not required
|
||||
if (!required.includes(key)) {
|
||||
zodType = zodType.optional();
|
||||
}
|
||||
|
||||
zodFields[key] = zodType;
|
||||
}
|
||||
|
||||
return z.object(zodFields);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to get Zod type for a property definition
|
||||
*/
|
||||
function getZodTypeForProperty(prop: any): z.ZodTypeAny {
|
||||
switch (prop.type) {
|
||||
case 'string':
|
||||
return z.string();
|
||||
case 'number':
|
||||
return z.number();
|
||||
case 'integer':
|
||||
return z.number().int();
|
||||
case 'boolean':
|
||||
return z.boolean();
|
||||
case 'object':
|
||||
return z.object({}).passthrough();
|
||||
default:
|
||||
return z.any();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create multiple MCP tool wrappers from tool list
|
||||
*/
|
||||
export function createMCPToolWrappers(
|
||||
toolInfos: MCPToolInfo[],
|
||||
mcpClient: MCPClientConnector,
|
||||
logger: FastifyBaseLogger,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
): DynamicStructuredTool[] {
|
||||
return toolInfos.map(toolInfo => createMCPToolWrapper(toolInfo, mcpClient, logger, onImage));
|
||||
}
|
||||
253
gateway/src/tools/platform/get-chart-data.tool.ts
Normal file
253
gateway/src/tools/platform/get-chart-data.tool.ts
Normal file
@@ -0,0 +1,253 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { OHLCService } from '../../services/ohlc-service.js';
|
||||
import type { WorkspaceManager } from '../../workspace/workspace-manager.js';
|
||||
import type { ChartState } from '../../workspace/types.js';
|
||||
import * as chrono from 'chrono-node';
|
||||
|
||||
/**
|
||||
* Get Chart Data Tool
|
||||
*
|
||||
* Standard LangChain tool for fetching OHLCV+ data with workspace defaults.
|
||||
* Allows agent to override any parameter for historical or alternative ticker queries.
|
||||
*/
|
||||
|
||||
export interface GetChartDataToolConfig {
|
||||
ohlcService: OHLCService;
|
||||
workspaceManager: WorkspaceManager;
|
||||
logger: FastifyBaseLogger;
|
||||
}
|
||||
|
||||
export function createGetChartDataTool(config: GetChartDataToolConfig): DynamicStructuredTool {
|
||||
const { ohlcService, workspaceManager, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'get_chart_data',
|
||||
description: `Fetch OHLCV+ data for current chart or any ticker/timeframe. All parameters are optional and default to workspace chart state.
|
||||
|
||||
**IMPORTANT: Use this tool ONLY for quick, casual data viewing. For any analysis, plotting, statistics, or deep research, use the 'research' tool instead.**
|
||||
|
||||
Parameters:
|
||||
- ticker (optional): Market symbol (defaults to workspace chartState.symbol)
|
||||
- period (optional): OHLC period in seconds (defaults to workspace chartState.period)
|
||||
- from_time (optional): Start time as Unix timestamp (number or string like "1774126800") OR date string like "2 days ago", "2024-01-01" (defaults to workspace chartState.start_time)
|
||||
- to_time (optional): End time as Unix timestamp (number or string like "1774732500") OR date string like "now", "yesterday" (defaults to workspace chartState.end_time)
|
||||
- countback (optional): Limit number of bars returned
|
||||
- columns (optional): Extra columns beyond OHLC: ["volume", "buy_vol", "sell_vol", "open_time", "high_time", "low_time", "close_time", "open_interest"]`,
|
||||
schema: z.object({
|
||||
ticker: z.string().optional().describe('Market symbol (defaults to workspace chartState.symbol)'),
|
||||
period: z.number().optional().describe('OHLC period in seconds (defaults to workspace chartState.period)'),
|
||||
from_time: z.union([z.number(), z.string()]).optional().describe('Start time: Unix seconds OR date string (defaults to workspace chartState.start_time)'),
|
||||
to_time: z.union([z.number(), z.string()]).optional().describe('End time: Unix seconds OR date string (defaults to workspace chartState.end_time)'),
|
||||
countback: z.number().optional().describe('Limit number of bars returned'),
|
||||
columns: z.array(z.enum(['volume', 'buy_vol', 'sell_vol', 'open_time', 'high_time', 'low_time', 'close_time', 'open_interest'])).optional().describe('Extra columns beyond OHLC'),
|
||||
}),
|
||||
func: async ({ ticker, period, from_time, to_time, countback, columns }) => {
|
||||
logger.debug({ ticker, period, from_time, to_time, countback, columns }, 'Executing get_chart_data tool');
|
||||
|
||||
try {
|
||||
// Get workspace chart state
|
||||
const chartState = await getChartState(workspaceManager, logger);
|
||||
|
||||
// Build request with workspace defaults
|
||||
const finalTicker = ticker ?? chartState.symbol;
|
||||
const finalPeriod = period ?? parsePeriod(chartState.period);
|
||||
const finalFromTime = await parseTime(from_time, chartState.start_time, logger);
|
||||
const finalToTime = await parseTime(to_time, chartState.end_time, logger);
|
||||
const requestedColumns = columns ?? [];
|
||||
|
||||
// Validate we have all required parameters
|
||||
if (!finalTicker) {
|
||||
return JSON.stringify({ error: 'Ticker not specified and not available in workspace' });
|
||||
}
|
||||
if (!finalPeriod) {
|
||||
return JSON.stringify({ error: 'Period not specified and not available in workspace' });
|
||||
}
|
||||
if (!finalFromTime) {
|
||||
return JSON.stringify({ error: 'from_time not specified and not available in workspace' });
|
||||
}
|
||||
if (!finalToTime) {
|
||||
return JSON.stringify({ error: 'to_time not specified and not available in workspace' });
|
||||
}
|
||||
|
||||
logger.debug({
|
||||
ticker: finalTicker,
|
||||
period: finalPeriod,
|
||||
from_time: finalFromTime,
|
||||
to_time: finalToTime,
|
||||
countback,
|
||||
columns: requestedColumns,
|
||||
}, 'Fetching OHLC data');
|
||||
|
||||
// Fetch data from OHLCService
|
||||
const historyResult = await ohlcService.fetchOHLC(
|
||||
finalTicker,
|
||||
finalPeriod.toString(),
|
||||
finalFromTime,
|
||||
finalToTime,
|
||||
countback
|
||||
);
|
||||
|
||||
if (historyResult.noData || !historyResult.bars || historyResult.bars.length === 0) {
|
||||
return JSON.stringify({
|
||||
ticker: finalTicker,
|
||||
period: finalPeriod,
|
||||
timeRange: { start: finalFromTime, end: finalToTime },
|
||||
bars: [],
|
||||
});
|
||||
}
|
||||
|
||||
// Filter/format bars with requested columns
|
||||
const bars = historyResult.bars.map(bar => {
|
||||
const result: any = {
|
||||
time: bar.time,
|
||||
open: bar.open,
|
||||
high: bar.high,
|
||||
low: bar.low,
|
||||
close: bar.close,
|
||||
ticker: finalTicker,
|
||||
};
|
||||
|
||||
// Add optional columns if requested
|
||||
for (const col of requestedColumns) {
|
||||
if (col === 'volume' && bar.volume !== undefined) {
|
||||
result.volume = bar.volume;
|
||||
} else if (col === 'buy_vol' && bar.buy_vol !== undefined) {
|
||||
result.buy_vol = bar.buy_vol;
|
||||
} else if (col === 'sell_vol' && bar.sell_vol !== undefined) {
|
||||
result.sell_vol = bar.sell_vol;
|
||||
} else if (col === 'open_time' && bar.open_time !== undefined) {
|
||||
result.open_time = bar.open_time;
|
||||
} else if (col === 'high_time' && bar.high_time !== undefined) {
|
||||
result.high_time = bar.high_time;
|
||||
} else if (col === 'low_time' && bar.low_time !== undefined) {
|
||||
result.low_time = bar.low_time;
|
||||
} else if (col === 'close_time' && bar.close_time !== undefined) {
|
||||
result.close_time = bar.close_time;
|
||||
} else if (col === 'open_interest' && bar.open_interest !== undefined) {
|
||||
result.open_interest = bar.open_interest;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
});
|
||||
|
||||
logger.info({ ticker: finalTicker, barCount: bars.length }, 'Chart data fetched successfully');
|
||||
|
||||
return JSON.stringify({
|
||||
ticker: finalTicker,
|
||||
period: finalPeriod,
|
||||
timeRange: {
|
||||
start: finalFromTime,
|
||||
end: finalToTime,
|
||||
},
|
||||
bars,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Get chart data tool failed');
|
||||
return JSON.stringify({
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get chart state from workspace
|
||||
*/
|
||||
async function getChartState(workspaceManager: WorkspaceManager, logger: FastifyBaseLogger): Promise<ChartState> {
|
||||
try {
|
||||
const chartState = workspaceManager.getState<ChartState>('chartState');
|
||||
|
||||
if (!chartState) {
|
||||
// Return default chart state
|
||||
return {
|
||||
symbol: 'BINANCE:BTC/USDT',
|
||||
start_time: null,
|
||||
end_time: null,
|
||||
period: '15',
|
||||
selected_shapes: [],
|
||||
};
|
||||
}
|
||||
|
||||
return chartState;
|
||||
} catch (error) {
|
||||
logger.error({ error }, 'Failed to get chart state from workspace');
|
||||
// Return default chart state
|
||||
return {
|
||||
symbol: 'BINANCE:BTC/USDT',
|
||||
start_time: null,
|
||||
end_time: null,
|
||||
period: '15',
|
||||
selected_shapes: [],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse period string to seconds
|
||||
* Handles period as either a number (already in seconds) or string (minutes)
|
||||
*/
|
||||
function parsePeriod(period: string | number | null): number | null {
|
||||
if (period === null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (typeof period === 'number') {
|
||||
return period;
|
||||
}
|
||||
|
||||
// Period in workspace is stored as string representing minutes
|
||||
// Convert to seconds
|
||||
const minutes = parseInt(period, 10);
|
||||
if (isNaN(minutes)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return minutes * 60;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse time parameter (Unix seconds, date string, or null)
|
||||
* Returns Unix timestamp in seconds
|
||||
*/
|
||||
async function parseTime(
|
||||
timeParam: number | string | null | undefined,
|
||||
workspaceDefault: number | null,
|
||||
logger: FastifyBaseLogger
|
||||
): Promise<number | null> {
|
||||
// Use workspace default if param not provided
|
||||
if (timeParam === undefined || timeParam === null) {
|
||||
return workspaceDefault;
|
||||
}
|
||||
|
||||
// If it's already a number, assume Unix seconds
|
||||
if (typeof timeParam === 'number') {
|
||||
return timeParam;
|
||||
}
|
||||
|
||||
// Try to parse string as numeric Unix timestamp first
|
||||
const numericTimestamp = parseInt(timeParam, 10);
|
||||
if (!isNaN(numericTimestamp) && numericTimestamp.toString() === timeParam) {
|
||||
// String is a valid integer - treat as Unix seconds
|
||||
logger.debug({ timeParam, parsedTimestamp: numericTimestamp }, 'Parsed string as Unix timestamp');
|
||||
return numericTimestamp;
|
||||
}
|
||||
|
||||
// Parse date string using chrono
|
||||
try {
|
||||
const parsed = chrono.parseDate(timeParam);
|
||||
if (!parsed) {
|
||||
logger.warn({ timeParam }, 'Failed to parse time string');
|
||||
return null;
|
||||
}
|
||||
|
||||
// Convert to Unix seconds
|
||||
return Math.floor(parsed.getTime() / 1000);
|
||||
} catch (error) {
|
||||
logger.error({ error, timeParam }, 'Error parsing time string');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
11
gateway/src/tools/platform/index.ts
Normal file
11
gateway/src/tools/platform/index.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
// Platform tools exports
|
||||
|
||||
export {
|
||||
createSymbolLookupTool,
|
||||
type SymbolLookupToolConfig,
|
||||
} from './symbol-lookup.tool.js';
|
||||
|
||||
export {
|
||||
createGetChartDataTool,
|
||||
type GetChartDataToolConfig,
|
||||
} from './get-chart-data.tool.js';
|
||||
53
gateway/src/tools/platform/research-agent.tool.ts
Normal file
53
gateway/src/tools/platform/research-agent.tool.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { ResearchSubagent } from '../../harness/subagents/research/index.js';
|
||||
import type { SubagentContext } from '../../harness/subagents/base-subagent.js';
|
||||
|
||||
/**
 * Dependencies for createResearchAgentTool.
 */
export interface ResearchAgentToolConfig {
  /** Subagent that actually executes the research task. */
  researchSubagent: ResearchSubagent;
  /** Per-session context forwarded to the subagent on every invocation. */
  context: SubagentContext;
  /** Logger for delegation and failure diagnostics. */
  logger: FastifyBaseLogger;
}
|
||||
|
||||
/**
|
||||
* Creates a LangChain tool that delegates to the research subagent.
|
||||
* This is the standard LangChain pattern for exposing a subagent as a tool
|
||||
* to a parent agent.
|
||||
*/
|
||||
export function createResearchAgentTool(config: ResearchAgentToolConfig): DynamicStructuredTool {
|
||||
const { researchSubagent, context, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'research',
|
||||
description: `Delegate to the research subagent for data analysis, charting, statistics, and Python script execution.
|
||||
|
||||
Use this tool for:
|
||||
- Plotting charts with technical indicators (EMA, RSI, MACD, Bollinger Bands, etc.)
|
||||
- Statistical analysis of price data
|
||||
- Custom research scripts using the DataAPI and ChartingAPI
|
||||
- Any task requiring code execution or matplotlib charts
|
||||
|
||||
The research subagent will write and execute Python scripts, capture output and charts, and return results.`,
|
||||
schema: z.object({
|
||||
instruction: z.string().describe('The research task or analysis to perform. Be specific about what data, indicators, timeframes, and output you want.'),
|
||||
}),
|
||||
func: async ({ instruction }: { instruction: string }): Promise<string> => {
|
||||
logger.info({ instruction: instruction.substring(0, 100) }, 'Delegating to research subagent');
|
||||
|
||||
try {
|
||||
const result = await researchSubagent.executeWithImages(context, instruction);
|
||||
|
||||
// Return in the format that AgentHarness.processToolResult() knows how to handle
|
||||
// (extracts images and passes them to channelAdapter)
|
||||
return JSON.stringify({
|
||||
text: result.text,
|
||||
images: result.images,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error({ error, errorMessage: (error as Error)?.message }, 'Research subagent failed');
|
||||
throw error;
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
78
gateway/src/tools/platform/symbol-lookup.tool.ts
Normal file
78
gateway/src/tools/platform/symbol-lookup.tool.ts
Normal file
@@ -0,0 +1,78 @@
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { z } from 'zod';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { SymbolIndexService } from '../../services/symbol-index-service.js';
|
||||
|
||||
/**
|
||||
* Symbol Lookup Tool
|
||||
*
|
||||
* Standard LangChain tool for symbol search and resolution.
|
||||
* Supports two modes:
|
||||
* - search: Find symbols matching a query
|
||||
* - resolve: Get detailed metadata for a specific symbol
|
||||
*/
|
||||
|
||||
/**
 * Dependencies for createSymbolLookupTool.
 */
export interface SymbolLookupToolConfig {
  /** Backend used for both search and resolve queries. */
  symbolIndexService: SymbolIndexService;
  /** Logger for tool execution diagnostics. */
  logger: FastifyBaseLogger;
}
|
||||
|
||||
export function createSymbolLookupTool(config: SymbolLookupToolConfig): DynamicStructuredTool {
|
||||
const { symbolIndexService, logger } = config;
|
||||
|
||||
return new DynamicStructuredTool({
|
||||
name: 'symbol_lookup',
|
||||
description: `Search for market symbols or resolve symbol metadata. Use 'search' mode to find symbols matching a query, or 'resolve' mode to get detailed metadata for a specific symbol.
|
||||
|
||||
Parameters:
|
||||
- mode (required): Either 'search' or 'resolve'
|
||||
- query (required): Search query (for search mode) or symbol ticker (for resolve mode)
|
||||
- limit (optional): Maximum number of search results (search mode only, default: 30)`,
|
||||
schema: z.object({
|
||||
mode: z.enum(['search', 'resolve']).describe('Operation mode: search for symbols or resolve a specific symbol'),
|
||||
query: z.string().describe('Search query (for search mode) or symbol ticker (for resolve mode)'),
|
||||
limit: z.number().optional().default(30).describe('Maximum number of search results (search mode only, default: 30)'),
|
||||
}),
|
||||
func: async ({ mode, query, limit }) => {
|
||||
logger.debug({ mode, query, limit }, 'Executing symbol_lookup tool');
|
||||
|
||||
try {
|
||||
if (mode === 'search') {
|
||||
const results = await symbolIndexService.search(query, limit);
|
||||
|
||||
logger.info({ query, resultCount: results.length }, 'Symbol search completed');
|
||||
|
||||
return JSON.stringify({
|
||||
mode: 'search',
|
||||
query,
|
||||
count: results.length,
|
||||
results,
|
||||
});
|
||||
} else {
|
||||
const symbolInfo = await symbolIndexService.resolveSymbol(query);
|
||||
|
||||
if (!symbolInfo) {
|
||||
logger.warn({ symbol: query }, 'Symbol not found');
|
||||
return JSON.stringify({
|
||||
error: `Symbol not found: ${query}`,
|
||||
symbol: query,
|
||||
});
|
||||
}
|
||||
|
||||
logger.info({ symbol: query }, 'Symbol resolved');
|
||||
|
||||
return JSON.stringify({
|
||||
mode: 'resolve',
|
||||
symbol: query,
|
||||
symbolInfo,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error({ error, mode, query }, 'Symbol lookup tool failed');
|
||||
return JSON.stringify({
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
291
gateway/src/tools/tool-registry.ts
Normal file
291
gateway/src/tools/tool-registry.ts
Normal file
@@ -0,0 +1,291 @@
|
||||
import type { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import type { FastifyBaseLogger } from 'fastify';
|
||||
import type { MCPClientConnector } from '../harness/mcp-client.js';
|
||||
import type { OHLCService } from '../services/ohlc-service.js';
|
||||
import type { SymbolIndexService } from '../services/symbol-index-service.js';
|
||||
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
|
||||
import { createSymbolLookupTool } from './platform/symbol-lookup.tool.js';
|
||||
import { createGetChartDataTool } from './platform/get-chart-data.tool.js';
|
||||
import { createMCPToolWrappers, type MCPToolInfo } from './mcp/mcp-tool-wrapper.js';
|
||||
|
||||
/**
 * Agent tool configuration.
 * Specifies which tools are available to which agent; registered via
 * ToolRegistry.registerAgentTools() and looked up by agentName.
 */
export interface AgentToolConfig {
  /** Agent name (e.g., 'main', 'research', 'code-reviewer') */
  agentName: string;

  /** Platform tool names to include */
  platformTools: string[];

  /** MCP tool patterns/names to include (supports wildcards like 'category_*') */
  mcpTools: string[];
}
|
||||
|
||||
/**
 * Platform services required for creating platform tools.
 * Each entry can be provided as a direct reference or as a getter function,
 * which supports lazy initialization of services that are not yet ready when
 * the registry is constructed.
 */
export interface PlatformServices {
  /** OHLC/candle data source consumed by the get_chart_data tool. */
  ohlcService?: OHLCService | (() => OHLCService | undefined);
  /** Symbol search/resolution backend consumed by the symbol_lookup tool. */
  symbolIndexService?: SymbolIndexService | (() => SymbolIndexService | undefined);
  /** Global fallback workspace manager, used when no session-scoped one is supplied. */
  workspaceManager?: WorkspaceManager | (() => WorkspaceManager | undefined);
}
|
||||
|
||||
/**
|
||||
* Tool Registry
|
||||
*
|
||||
* Manages tool creation and agent-to-tool mappings.
|
||||
* Supports:
|
||||
* - Platform tools (local services like symbol lookup, chart data)
|
||||
* - Remote MCP tools (per-user, session-scoped)
|
||||
* - Configurable tool routing (which tools for which agents)
|
||||
*/
|
||||
export class ToolRegistry {
|
||||
private logger: FastifyBaseLogger;
|
||||
private platformServices: PlatformServices;
|
||||
private agentToolConfigs: Map<string, AgentToolConfig> = new Map();
|
||||
|
||||
constructor(logger: FastifyBaseLogger, platformServices: PlatformServices) {
|
||||
this.logger = logger;
|
||||
this.platformServices = platformServices;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register agent tool configuration
|
||||
*/
|
||||
registerAgentTools(config: AgentToolConfig): void {
|
||||
this.agentToolConfigs.set(config.agentName, config);
|
||||
this.logger.debug(
|
||||
{
|
||||
agent: config.agentName,
|
||||
platformTools: config.platformTools,
|
||||
mcpTools: config.mcpTools,
|
||||
},
|
||||
'Registered agent tool configuration'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tools for a specific agent
|
||||
*
|
||||
* @param agentName - Name of the agent ('main', 'research', etc.)
|
||||
* @param mcpClient - MCP client for remote tools (optional)
|
||||
* @param availableMCPTools - List of available MCP tools from user's server (optional)
|
||||
* @param workspaceManager - Workspace manager for this session (optional, used by some platform tools)
|
||||
* @returns Array of tools for this agent
|
||||
*/
|
||||
async getToolsForAgent(
|
||||
agentName: string,
|
||||
mcpClient?: MCPClientConnector,
|
||||
availableMCPTools?: MCPToolInfo[],
|
||||
workspaceManager?: WorkspaceManager,
|
||||
onImage?: (image: { data: string; mimeType: string }) => void
|
||||
): Promise<DynamicStructuredTool[]> {
|
||||
const config = this.agentToolConfigs.get(agentName);
|
||||
|
||||
if (!config) {
|
||||
this.logger.warn({ agent: agentName }, 'No tool configuration found for agent');
|
||||
return [];
|
||||
}
|
||||
|
||||
const tools: DynamicStructuredTool[] = [];
|
||||
|
||||
// Add platform tools
|
||||
for (const toolName of config.platformTools) {
|
||||
const tool = await this.getPlatformTool(toolName, workspaceManager);
|
||||
if (tool) {
|
||||
tools.push(tool);
|
||||
} else {
|
||||
this.logger.warn({ agent: agentName, tool: toolName }, 'Platform tool not found');
|
||||
}
|
||||
}
|
||||
|
||||
// Add MCP tools (if MCP client and tools are available)
|
||||
if (mcpClient && availableMCPTools && availableMCPTools.length > 0) {
|
||||
const filteredMCPTools = this.filterMCPTools(availableMCPTools, config.mcpTools);
|
||||
const mcpToolInstances = createMCPToolWrappers(filteredMCPTools, mcpClient, this.logger, onImage);
|
||||
tools.push(...mcpToolInstances);
|
||||
|
||||
this.logger.debug(
|
||||
{
|
||||
agent: agentName,
|
||||
mcpToolCount: mcpToolInstances.length,
|
||||
mcpToolNames: mcpToolInstances.map(t => t.name),
|
||||
},
|
||||
'Added MCP tools for agent'
|
||||
);
|
||||
}
|
||||
|
||||
this.logger.info(
|
||||
{
|
||||
agent: agentName,
|
||||
toolCount: tools.length,
|
||||
toolNames: tools.map(t => t.name),
|
||||
},
|
||||
'Retrieved tools for agent'
|
||||
);
|
||||
|
||||
return tools;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a platform tool by name
|
||||
*
|
||||
* @param toolName - Name of the tool to create
|
||||
* @param sessionWorkspaceManager - Optional session-specific workspace manager
|
||||
*/
|
||||
private async getPlatformTool(
|
||||
toolName: string,
|
||||
sessionWorkspaceManager?: WorkspaceManager
|
||||
): Promise<DynamicStructuredTool | null> {
|
||||
// Don't cache tools - recreate each time to get latest services
|
||||
// (services might be initialized asynchronously after registry creation)
|
||||
|
||||
// Create tool based on name
|
||||
let tool: DynamicStructuredTool | null = null;
|
||||
|
||||
switch (toolName) {
|
||||
case 'symbol_lookup': {
|
||||
const symbolIndexService = this.resolveService(this.platformServices.symbolIndexService);
|
||||
if (symbolIndexService) {
|
||||
tool = createSymbolLookupTool({
|
||||
symbolIndexService,
|
||||
logger: this.logger,
|
||||
});
|
||||
} else {
|
||||
this.logger.warn('SymbolIndexService not available for symbol_lookup tool');
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'get_chart_data': {
|
||||
const ohlcService = this.resolveService(this.platformServices.ohlcService);
|
||||
// Use session workspace manager if provided, otherwise try global
|
||||
const workspaceManager = sessionWorkspaceManager ||
|
||||
this.resolveService(this.platformServices.workspaceManager);
|
||||
if (ohlcService && workspaceManager) {
|
||||
tool = createGetChartDataTool({
|
||||
ohlcService,
|
||||
workspaceManager,
|
||||
logger: this.logger,
|
||||
});
|
||||
} else {
|
||||
this.logger.warn(
|
||||
{ hasOHLC: !!ohlcService, hasWorkspace: !!workspaceManager },
|
||||
'OHLCService or WorkspaceManager not available for get_chart_data tool'
|
||||
);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
this.logger.warn({ tool: toolName }, 'Unknown platform tool');
|
||||
return null;
|
||||
}
|
||||
|
||||
return tool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a service (handle both direct references and getter functions)
|
||||
*/
|
||||
private resolveService<T>(service: T | (() => T | undefined) | undefined): T | undefined {
|
||||
// Check if it's a function by checking the type more carefully
|
||||
if (service && typeof (service as any) === 'function' && !(service as any).prototype) {
|
||||
// It's a getter function (arrow function or function expression, not a class)
|
||||
return (service as () => T | undefined)();
|
||||
}
|
||||
return service as T | undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter MCP tools based on patterns/names
|
||||
* Supports wildcards like 'category_*' or exact names like 'execute_research'
|
||||
*/
|
||||
private filterMCPTools(availableTools: MCPToolInfo[], patterns: string[]): MCPToolInfo[] {
|
||||
if (patterns.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return availableTools.filter(tool => {
|
||||
for (const pattern of patterns) {
|
||||
if (this.matchesPattern(tool.name, pattern)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool name matches a pattern
|
||||
* Supports wildcards: 'category_*' matches 'category_write', 'category_read', etc.
|
||||
*/
|
||||
private matchesPattern(toolName: string, pattern: string): boolean {
|
||||
if (pattern === toolName) {
|
||||
return true; // Exact match
|
||||
}
|
||||
|
||||
if (pattern.includes('*')) {
|
||||
// Convert wildcard pattern to regex
|
||||
const regexPattern = pattern
|
||||
.replace(/\*/g, '.*')
|
||||
.replace(/\?/g, '.');
|
||||
const regex = new RegExp(`^${regexPattern}$`);
|
||||
return regex.test(toolName);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered agent names
|
||||
*/
|
||||
getRegisteredAgents(): string[] {
|
||||
return Array.from(this.agentToolConfigs.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool configuration for an agent
|
||||
*/
|
||||
getAgentToolConfig(agentName: string): AgentToolConfig | null {
|
||||
return this.agentToolConfigs.get(agentName) || null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Global registry instance (initialized at gateway startup)
|
||||
*/
|
||||
let globalToolRegistry: ToolRegistry | null = null;
|
||||
|
||||
/**
|
||||
* Initialize the global tool registry
|
||||
*/
|
||||
export function initializeToolRegistry(
|
||||
logger: FastifyBaseLogger,
|
||||
platformServices: PlatformServices
|
||||
): ToolRegistry {
|
||||
if (globalToolRegistry) {
|
||||
logger.warn('Global tool registry already initialized');
|
||||
return globalToolRegistry;
|
||||
}
|
||||
|
||||
globalToolRegistry = new ToolRegistry(logger, platformServices);
|
||||
|
||||
logger.info('Tool registry initialized');
|
||||
|
||||
return globalToolRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the global tool registry
|
||||
*/
|
||||
export function getToolRegistry(): ToolRegistry {
|
||||
if (!globalToolRegistry) {
|
||||
throw new Error('Tool registry not initialized. Call initializeToolRegistry() first.');
|
||||
}
|
||||
|
||||
return globalToolRegistry;
|
||||
}
|
||||
@@ -16,7 +16,15 @@ export interface TradingViewBar {
|
||||
high: number;
|
||||
low: number;
|
||||
close: number;
|
||||
volume: number;
|
||||
volume?: number;
|
||||
// Optional extra columns from ohlc.proto
|
||||
buy_vol?: number;
|
||||
sell_vol?: number;
|
||||
open_time?: number;
|
||||
high_time?: number;
|
||||
low_time?: number;
|
||||
close_time?: number;
|
||||
open_interest?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -12,11 +12,31 @@ export const ModelPreferenceSchema = z.object({
|
||||
export type ModelPreference = z.infer<typeof ModelPreferenceSchema>;
|
||||
|
||||
/**
|
||||
* User license and feature authorization
|
||||
* Kubernetes resource allocations — stored per-user so they can be customized
|
||||
* beyond the standard tier defaults.
|
||||
*/
|
||||
export const UserLicenseSchema = z.object({
|
||||
userId: z.string(),
|
||||
email: z.string().email().optional(),
|
||||
export const K8sResourcesSchema = z.object({
|
||||
memoryRequest: z.string(), // e.g. "256Mi"
|
||||
memoryLimit: z.string(), // e.g. "512Mi"
|
||||
cpuRequest: z.string(), // e.g. "100m"
|
||||
cpuLimit: z.string(), // e.g. "500m"
|
||||
storage: z.string(), // e.g. "1Gi"
|
||||
tmpSizeLimit: z.string(), // e.g. "128Mi"
|
||||
enableIdleShutdown: z.boolean(),
|
||||
idleTimeoutMinutes: z.number(),
|
||||
});
|
||||
|
||||
export type K8sResources = z.infer<typeof K8sResourcesSchema>;
|
||||
|
||||
/**
|
||||
* The portable License dict — stored as a single JSONB blob per user in the DB,
|
||||
* passable over-the-wire to any service that needs to enforce or inspect
|
||||
* feature access, resource limits, or preferences.
|
||||
*
|
||||
* Standard tier templates define the defaults; per-user rows are copies that
|
||||
* can be customised independently without schema changes.
|
||||
*/
|
||||
export const LicenseSchema = z.object({
|
||||
licenseType: z.enum(['free', 'pro', 'enterprise']),
|
||||
features: z.object({
|
||||
maxIndicators: z.number(),
|
||||
@@ -32,8 +52,82 @@ export const UserLicenseSchema = z.object({
|
||||
maxTokensPerMessage: z.number(),
|
||||
rateLimitPerMinute: z.number(),
|
||||
}),
|
||||
mcpServerUrl: z.string(), // Allow any string including 'pending', URL validation happens later
|
||||
k8sResources: K8sResourcesSchema,
|
||||
preferredModel: ModelPreferenceSchema.optional(),
|
||||
});
|
||||
|
||||
export type License = z.infer<typeof LicenseSchema>;
|
||||
export type LicenseTier = License['licenseType'];
|
||||
|
||||
/**
|
||||
* Standard tier templates — single source of truth for default License values.
|
||||
* Used when creating new user accounts (copy the tier template into the user's
|
||||
* license row) and anywhere tier-specific defaults are needed.
|
||||
*/
|
||||
export const LICENSE_TIER_TEMPLATES: Record<LicenseTier, License> = {
  // Free tier: smallest quotas and container footprint; idles out after 15 minutes.
  free: {
    licenseType: 'free',
    features: {
      maxIndicators: 5, maxStrategies: 3, maxBacktestDays: 30,
      realtimeData: false, customExecutors: false, apiAccess: false,
    },
    resourceLimits: {
      maxConcurrentSessions: 1, maxMessagesPerDay: 100,
      maxTokensPerMessage: 4096, rateLimitPerMinute: 10,
    },
    k8sResources: {
      memoryRequest: '256Mi', memoryLimit: '512Mi',
      cpuRequest: '100m', cpuLimit: '500m',
      storage: '1Gi', tmpSizeLimit: '128Mi',
      enableIdleShutdown: true, idleTimeoutMinutes: 15,
    },
  },
  // Pro tier: all feature flags enabled, mid-range quotas, 1-hour idle window.
  pro: {
    licenseType: 'pro',
    features: {
      maxIndicators: 50, maxStrategies: 20, maxBacktestDays: 365,
      realtimeData: true, customExecutors: true, apiAccess: true,
    },
    resourceLimits: {
      maxConcurrentSessions: 5, maxMessagesPerDay: 1000,
      maxTokensPerMessage: 8192, rateLimitPerMinute: 60,
    },
    k8sResources: {
      memoryRequest: '512Mi', memoryLimit: '2Gi',
      cpuRequest: '250m', cpuLimit: '2000m',
      storage: '10Gi', tmpSizeLimit: '256Mi',
      enableIdleShutdown: true, idleTimeoutMinutes: 60,
    },
  },
  // Enterprise tier: effectively-unbounded caps; idle shutdown disabled entirely.
  enterprise: {
    licenseType: 'enterprise',
    features: {
      maxIndicators: 999, maxStrategies: 999, maxBacktestDays: 3650,
      realtimeData: true, customExecutors: true, apiAccess: true,
    },
    resourceLimits: {
      maxConcurrentSessions: 20, maxMessagesPerDay: 10000,
      maxTokensPerMessage: 32768, rateLimitPerMinute: 300,
    },
    k8sResources: {
      memoryRequest: '1Gi', memoryLimit: '4Gi',
      cpuRequest: '500m', cpuLimit: '4000m',
      storage: '50Gi', tmpSizeLimit: '512Mi',
      // idleTimeoutMinutes is 0 here because enableIdleShutdown is false — the value is inert.
      enableIdleShutdown: false, idleTimeoutMinutes: 0,
    },
  },
};
|
||||
|
||||
/**
|
||||
* UserLicense — DB row envelope. Wraps the portable License dict with account
|
||||
* identity and metadata. Not intended to be sent over-the-wire directly;
|
||||
* use the nested `license` field for cross-service communication.
|
||||
*/
|
||||
export const UserLicenseSchema = z.object({
|
||||
userId: z.string(),
|
||||
email: z.string().email().optional(),
|
||||
license: LicenseSchema,
|
||||
mcpServerUrl: z.string(), // Allow any string including 'pending'; validated at use time
|
||||
expiresAt: z.union([z.date(), z.string(), z.null()]).optional().transform(val => {
|
||||
if (!val || val === null) return undefined;
|
||||
return val instanceof Date ? val : new Date(val);
|
||||
@@ -59,14 +153,17 @@ export enum ChannelType {
|
||||
}
|
||||
|
||||
/**
|
||||
* Authentication context per channel
|
||||
* Authentication context per channel session.
|
||||
* `license` is the portable License dict (not the full UserLicense row).
|
||||
* `mcpServerUrl` is the runtime container endpoint, resolved at auth time.
|
||||
*/
|
||||
export const AuthContextSchema = z.object({
|
||||
userId: z.string(),
|
||||
channelType: z.nativeEnum(ChannelType),
|
||||
channelUserId: z.string(), // Platform-specific ID (telegram_id, discord_id, etc)
|
||||
sessionId: z.string(),
|
||||
license: UserLicenseSchema,
|
||||
license: LicenseSchema,
|
||||
mcpServerUrl: z.string(),
|
||||
authenticatedAt: z.date(),
|
||||
});
|
||||
|
||||
|
||||
@@ -62,6 +62,8 @@ export type {
|
||||
StoreConfig,
|
||||
ChannelAdapter,
|
||||
ChannelCapabilities,
|
||||
ImageMessage,
|
||||
TextMessage,
|
||||
PathTrigger,
|
||||
PathTriggerHandler,
|
||||
PathTriggerContext,
|
||||
|
||||
@@ -131,6 +131,29 @@ export interface ChannelCapabilities {
|
||||
supportsTradingViewEmbed: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image message for channel adapters.
|
||||
* Contains base64-encoded image data from MCP tools.
|
||||
*/
|
||||
export interface ImageMessage {
|
||||
/** Base64-encoded image data */
|
||||
data: string;
|
||||
|
||||
/** MIME type (e.g., 'image/png', 'image/jpeg') */
|
||||
mimeType: string;
|
||||
|
||||
/** Optional caption/description */
|
||||
caption?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Text message for channel adapters.
|
||||
*/
|
||||
export interface TextMessage {
|
||||
/** Text content */
|
||||
text: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapter interface for communication channels.
|
||||
* Implemented by WebSocket handler, Telegram handler, etc.
|
||||
@@ -142,6 +165,18 @@ export interface ChannelAdapter {
|
||||
/** Send an incremental patch to the client */
|
||||
sendPatch(msg: PatchMessage): void;
|
||||
|
||||
/** Send a text message to the client */
|
||||
sendText(msg: TextMessage): void;
|
||||
|
||||
/** Send a streaming text chunk to the client */
|
||||
sendChunk(content: string): void;
|
||||
|
||||
/** Send an image to the client */
|
||||
sendImage(msg: ImageMessage): void;
|
||||
|
||||
/** Notify client that a tool call is being executed */
|
||||
sendToolCall?(toolName: string, label?: string): void;
|
||||
|
||||
/** Get channel capabilities */
|
||||
getCapabilities(): ChannelCapabilities;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user