sandbox connected and streaming

This commit is contained in:
2026-03-30 23:29:03 -04:00
parent c3a8fae132
commit 998f69fa1a
130 changed files with 7416 additions and 2123 deletions

View File

@@ -1,22 +1,56 @@
import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import { HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
import type { UserLicense } from '../types/user.js';
import type { License } from '../types/user.js';
import { ChannelType } from '../types/user.js';
import type { ConversationStore } from './memory/conversation-store.js';
import type { InboundMessage, OutboundMessage } from '../types/messages.js';
import { MCPClientConnector } from './mcp-client.js';
import { CONTEXT_URIS, type ResourceContent } from '../types/resources.js';
import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js';
import { ModelRouter, RoutingStrategy } from '../llm/router.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
import type { ChannelAdapter } from '../workspace/index.js';
import type { ResearchSubagent } from './subagents/research/index.js';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { getToolRegistry } from '../tools/tool-registry.js';
import type { MCPToolInfo } from '../tools/mcp/mcp-tool-wrapper.js';
import { createResearchAgentTool } from '../tools/platform/research-agent.tool.js';
import { createUserContext } from './memory/session-context.js';
import { readFile } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
export interface AgentHarnessConfig {
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
* Session-specific config provided by channel handlers.
* Contains only per-connection details — no infrastructure dependencies.
*/
export interface HarnessSessionConfig {
userId: string;
sessionId: string;
license: UserLicense;
providerConfig: ProviderConfig;
license: License;
mcpServerUrl: string;
logger: FastifyBaseLogger;
workspaceManager?: WorkspaceManager;
channelAdapter?: ChannelAdapter;
channelType?: ChannelType;
channelUserId?: string;
}
/**
* Factory function type for creating AgentHarness instances.
* Created in main.ts with infrastructure (storage, providerConfig) captured in closure.
* Channel handlers call this factory without knowing about Redis or Iceberg.
*/
export type HarnessFactory = (sessionConfig: HarnessSessionConfig) => AgentHarness;
export interface AgentHarnessConfig extends HarnessSessionConfig {
providerConfig: ProviderConfig;
conversationStore?: ConversationStore;
historyLimit: number;
researchSubagent?: ResearchSubagent;
}
/**
@@ -27,32 +61,59 @@ export interface AgentHarnessConfig {
* 1. Fetches context from user's MCP resources
* 2. Routes to appropriate LLM model
* 3. Calls LLM with embedded context
* 4. Routes tool calls to user's MCP or platform tools
* 4. Routes tool calls to platform tools or user's MCP tools
* 5. Saves messages back to user's MCP
*/
export class AgentHarness {
private static systemPromptTemplate: string | null = null;
private config: AgentHarnessConfig;
private modelFactory: LLMProviderFactory;
private modelRouter: ModelRouter;
private mcpClient: MCPClientConnector;
private workspaceManager?: WorkspaceManager;
private lastWorkspaceSeq: number = 0;
private channelAdapter?: ChannelAdapter;
private isFirstMessage: boolean = true;
private researchSubagent?: ResearchSubagent;
private availableMCPTools: MCPToolInfo[] = [];
private researchImageCapture: Array<{ data: string; mimeType: string }> = [];
private conversationStore?: ConversationStore;
constructor(config: AgentHarnessConfig) {
this.config = config;
this.workspaceManager = config.workspaceManager;
this.channelAdapter = config.channelAdapter;
this.researchSubagent = config.researchSubagent;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
this.conversationStore = config.conversationStore;
this.mcpClient = new MCPClientConnector({
userId: config.userId,
mcpServerUrl: config.license.mcpServerUrl,
mcpServerUrl: config.mcpServerUrl,
logger: config.logger,
});
}
/**
* Load system prompt template from file (cached)
*/
/**
 * Load the system prompt template from disk, memoizing it on the class so
 * the file is read at most once per process.
 */
private static async loadSystemPromptTemplate(): Promise<string> {
  if (!AgentHarness.systemPromptTemplate) {
    AgentHarness.systemPromptTemplate = await readFile(
      join(__dirname, 'prompts', 'system-prompt.md'),
      'utf-8'
    );
  }
  return AgentHarness.systemPromptTemplate;
}
/**
* Set the channel adapter (can be called after construction)
*/
setChannelAdapter(adapter: ChannelAdapter): void {
  // Replaces any adapter captured at construction; all streamed chunks,
  // tool-status labels, and images go through this adapter.
  this.channelAdapter = adapter;
}
/**
* Initialize harness and connect to user's MCP server
*/
@@ -64,6 +125,13 @@ export class AgentHarness {
try {
await this.mcpClient.connect();
// Discover available MCP tools from user's server
await this.discoverMCPTools();
// Initialize research subagent if not provided
await this.initializeResearchSubagent();
this.config.logger.info('Agent harness initialized');
} catch (error) {
this.config.logger.error({ error }, 'Failed to initialize agent harness');
@@ -71,46 +139,384 @@ export class AgentHarness {
}
}
/**
* Discover available MCP tools from user's server
*/
/**
 * Query the user's MCP server for its tool catalog and cache the result in
 * `availableMCPTools`. Failure is non-fatal: the cache is reset to empty and
 * the agent continues with platform tools only.
 */
private async discoverMCPTools(): Promise<void> {
  try {
    this.config.logger.debug('Discovering MCP tools from user server');

    // Ask the connected MCP client for every tool the server exposes.
    const toolList = await this.mcpClient.listTools();

    // Normalize each entry into the MCPToolInfo shape used by the registry.
    this.availableMCPTools = toolList.map((t) => ({
      name: t.name,
      description: t.description,
      inputSchema: t.inputSchema as any,
    }));

    this.config.logger.info(
      {
        toolCount: this.availableMCPTools.length,
        toolNames: this.availableMCPTools.map(t => t.name),
      },
      'MCP tools discovered'
    );
  } catch (error) {
    this.config.logger.warn(
      {
        error,
        errorMessage: (error as Error)?.message,
        errorName: (error as Error)?.name,
        errorCode: (error as any)?.code,
      },
      'Failed to discover MCP tools - continuing without remote tools'
    );
    // Remote tools are optional; clear the cache and keep going.
    this.availableMCPTools = [];
  }
}
/**
* Initialize research subagent
*/
private async initializeResearchSubagent(): Promise<void> {
  // Respect an injected subagent (e.g. supplied via config for tests).
  if (this.researchSubagent) {
    this.config.logger.debug('Research subagent already provided');
    return;
  }
  this.config.logger.debug('Creating research subagent for session');
  try {
    // Dynamic import — presumably to break a circular module dependency
    // between the harness and the research subagent; TODO confirm.
    const { createResearchSubagent } = await import('./subagents/research/index.js');
    // Create a model for the research subagent
    const model = await this.modelRouter.route(
      'research analysis', // dummy query
      this.config.license,
      RoutingStrategy.COMPLEXITY,
      this.config.userId
    );
    // Get tools for research subagent from registry
    // Images from MCP responses are captured via onImage and routed to the subagent
    const toolRegistry = getToolRegistry();
    const researchTools = await toolRegistry.getToolsForAgent(
      'research',
      this.mcpClient,
      this.availableMCPTools,
      this.workspaceManager,
      (img) => this.researchImageCapture.push(img)
    );
    // Path resolution: use the compiled output path
    const researchSubagentPath = join(__dirname, 'subagents', 'research');
    this.config.logger.debug({ researchSubagentPath }, 'Using research subagent path');
    // researchImageCapture is shared by reference: images pushed by the
    // onImage callback above become visible inside the subagent.
    this.researchSubagent = await createResearchSubagent(
      model,
      this.config.logger,
      researchSubagentPath,
      this.mcpClient,
      researchTools,
      this.researchImageCapture
    );
    this.config.logger.info(
      {
        toolCount: researchTools.length,
        toolNames: researchTools.map(t => t.name),
      },
      'Research subagent created successfully'
    );
  } catch (error) {
    this.config.logger.error(
      { error, errorMessage: (error as Error).message, stack: (error as Error).stack },
      'Failed to create research subagent'
    );
    // Don't throw - research subagent is optional
  }
}
/**
* Execute model with tool calling loop
* Handles multi-turn tool calls until the model produces a final text response
*/
private async executeWithToolCalling(
  model: any,
  messages: BaseMessage[],
  tools: DynamicStructuredTool[],
  maxIterations: number = 2
): Promise<string> {
  this.config.logger.info(
    { toolCount: tools.length, maxIterations },
    'Starting tool calling loop'
  );
  // Copy so tool-call turns appended below never mutate the caller's array.
  const messagesCopy = [...messages];
  let iterations = 0;
  while (iterations < maxIterations) {
    iterations++;
    this.config.logger.info(
      {
        iteration: iterations,
        messageCount: messagesCopy.length,
        lastMessageType: messagesCopy[messagesCopy.length - 1]?.constructor.name,
      },
      'Tool calling loop iteration'
    );
    this.config.logger.debug('Streaming model response...');
    let response: any = null;
    try {
      const stream = await model.stream(messagesCopy);
      for await (const chunk of stream) {
        // Forward text deltas to the user as they arrive.
        if (typeof chunk.content === 'string' && chunk.content.length > 0) {
          this.channelAdapter?.sendChunk(chunk.content);
        } else if (Array.isArray(chunk.content)) {
          for (const block of chunk.content) {
            if (block.type === 'text' && block.text) {
              this.channelAdapter?.sendChunk(block.text);
            }
          }
        }
        // Accumulate chunks; concat merges content and tool_call deltas.
        response = response ? response.concat(chunk) : chunk;
      }
    } catch (invokeError: any) {
      this.config.logger.error(
        {
          error: invokeError,
          errorMessage: invokeError?.message,
          errorStack: invokeError?.stack,
          iteration: iterations,
          messageCount: messagesCopy.length,
        },
        'Model streaming failed in tool calling loop'
      );
      throw invokeError;
    }
    // BUG FIX: a stream that yields zero chunks leaves `response` null, and
    // the property accesses below would crash with a bare TypeError. Fail
    // with a descriptive error instead so callers log something actionable.
    if (response === null) {
      this.config.logger.error(
        { iteration: iterations, messageCount: messagesCopy.length },
        'Model stream produced no chunks'
      );
      throw new Error('Model stream produced no chunks');
    }
    this.config.logger.info(
      {
        hasContent: !!response.content,
        contentLength: typeof response.content === 'string' ? response.content.length : 0,
        hasToolCalls: !!response.tool_calls,
        toolCallCount: response.tool_calls?.length || 0,
      },
      'Model response received'
    );
    // Check if model wants to call tools
    if (!response.tool_calls || response.tool_calls.length === 0) {
      // No tool calls - flatten the content to a plain string and return it.
      let finalContent: string;
      if (typeof response.content === 'string') {
        finalContent = response.content;
      } else if (Array.isArray(response.content)) {
        finalContent = response.content
          .filter((block: any) => block.type === 'text')
          .map((block: any) => block.text || '')
          .join('');
      } else {
        finalContent = JSON.stringify(response.content);
      }
      this.config.logger.info(
        { finalContentLength: finalContent.length, iterations },
        'Tool calling loop complete - no more tool calls'
      );
      return finalContent;
    }
    this.config.logger.info(
      { toolCalls: response.tool_calls.map((tc: any) => tc.name) },
      'Processing tool calls'
    );
    // Add assistant message with tool calls to history
    messagesCopy.push(response);
    // Execute each tool call
    for (const toolCall of response.tool_calls) {
      this.config.logger.info(
        { tool: toolCall.name, args: toolCall.args },
        'Executing tool call'
      );
      const tool = tools.find(t => t.name === toolCall.name);
      if (!tool) {
        // Unknown tool: surface the error to the model so it can recover.
        this.config.logger.warn({ tool: toolCall.name }, 'Tool not found');
        messagesCopy.push(
          new ToolMessage({
            content: `Error: Tool '${toolCall.name}' not found`,
            tool_call_id: toolCall.id,
          })
        );
        continue;
      }
      try {
        // Notify the channel with a user-friendly status label while running.
        this.channelAdapter?.sendToolCall?.(toolCall.name, this.getToolLabel(toolCall.name));
        const result = await tool.func(toolCall.args);
        // Process result to extract images and send them via channel adapter
        const processedResult = this.processToolResult(result, toolCall.name);
        this.config.logger.debug(
          {
            tool: toolCall.name,
            originalResultLength: result.length,
            processedResultLength: processedResult.length,
          },
          'Tool result processed'
        );
        messagesCopy.push(
          new ToolMessage({
            content: processedResult,
            tool_call_id: toolCall.id,
          })
        );
        this.config.logger.info(
          { tool: toolCall.name, resultLength: processedResult.length },
          'Tool execution completed'
        );
      } catch (error) {
        this.config.logger.error(
          {
            error,
            errorMessage: (error as Error)?.message,
            errorStack: (error as Error)?.stack,
            tool: toolCall.name,
            args: toolCall.args,
          },
          'Tool execution failed'
        );
        // Feed the failure back to the model as a tool result.
        messagesCopy.push(
          new ToolMessage({
            content: `Error: ${error}`,
            tool_call_id: toolCall.id,
          })
        );
      }
    }
  }
  // Max iterations reached - return what we have
  this.config.logger.warn('Max tool calling iterations reached');
  return 'I apologize, but I encountered an issue processing your request. Please try rephrasing your question.';
}
/**
* Handle incoming message from user
*/
async handleMessage(message: InboundMessage): Promise<OutboundMessage> {
this.config.logger.info(
{ messageId: message.messageId, userId: message.userId },
{ messageId: message.messageId, userId: message.userId, content: message.content.substring(0, 100) },
'Processing user message'
);
try {
// 1. Fetch context resources from user's MCP server
this.config.logger.debug('Fetching context resources from MCP');
const contextResources = await this.fetchContextResources();
// 1. Build system prompt from template
this.config.logger.debug('Building system prompt');
const systemPrompt = await this.buildSystemPrompt();
this.config.logger.debug({ systemPromptLength: systemPrompt.length }, 'System prompt built');
// 2. Build system prompt from resources
const systemPrompt = this.buildSystemPrompt(contextResources);
// 2. Load recent conversation history
const channelKey = this.config.channelType ?? ChannelType.WEBSOCKET;
const storedMessages = this.conversationStore
? await this.conversationStore.getRecentMessages(
this.config.userId, this.config.sessionId, this.config.historyLimit, channelKey
)
: [];
const history = this.conversationStore
? this.conversationStore.toLangChainMessages(storedMessages)
: [];
this.config.logger.debug({ historyLength: history.length }, 'Conversation history loaded');
// 3. Build messages with conversation context from MCP
const messages = this.buildMessages(message, contextResources);
// 4. Route to appropriate model
// 4. Get the configured model
this.config.logger.debug('Routing to model');
const model = await this.modelRouter.route(
message.content,
this.config.license,
RoutingStrategy.COMPLEXITY
RoutingStrategy.COMPLEXITY,
this.config.userId
);
this.config.logger.info({ modelName: model.constructor.name }, 'Model selected');
// 5. Build LangChain messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
const langchainMessages = this.buildLangChainMessages(systemPrompt, history, message.content);
this.config.logger.debug({ messageCount: langchainMessages.length }, 'LangChain messages built');
// 6. Call LLM with streaming
this.config.logger.debug('Invoking LLM');
const response = await model.invoke(langchainMessages);
// 6. Get tools for main agent from registry
const toolRegistry = getToolRegistry();
const tools = await toolRegistry.getToolsForAgent(
'main',
this.mcpClient,
this.availableMCPTools,
this.workspaceManager // Pass session workspace manager
);
// 7. Extract text response (tool handling TODO)
const assistantMessage = response.content as string;
// Add research subagent as a tool if available
if (this.researchSubagent) {
const subagentContext = {
userContext: createUserContext({
userId: this.config.userId,
sessionId: this.config.sessionId,
license: this.config.license,
channelType: this.config.channelType ?? ChannelType.WEBSOCKET,
channelUserId: this.config.channelUserId ?? this.config.userId,
}),
};
// TODO: Save messages to Iceberg conversation table instead of MCP
// Should batch-insert periodically or on session end to avoid many small Parquet files
// await icebergConversationStore.appendMessages([...]);
tools.push(createResearchAgentTool({
researchSubagent: this.researchSubagent,
context: subagentContext,
logger: this.config.logger,
}));
}
this.config.logger.info(
{
toolCount: tools.length,
toolNames: tools.map(t => t.name),
},
'Tools loaded for main agent'
);
// 7. Bind tools to model
const modelWithTools = tools.length > 0 && model.bindTools ? model.bindTools(tools) : model;
if (tools.length > 0) {
this.config.logger.info(
{ modelType: modelWithTools.constructor.name, toolsBound: tools.length > 0 && !!model.bindTools },
'Model bound with tools'
);
}
// 8. Call LLM with tool calling loop
this.config.logger.info('Invoking LLM with tool support');
const assistantMessage = await this.executeWithToolCalling(modelWithTools, langchainMessages, tools);
this.config.logger.info(
{ responseLength: assistantMessage.length },
'LLM response received'
);
// Save user message and assistant response to conversation store
if (this.conversationStore) {
await this.conversationStore.saveMessage(
this.config.userId, this.config.sessionId, 'user', message.content, undefined, channelKey
);
await this.conversationStore.saveMessage(
this.config.userId, this.config.sessionId, 'assistant', assistantMessage, undefined, channelKey
);
}
// Mark first message as processed
if (this.isFirstMessage) {
@@ -129,214 +535,174 @@ export class AgentHarness {
}
}
/**
* Stream response from LLM
*/
async *streamMessage(message: InboundMessage): AsyncGenerator<string> {
  try {
    // Fetch context
    const contextResources = await this.fetchContextResources();
    const systemPrompt = this.buildSystemPrompt(contextResources);
    const messages = this.buildMessages(message, contextResources);
    // Route to model
    const model = await this.modelRouter.route(
      message.content,
      this.config.license,
      RoutingStrategy.COMPLEXITY
    );
    // Build messages
    const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
    // Stream response
    const stream = await model.stream(langchainMessages);
    let fullResponse = '';
    for await (const chunk of stream) {
      // NOTE(review): assumes every chunk's content is a plain string;
      // structured (array) content would stringify oddly — confirm upstream.
      const content = chunk.content as string;
      fullResponse += content;
      yield content;
    }
    // TODO: Save messages to Iceberg conversation table instead of MCP
    // Should batch-insert periodically or on session end to avoid many small Parquet files
    // await icebergConversationStore.appendMessages([
    //   { role: 'user', content: message.content, timestamp: message.timestamp },
    //   { role: 'assistant', content: fullResponse, timestamp: new Date() }
    // ]);
    // Mark first message as processed
    if (this.isFirstMessage) {
      this.isFirstMessage = false;
    }
  } catch (error) {
    this.config.logger.error({ error }, 'Error streaming message');
    throw error;
  }
}
/**
* Fetch context resources from user's MCP server
*/
/**
 * Fetch the standard context resources from the user's MCP server.
 * All four URIs are read in parallel; a failed read is logged and replaced
 * with an empty placeholder so one bad resource never blocks the rest.
 */
private async fetchContextResources(): Promise<ResourceContent[]> {
  // Best-effort read of a single resource; empty text on failure.
  const readOrEmpty = async (uri: string) => {
    try {
      return await this.mcpClient.readResource(uri);
    } catch (error) {
      this.config.logger.warn({ error, uri }, 'Failed to fetch resource, using empty');
      return { uri, text: '' };
    }
  };
  return Promise.all(
    [
      CONTEXT_URIS.USER_PROFILE,
      CONTEXT_URIS.CONVERSATION_SUMMARY,
      CONTEXT_URIS.WORKSPACE_STATE,
      CONTEXT_URIS.SYSTEM_PROMPT,
    ].map(readOrEmpty)
  );
}
/**
* Build messages array with context from resources
*/
/**
 * Assemble the turn's chat messages from MCP context plus the incoming text.
 * Order: prior-conversation summary (as a primed user/assistant exchange),
 * then any workspace delta, then the current user message.
 */
private buildMessages(
  currentMessage: InboundMessage,
  contextResources: ResourceContent[]
): Array<{ role: string; content: string }> {
  const messages: Array<{ role: string; content: string }> = [];

  const summary = contextResources.find(
    (r) => r.uri === CONTEXT_URIS.CONVERSATION_SUMMARY
  );
  // Seed the chat with prior-conversation context as a primed exchange.
  if (summary?.text) {
    messages.push(
      {
        role: 'user',
        content: `[Previous Conversation Context]\n${summary.text}`,
      },
      {
        role: 'assistant',
        content: 'I understand the context from our previous conversations.',
      }
    );
  }

  // Workspace changes since the last turn, if any.
  const delta = this.buildWorkspaceDelta();
  if (delta) {
    messages.push({ role: 'user', content: delta });
  }

  messages.push({ role: 'user', content: currentMessage.content });
  return messages;
}
/**
* Convert to LangChain message format
*/
private buildLangChainMessages(
systemPrompt: string,
messages: Array<{ role: string; content: string }>
history: BaseMessage[],
currentUserMessage: string
): BaseMessage[] {
const langchainMessages: BaseMessage[] = [new SystemMessage(systemPrompt)];
for (const msg of messages) {
if (msg.role === 'user') {
langchainMessages.push(new HumanMessage(msg.content));
} else if (msg.role === 'assistant') {
langchainMessages.push(new AIMessage(msg.content));
}
}
return langchainMessages;
return [
new SystemMessage(systemPrompt),
...history,
new HumanMessage(currentUserMessage),
];
}
/**
* Build system prompt from platform base + user resources
* Build system prompt from template
*/
private buildSystemPrompt(contextResources: ResourceContent[]): string {
const userProfile = contextResources.find((r) => r.uri === CONTEXT_URIS.USER_PROFILE);
const customPrompt = contextResources.find((r) => r.uri === CONTEXT_URIS.SYSTEM_PROMPT);
const workspaceState = contextResources.find((r) => r.uri === CONTEXT_URIS.WORKSPACE_STATE);
// Base platform prompt
let prompt = `You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data.
User license: ${this.config.license.licenseType}
Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
// Add user profile context
if (userProfile?.text) {
prompt += `\n\n# User Profile\n${userProfile.text}`;
}
// Add workspace context from MCP resource (if available)
if (workspaceState?.text) {
prompt += `\n\n# Current Workspace (from MCP)\n${workspaceState.text}`;
}
private async buildSystemPrompt(): Promise<string> {
// Load template and populate with license info
const template = await AgentHarness.loadSystemPromptTemplate();
let prompt = template
.replace('{{licenseType}}', this.config.license.licenseType)
.replace('{{features}}', JSON.stringify(this.config.license.features, null, 2));
// Add full workspace state from WorkspaceManager (first message only)
if (this.isFirstMessage && this.workspaceManager) {
const workspaceJSON = this.workspaceManager.serializeState();
prompt += `\n\n# Workspace State (JSON)\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
// Record current workspace sequence for delta tracking
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
}
// Add user's custom instructions (highest priority)
if (customPrompt?.text) {
prompt += `\n\n# User Instructions\n${customPrompt.text}`;
prompt += `\n\n# Current Workspace State\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
}
return prompt;
}
/**
* Build workspace delta message for subsequent turns.
* Returns null if no changes since last message.
* Map tool names to user-friendly status labels.
*/
private buildWorkspaceDelta(): string | null {
if (!this.workspaceManager || this.isFirstMessage) {
return null;
}
const changes = this.workspaceManager.getChangesSince(this.lastWorkspaceSeq);
if (Object.keys(changes).length === 0) {
return null;
}
// Format changes as JSON
const deltaJSON = JSON.stringify(changes, null, 2);
// Update sequence marker
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
return `[Workspace Changes Since Last Turn]\n\`\`\`json\n${deltaJSON}\n\`\`\``;
/**
 * Map a tool name to the user-facing status label streamed to the channel
 * while that tool runs; unknown tools get a generic "Running ..." label.
 */
private getToolLabel(toolName: string): string {
  switch (toolName) {
    case 'research_agent':
      return 'Researching...';
    case 'get_chart_data':
      return 'Fetching chart data...';
    case 'symbol_lookup':
      return 'Looking up symbol...';
    default:
      return `Running ${toolName}...`;
  }
}
/**
* Process tool result to extract images and send via channel adapter.
* Returns text-only version for LLM context (no base64 image data).
*/
private processToolResult(result: string, toolName: string): string {
  // Most tools return plain strings - only process JSON results
  if (!result || typeof result !== 'string') {
    // Defensive: runtime value may not match the declared type.
    return String(result || '');
  }
  // Try to parse as JSON
  let parsedResult: any;
  try {
    parsedResult = JSON.parse(result);
  } catch {
    // Not JSON, return as-is
    return result;
  }
  // Check if result has images array (from ResearchSubagent)
  if (parsedResult && Array.isArray(parsedResult.images) && parsedResult.images.length > 0) {
    this.config.logger.info(
      { tool: toolName, imageCount: parsedResult.images.length },
      'Extracting images from tool result'
    );
    // Send each image via channel adapter
    for (const image of parsedResult.images) {
      if (image.data && image.mimeType) {
        if (this.channelAdapter) {
          this.config.logger.debug({ mimeType: image.mimeType }, 'Sending image to channel');
          // NOTE(review): image.data is presumably base64 — confirm against
          // the channel adapter's sendImage contract.
          this.channelAdapter.sendImage({
            data: image.data,
            mimeType: image.mimeType,
            caption: undefined,
          });
        } else {
          this.config.logger.warn('No channel adapter set, cannot send image');
        }
      }
    }
    // Create text-only version for LLM
    const textOnlyResult = {
      ...parsedResult,
      images: undefined,
      imageCount: parsedResult.images.length,
    };
    // Clean up undefined values
    Object.keys(textOnlyResult).forEach(key => {
      if (textOnlyResult[key] === undefined) {
        delete textOnlyResult[key];
      }
    });
    return JSON.stringify(textOnlyResult);
  }
  // Check for nested chart_images object
  if (parsedResult && parsedResult.chart_images && typeof parsedResult.chart_images === 'object') {
    this.config.logger.info(
      { tool: toolName, chartCount: Object.keys(parsedResult.chart_images).length },
      'Extracting chart images from tool result'
    );
    // Send each chart image via channel adapter
    for (const [chartId, chartData] of Object.entries(parsedResult.chart_images)) {
      const chart = chartData as any;
      if (chart.type === 'image' && chart.data) {
        if (this.channelAdapter) {
          this.config.logger.debug({ chartId }, 'Sending chart image to channel');
          this.channelAdapter.sendImage({
            data: chart.data,
            mimeType: 'image/png',
            caption: undefined,
          });
        } else {
          this.config.logger.warn('No channel adapter set, cannot send chart image');
        }
      }
    }
    // Create text-only version for LLM
    const textOnlyResult = {
      ...parsedResult,
      chart_images: undefined,
      chartCount: Object.keys(parsedResult.chart_images).length,
    };
    // Clean up undefined values
    Object.keys(textOnlyResult).forEach(key => {
      if (textOnlyResult[key] === undefined) {
        delete textOnlyResult[key];
      }
    });
    return JSON.stringify(textOnlyResult);
  }
  // No images found: return the original string unchanged (it is already
  // valid JSON text at this point).
  return result;
}
/**
* Cleanup resources
* End the session: flush conversation to cold storage, then release resources.
* Called by channel handlers on disconnect, session expiry, or graceful shutdown.
*/
async cleanup(): Promise<void> {
  const { logger, userId, sessionId, historyLimit, channelType } = this.config;
  logger.info('Cleaning up agent harness');
  // Flush first, disconnect second; a flush failure must not block disconnect.
  const store = this.conversationStore;
  if (store) {
    try {
      await store.flushToIceberg(
        userId,
        sessionId,
        historyLimit,
        channelType ?? ChannelType.WEBSOCKET
      );
    } catch (error) {
      logger.error({ error }, 'Failed to flush conversation to Iceberg during cleanup');
    }
  }
  await this.mcpClient.disconnect();
}
}