container lifecycle management

2026-03-12 15:13:38 -04:00
parent e99ef5d2dd
commit b9cc397e05
61 changed files with 6880 additions and 31 deletions

@@ -0,0 +1,306 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
import type { UserLicense } from '../types/user.js';
import type { InboundMessage, OutboundMessage } from '../types/messages.js';
import { MCPClientConnector } from './mcp-client.js';
import { CONTEXT_URIS, type ResourceContent } from '../types/resources.js';
import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js';
import { ModelRouter, RoutingStrategy } from '../llm/router.js';
export interface AgentHarnessConfig {
userId: string;
sessionId: string;
license: UserLicense;
providerConfig: ProviderConfig;
logger: FastifyBaseLogger;
}
/**
* Agent harness that orchestrates between the LLM and the user's MCP server.
*
* This is a STATELESS orchestrator - all conversation history, RAG data, and
* context live in the user's MCP server container. The harness only:
* 1. Fetches context from user's MCP resources
* 2. Routes to appropriate LLM model
* 3. Calls LLM with embedded context
* 4. Routes tool calls to user's MCP or platform tools
* 5. Saves messages back to user's MCP
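*
* @example
* // A minimal usage sketch; `license`, `providerConfig`, `app`, and `inbound`
* // are hypothetical placeholders assumed to be created elsewhere.
* const harness = new AgentHarness({
*   userId: 'user-123',
*   sessionId: 'session-abc',
*   license,
*   providerConfig,
*   logger: app.log,
* });
* await harness.initialize();
* const reply = await harness.handleMessage(inbound);
* await harness.cleanup();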
*/
export class AgentHarness {
private config: AgentHarnessConfig;
private modelFactory: LLMProviderFactory;
private modelRouter: ModelRouter;
private mcpClient: MCPClientConnector;
constructor(config: AgentHarnessConfig) {
this.config = config;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
this.mcpClient = new MCPClientConnector({
userId: config.userId,
mcpServerUrl: config.license.mcpServerUrl,
logger: config.logger,
});
}
/**
* Initialize harness and connect to user's MCP server
*/
async initialize(): Promise<void> {
this.config.logger.info(
{ userId: this.config.userId, sessionId: this.config.sessionId },
'Initializing agent harness'
);
try {
await this.mcpClient.connect();
this.config.logger.info('Agent harness initialized');
} catch (error) {
this.config.logger.error({ error }, 'Failed to initialize agent harness');
throw error;
}
}
/**
* Handle incoming message from user
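*
* Runs the full pipeline for one turn (fetch context, route model, invoke,
* persist both messages) and returns a single non-streaming response.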
*/
async handleMessage(message: InboundMessage): Promise<OutboundMessage> {
this.config.logger.info(
{ messageId: message.messageId, userId: message.userId },
'Processing user message'
);
try {
// 1. Fetch context resources from user's MCP server
this.config.logger.debug('Fetching context resources from MCP');
const contextResources = await this.fetchContextResources();
// 2. Build system prompt from resources
const systemPrompt = this.buildSystemPrompt(contextResources);
// 3. Build messages with conversation context from MCP
const messages = this.buildMessages(message, contextResources);
// 4. Route to appropriate model
const model = await this.modelRouter.route(
message.content,
this.config.license,
RoutingStrategy.COMPLEXITY
);
// 5. Build LangChain messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
// 6. Call LLM (non-streaming; see streamMessage for the streaming path)
this.config.logger.debug('Invoking LLM');
const response = await model.invoke(langchainMessages);
// 7. Extract text response (tool handling TODO)
const assistantMessage = response.content as string;
// 8. Save messages to user's MCP server
this.config.logger.debug('Saving messages to MCP');
await this.mcpClient.callTool('save_message', {
role: 'user',
content: message.content,
timestamp: message.timestamp.toISOString(),
});
await this.mcpClient.callTool('save_message', {
role: 'assistant',
content: assistantMessage,
timestamp: new Date().toISOString(),
});
return {
messageId: `msg_${Date.now()}`,
sessionId: message.sessionId,
content: assistantMessage,
timestamp: new Date(),
};
} catch (error) {
this.config.logger.error({ error }, 'Error processing message');
throw error;
}
}
/**
* Stream response from LLM
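*
* @example
* // Sketch of consuming the generator; `harness`, `inbound`, and `socket` are
* // hypothetical placeholders for an initialized harness and a transport.
* for await (const chunk of harness.streamMessage(inbound)) {
*   socket.send(chunk); // forward each token as it arrives
* }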
*/
async *streamMessage(message: InboundMessage): AsyncGenerator<string> {
try {
// Fetch context
const contextResources = await this.fetchContextResources();
const systemPrompt = this.buildSystemPrompt(contextResources);
const messages = this.buildMessages(message, contextResources);
// Route to model
const model = await this.modelRouter.route(
message.content,
this.config.license,
RoutingStrategy.COMPLEXITY
);
// Build messages
const langchainMessages = this.buildLangChainMessages(systemPrompt, messages);
// Stream response
const stream = await model.stream(langchainMessages);
let fullResponse = '';
for await (const chunk of stream) {
const content = chunk.content as string;
fullResponse += content;
yield content;
}
// Save after streaming completes
await this.mcpClient.callTool('save_message', {
role: 'user',
content: message.content,
timestamp: message.timestamp.toISOString(),
});
await this.mcpClient.callTool('save_message', {
role: 'assistant',
content: fullResponse,
timestamp: new Date().toISOString(),
});
} catch (error) {
this.config.logger.error({ error }, 'Error streaming message');
throw error;
}
}
/**
* Fetch context resources from user's MCP server
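*
* Resources are fetched in parallel; a failed read is logged and degrades to
* an empty resource rather than failing the whole request.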
*/
private async fetchContextResources(): Promise<ResourceContent[]> {
const contextUris = [
CONTEXT_URIS.USER_PROFILE,
CONTEXT_URIS.CONVERSATION_SUMMARY,
CONTEXT_URIS.WORKSPACE_STATE,
CONTEXT_URIS.SYSTEM_PROMPT,
];
const resources = await Promise.all(
contextUris.map(async (uri) => {
try {
return await this.mcpClient.readResource(uri);
} catch (error) {
this.config.logger.warn({ error, uri }, 'Failed to fetch resource, using empty');
return { uri, text: '' };
}
})
);
return resources;
}
/**
* Build messages array with context from resources
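*
* The conversation summary, when present, is injected as a synthetic
* user/assistant exchange ahead of the current message.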
*/
private buildMessages(
currentMessage: InboundMessage,
contextResources: ResourceContent[]
): Array<{ role: string; content: string }> {
const conversationSummary = contextResources.find(
(r) => r.uri === CONTEXT_URIS.CONVERSATION_SUMMARY
);
const messages: Array<{ role: string; content: string }> = [];
// Add conversation context as a system-like user message
if (conversationSummary?.text) {
messages.push({
role: 'user',
content: `[Previous Conversation Context]\n${conversationSummary.text}`,
});
messages.push({
role: 'assistant',
content: 'I understand the context from our previous conversations.',
});
}
// Add current user message
messages.push({
role: 'user',
content: currentMessage.content,
});
return messages;
}
/**
* Convert to LangChain message format
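*
* The system prompt always comes first; messages with roles other than
* 'user' or 'assistant' are dropped.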
*/
private buildLangChainMessages(
systemPrompt: string,
messages: Array<{ role: string; content: string }>
): BaseMessage[] {
const langchainMessages: BaseMessage[] = [new SystemMessage(systemPrompt)];
for (const msg of messages) {
if (msg.role === 'user') {
langchainMessages.push(new HumanMessage(msg.content));
} else if (msg.role === 'assistant') {
langchainMessages.push(new AIMessage(msg.content));
}
}
return langchainMessages;
}
/**
* Build system prompt from platform base + user resources
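*
* Resulting layout (each section appended only when its resource has text):
*   <platform base prompt>
*   # User Profile
*   # Current Workspace
*   # User Instructions   (added last, highest priority)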
*/
private buildSystemPrompt(contextResources: ResourceContent[]): string {
const userProfile = contextResources.find((r) => r.uri === CONTEXT_URIS.USER_PROFILE);
const customPrompt = contextResources.find((r) => r.uri === CONTEXT_URIS.SYSTEM_PROMPT);
const workspaceState = contextResources.find((r) => r.uri === CONTEXT_URIS.WORKSPACE_STATE);
// Base platform prompt
let prompt = `You are a helpful AI assistant for Dexorder, an AI-first trading platform.
You help users research markets, develop indicators and strategies, and analyze trading data.
User license: ${this.config.license.licenseType}
Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
// Add user profile context
if (userProfile?.text) {
prompt += `\n\n# User Profile\n${userProfile.text}`;
}
// Add workspace context
if (workspaceState?.text) {
prompt += `\n\n# Current Workspace\n${workspaceState.text}`;
}
// Add user's custom instructions (highest priority)
if (customPrompt?.text) {
prompt += `\n\n# User Instructions\n${customPrompt.text}`;
}
return prompt;
}
/**
* Get platform tools (non-user-specific tools)
*/
private getPlatformTools(): Array<{ name: string; description?: string }> {
// Platform tools that don't need user's MCP
return [
// TODO: Add platform tools like market data queries, chart rendering, etc.
];
}
/**
* Cleanup resources
*/
async cleanup(): Promise<void> {
this.config.logger.info('Cleaning up agent harness');
await this.mcpClient.disconnect();
}
}