chart data loading

This commit is contained in:
2026-03-24 21:37:49 -04:00
parent f6bd22a8ef
commit c76887ab92
65 changed files with 6350 additions and 713 deletions

View File

@@ -9,10 +9,13 @@ COPY tsconfig.json ./
# Install dependencies
RUN npm install
# Copy protobuf definitions
COPY protobuf ../protobuf/
# Copy source
COPY src ./src
# Build
# Build (includes protobuf generation)
RUN npm run build
# Production image
@@ -53,6 +56,12 @@ RUN npm install --omit=dev
# Copy built application
COPY --from=builder /app/dist ./dist
# Copy protobuf definitions for runtime loading
COPY protobuf ./protobuf
# Copy k8s templates (not included in TypeScript build)
COPY src/k8s/templates ./dist/k8s/templates
# Copy entrypoint script
COPY entrypoint.sh ./
RUN chmod +x entrypoint.sh

View File

@@ -43,8 +43,16 @@ qdrant:
iceberg:
catalog_uri: http://iceberg-catalog:8181
namespace: gateway
# Future: Separate OHLC database
# ohlc_catalog_uri: http://iceberg-catalog-trading:8181
# ohlc_namespace: trading
s3_endpoint: http://minio:9000
# ZMQ Relay configuration for historical data
relay:
request_endpoint: tcp://relay:5559
notification_endpoint: tcp://relay:5558
# Event router (ZeroMQ)
events:
router_bind: tcp://*:5571

View File

@@ -1,7 +1,7 @@
-- Development seed data
-- This file contains sample data for local development and testing
--
-- Dev user: cryptochimp@dexorder.ai / moon2the
-- Dev user: tim@test / test
-- User is created via Better Auth API in bin/dev script
-- License is also created in bin/dev script
--

View File

@@ -5,8 +5,9 @@
"private": true,
"description": "Multi-channel gateway with agent harness for Dexorder AI platform",
"scripts": {
"proto": "mkdir -p src/generated && pbjs -t static-module -w es6 -o src/generated/proto.js ../protobuf/*.proto && pbts -o src/generated/proto.d.ts src/generated/proto.js && sed -i 's/from \"protobufjs\\/minimal\"/from \"protobufjs\\/minimal.js\"/g' src/generated/proto.js",
"dev": "tsx watch src/main.ts",
"build": "tsc",
"build": "npm run proto && tsc",
"start": "node dist/main.js",
"typecheck": "tsc --noEmit"
},
@@ -22,8 +23,9 @@
"@qdrant/js-client-rest": "^1.17.0",
"argon2": "^0.41.1",
"better-auth": "^1.5.3",
"duckdb": "^1.1.3",
"fast-json-patch": "^3.1.1",
"fastify": "^5.2.0",
"iceberg-js": "latest",
"ioredis": "^5.4.2",
"js-yaml": "^4.1.0",
"kysely": "^0.27.3",
@@ -31,6 +33,7 @@
"pg": "^8.13.1",
"pino": "^9.6.0",
"pino-pretty": "^13.0.0",
"protobufjs": "^7.4.0",
"zeromq": "^6.0.0-beta.20",
"zod": "^3.24.1"
},
@@ -38,6 +41,7 @@
"@types/js-yaml": "^4.0.9",
"@types/node": "^22.10.2",
"@types/pg": "^8.11.10",
"protobufjs-cli": "^1.1.2",
"tsx": "^4.21.0",
"typescript": "^5.7.2"
},

View File

@@ -1,258 +0,0 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.dexorder.proto";
// User container event system for delivering notifications to users
// via active sessions or external channels (Telegram, email, push).
//
// Two ZMQ patterns:
// - XPUB/SUB (port 5570): Fast path for informational events to active sessions
// - DEALER/ROUTER (port 5571): Guaranteed delivery for critical events with ack
//
// See doc/protocol.md and doc/user_container_events.md for details.
// =============================================================================
// User Event (Container → Gateway)
// Message Type ID: 0x20
// =============================================================================
message UserEvent {
// User ID this event belongs to
string user_id = 1;
// Unique event ID for deduplication and ack tracking (UUID)
string event_id = 2;
// Timestamp when event was generated (Unix milliseconds)
int64 timestamp = 3;
// Type of event
EventType event_type = 4;
// Event payload (JSON or nested protobuf, depending on event_type)
bytes payload = 5;
// Delivery specification (priority and channel preferences)
DeliverySpec delivery = 6;
}
enum EventType {
// Trading events
ORDER_PLACED = 0;
ORDER_FILLED = 1;
ORDER_CANCELLED = 2;
ORDER_REJECTED = 3;
ORDER_EXPIRED = 4;
// Alert events
ALERT_TRIGGERED = 10;
ALERT_CREATED = 11;
ALERT_DELETED = 12;
// Position events
POSITION_OPENED = 20;
POSITION_CLOSED = 21;
POSITION_UPDATED = 22;
POSITION_LIQUIDATED = 23;
// Workspace/chart events
WORKSPACE_CHANGED = 30;
CHART_ANNOTATION_ADDED = 31;
CHART_ANNOTATION_REMOVED = 32;
INDICATOR_UPDATED = 33;
// Strategy events
STRATEGY_STARTED = 40;
STRATEGY_STOPPED = 41;
STRATEGY_LOG = 42;
STRATEGY_ERROR = 43;
BACKTEST_COMPLETED = 44;
// System events
CONTAINER_STARTING = 50;
CONTAINER_READY = 51;
CONTAINER_SHUTTING_DOWN = 52;
ERROR = 53;
}
// =============================================================================
// Delivery Specification
// =============================================================================
message DeliverySpec {
// Priority determines routing behavior
Priority priority = 1;
// Ordered list of channel preferences (try first, then second, etc.)
repeated ChannelPreference channels = 2;
}
enum Priority {
// Drop if no active session (fire-and-forget via XPUB)
// Use for: indicator updates, chart syncs, strategy logs when watching
INFORMATIONAL = 0;
// Best effort delivery - queue briefly, deliver when possible
// Uses XPUB if subscribed, otherwise DEALER
// Use for: alerts, position updates
NORMAL = 1;
// Must deliver - retry until acked, escalate channels
// Always uses DEALER for guaranteed delivery
// Use for: order fills, liquidations, critical errors
CRITICAL = 2;
}
message ChannelPreference {
// Channel to deliver to
ChannelType channel = 1;
// If true, skip this channel if user is not connected to it
// If false, deliver even if user is not actively connected
// (e.g., send Telegram message even if user isn't in Telegram chat)
bool only_if_active = 2;
}
enum ChannelType {
// Whatever channel the user currently has open (WebSocket, Telegram session)
ACTIVE_SESSION = 0;
// Specific channels
WEB = 1; // WebSocket to web UI
TELEGRAM = 2; // Telegram bot message
EMAIL = 3; // Email notification
PUSH = 4; // Mobile push notification (iOS/Android)
DISCORD = 5; // Discord webhook (future)
SLACK = 6; // Slack webhook (future)
}
// =============================================================================
// Event Acknowledgment (Gateway → Container)
// Message Type ID: 0x21
// =============================================================================
message EventAck {
// Event ID being acknowledged
string event_id = 1;
// Delivery status
AckStatus status = 2;
// Error message if status is ERROR
string error_message = 3;
// Channel that successfully delivered (for logging/debugging)
ChannelType delivered_via = 4;
}
enum AckStatus {
// Successfully delivered to at least one channel
DELIVERED = 0;
// Accepted and queued for delivery (e.g., rate limited, will retry)
QUEUED = 1;
// Permanent failure - all channels failed
ERROR = 2;
}
// =============================================================================
// Event Payloads
// These are JSON-encoded in the UserEvent.payload field.
// Defined here for documentation; actual encoding is JSON for flexibility.
// =============================================================================
// Payload for ORDER_PLACED, ORDER_FILLED, ORDER_CANCELLED, etc.
message OrderEventPayload {
string order_id = 1;
string symbol = 2;
string side = 3; // "buy" or "sell"
string order_type = 4; // "market", "limit", "stop_limit", etc.
string quantity = 5; // Decimal string
string price = 6; // Decimal string (for limit orders)
string fill_price = 7; // Decimal string (for fills)
string fill_quantity = 8; // Decimal string (for partial fills)
string status = 9; // "open", "filled", "cancelled", etc.
string exchange = 10;
int64 timestamp = 11; // Unix milliseconds
string strategy_id = 12; // If order was placed by a strategy
string error_message = 13; // If rejected/failed
}
// Payload for ALERT_TRIGGERED
message AlertEventPayload {
string alert_id = 1;
string symbol = 2;
string condition = 3; // Human-readable condition (e.g., "BTC > 50000")
string triggered_price = 4; // Decimal string
int64 timestamp = 5;
}
// Payload for POSITION_OPENED, POSITION_CLOSED, POSITION_UPDATED
message PositionEventPayload {
string position_id = 1;
string symbol = 2;
string side = 3; // "long" or "short"
string size = 4; // Decimal string
string entry_price = 5; // Decimal string
string current_price = 6; // Decimal string
string unrealized_pnl = 7; // Decimal string
string realized_pnl = 8; // Decimal string (for closed positions)
string leverage = 9; // Decimal string (for margin)
string liquidation_price = 10;
string exchange = 11;
int64 timestamp = 12;
}
// Payload for WORKSPACE_CHANGED, CHART_ANNOTATION_*, INDICATOR_UPDATED
message WorkspaceEventPayload {
string workspace_id = 1;
string change_type = 2; // "symbol_changed", "timeframe_changed", "annotation_added", etc.
string symbol = 3;
string timeframe = 4;
// For annotations
string annotation_id = 5;
string annotation_type = 6; // "trendline", "horizontal", "rectangle", "text", etc.
string annotation_data = 7; // JSON string with coordinates, style, etc.
// For indicators
string indicator_name = 8;
string indicator_params = 9; // JSON string with indicator parameters
int64 timestamp = 10;
}
// Payload for STRATEGY_LOG, STRATEGY_ERROR
message StrategyEventPayload {
string strategy_id = 1;
string strategy_name = 2;
string log_level = 3; // "debug", "info", "warn", "error"
string message = 4;
string details = 5; // JSON string with additional context
int64 timestamp = 6;
}
// Payload for BACKTEST_COMPLETED
message BacktestEventPayload {
string backtest_id = 1;
string strategy_id = 2;
string strategy_name = 3;
string symbol = 4;
string timeframe = 5;
int64 start_time = 6;
int64 end_time = 7;
// Results summary
int32 total_trades = 8;
int32 winning_trades = 9;
int32 losing_trades = 10;
string total_pnl = 11; // Decimal string
string win_rate = 12; // Decimal string (0-1)
string sharpe_ratio = 13; // Decimal string
string max_drawdown = 14; // Decimal string (0-1)
string results_path = 15; // Path to full results file
int64 completed_at = 16;
}

View File

@@ -19,25 +19,38 @@ export class AuthService {
}
/**
* Verify JWT token and return user ID
* Replaces the placeholder implementation in UserService
* Verify session token and return user ID
* Uses Better Auth's bearer plugin for token verification
*/
async verifyToken(token: string): Promise<string | null> {
try {
// Better Auth's session verification
this.config.logger.debug({
tokenLength: token?.length,
tokenPrefix: token?.substring(0, 8),
}, 'Verifying token');
// Use Better Auth's getSession with Bearer token
// The bearer plugin allows us to pass the session token via Authorization header
const session = await this.config.auth.api.getSession({
headers: {
authorization: `Bearer ${token}`,
},
headers: new Headers({
'Authorization': `Bearer ${token}`,
}),
});
this.config.logger.debug({
hasSession: !!session,
hasUser: !!session?.user,
userId: session?.user?.id,
}, 'Session verification result');
if (!session || !session.user) {
this.config.logger.warn('Session verification failed: no session or user');
return null;
}
return session.user.id;
} catch (error) {
this.config.logger.debug({ error }, 'Token verification failed');
this.config.logger.error({ error }, 'Token verification failed with error');
return null;
}
}
@@ -76,17 +89,47 @@ export class AuthService {
/**
* Sign in with email and password
* Returns the bearer token from response headers
*/
async signIn(email: string, password: string): Promise<{ token: string; userId: string; error?: string }> {
try {
const result = await this.config.auth.api.signInEmail({
this.config.logger.debug({ email }, 'Attempting sign in');
// Use asResponse: true to get the full Response object with headers
const response = await this.config.auth.api.signInEmail({
body: {
email,
password,
},
asResponse: true,
});
if (!result.token || !result.user) {
// Extract bearer token from response headers (set by bearer plugin)
const token = response.headers.get('set-auth-token');
if (!token) {
this.config.logger.error('Bearer token not found in response headers');
return {
token: '',
userId: '',
error: 'Authentication token not generated',
};
}
// Parse the response body to get user info
const result = await response.json() as {
user?: { id: string; email: string; name: string };
error?: string;
};
this.config.logger.debug({
hasUser: !!result.user,
userId: result.user?.id,
hasToken: !!token,
}, 'Sign in result');
if (!result.user) {
this.config.logger.warn('Sign in failed: no user in result');
return {
token: '',
userId: '',
@@ -95,11 +138,11 @@ export class AuthService {
}
return {
token: result.token,
token,
userId: result.user.id,
};
} catch (error: any) {
this.config.logger.error({ error }, 'Sign in failed');
this.config.logger.error({ error }, 'Sign in failed with error');
return {
token: '',
userId: '',
@@ -115,7 +158,8 @@ export class AuthService {
try {
await this.config.auth.api.signOut({
headers: {
authorization: `Bearer ${token}`,
// Better Auth expects the session token in the cookie header
cookie: `better-auth.session_token=${token}`,
},
});
@@ -133,7 +177,8 @@ export class AuthService {
try {
const session = await this.config.auth.api.getSession({
headers: {
authorization: `Bearer ${token}`,
// Better Auth expects the session token in the cookie header
cookie: `better-auth.session_token=${token}`,
},
});

View File

@@ -3,6 +3,11 @@ import { UserService } from '../db/user-service.js';
import { ChannelType, type AuthContext } from '../types/user.js';
import type { ContainerManager } from '../k8s/container-manager.js';
/**
 * Outcome of a WebSocket authentication attempt.
 */
export interface AuthResult {
  // Populated on success; null when token, user, or license checks failed.
  authContext: AuthContext | null;
  // True when the user's container is still starting up; the caller polls
  // for readiness in the background before completing the handshake.
  isSpinningUp: boolean;
}
export interface AuthenticatorConfig {
userService: UserService;
containerManager: ContainerManager;
@@ -23,40 +28,49 @@ export class Authenticator {
/**
* Authenticate WebSocket connection via JWT token
* Also ensures the user's container is running
* Returns immediately if container is spinning up (non-blocking)
*/
async authenticateWebSocket(
request: FastifyRequest
): Promise<AuthContext | null> {
): Promise<AuthResult> {
try {
const token = this.extractBearerToken(request);
if (!token) {
this.config.logger.warn('No bearer token in WebSocket connection');
return null;
return { authContext: null, isSpinningUp: false };
}
const userId = await this.config.userService.verifyWebToken(token);
if (!userId) {
this.config.logger.warn('Invalid JWT token');
return null;
return { authContext: null, isSpinningUp: false };
}
const license = await this.config.userService.getUserLicense(userId);
if (!license) {
this.config.logger.warn({ userId }, 'User license not found');
return null;
return { authContext: null, isSpinningUp: false };
}
// Ensure container is running (may take time if creating new container)
// Ensure container is running (non-blocking - returns immediately if creating new)
this.config.logger.info({ userId }, 'Ensuring user container is running');
const { mcpEndpoint, wasCreated } = await this.config.containerManager.ensureContainerRunning(
const { mcpEndpoint, wasCreated, isSpinningUp } = await this.config.containerManager.ensureContainerRunning(
userId,
license
license,
false // Don't wait for ready
);
this.config.logger.info(
{ userId, mcpEndpoint, wasCreated },
'Container is ready'
);
if (isSpinningUp) {
this.config.logger.info(
{ userId, wasCreated },
'Container is spinning up'
);
} else {
this.config.logger.info(
{ userId, mcpEndpoint, wasCreated },
'Container is ready'
);
}
// Update license with actual MCP endpoint
license.mcpServerUrl = mcpEndpoint;
@@ -64,16 +78,19 @@ export class Authenticator {
const sessionId = `ws_${userId}_${Date.now()}`;
return {
userId,
channelType: ChannelType.WEBSOCKET,
channelUserId: userId, // For WebSocket, same as userId
sessionId,
license,
authenticatedAt: new Date(),
authContext: {
userId,
channelType: ChannelType.WEBSOCKET,
channelUserId: userId, // For WebSocket, same as userId
sessionId,
license,
authenticatedAt: new Date(),
},
isSpinningUp,
};
} catch (error) {
this.config.logger.error({ error }, 'WebSocket authentication error');
return null;
return { authContext: null, isSpinningUp: false };
}
}
@@ -134,13 +151,22 @@ export class Authenticator {
}
/**
* Extract bearer token from request headers
* Extract bearer token from request headers or query parameters
* WebSocket connections can't set custom headers in browsers, so we support token in query params
*/
private extractBearerToken(request: FastifyRequest): string | null {
// Try Authorization header first
const auth = request.headers.authorization;
if (!auth || !auth.startsWith('Bearer ')) {
return null;
if (auth && auth.startsWith('Bearer ')) {
return auth.substring(7);
}
return auth.substring(7);
// Fall back to query parameter (for WebSocket connections)
const query = request.query as { token?: string };
if (query.token) {
return query.token;
}
return null;
}
}

View File

@@ -1,4 +1,5 @@
import { betterAuth } from 'better-auth';
import { bearer } from 'better-auth/plugins/bearer';
import { Pool } from 'pg';
import { Kysely, PostgresDialect } from 'kysely';
import type { FastifyBaseLogger } from 'fastify';
@@ -88,6 +89,11 @@ export async function createBetterAuth(config: BetterAuthConfig) {
},
},
// Plugins
plugins: [
bearer(), // Enable Bearer token authentication for API/WebSocket
],
});
config.logger.debug('Better Auth instance created');

View File

@@ -7,12 +7,36 @@ import { randomUUID } from 'crypto';
import type { ProviderConfig } from '../llm/provider.js';
import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js';
import type { OHLCService } from '../services/ohlc-service.js';
import type { SymbolIndexService } from '../services/symbol-index-service.js';
import type { ContainerManager } from '../k8s/container-manager.js';
import {
WorkspaceManager,
DEFAULT_STORES,
type ChannelAdapter,
type ChannelCapabilities,
type SnapshotMessage,
type PatchMessage,
} from '../workspace/index.js';
/**
 * JSON.stringify wrapper that tolerates BigInt values.
 *
 * BigInts are coerced to Number before serialization (adequate for
 * timestamps and other integer values that fit in a double).
 *
 * @param obj - any JSON-serializable value, possibly containing BigInts
 * @returns the JSON string representation
 */
function jsonStringifySafe(obj: any): string {
  const replacer = (_key: string, value: unknown) =>
    typeof value === 'bigint' ? Number(value) : value;
  return JSON.stringify(obj, replacer);
}
export interface WebSocketHandlerConfig {
authenticator: Authenticator;
containerManager: ContainerManager;
providerConfig: ProviderConfig;
sessionRegistry: SessionRegistry;
eventSubscriber: EventSubscriber;
ohlcService?: OHLCService; // Optional for historical data support
symbolIndexService?: SymbolIndexService; // Optional for symbol search
}
/**
@@ -24,6 +48,7 @@ export interface WebSocketHandlerConfig {
export class WebSocketHandler {
private config: WebSocketHandlerConfig;
private harnesses = new Map<string, AgentHarness>();
private workspaces = new Map<string, WorkspaceManager>();
constructor(config: WebSocketHandlerConfig) {
this.config = config;
@@ -61,8 +86,8 @@ export class WebSocketHandler {
})
);
// Authenticate (this may take time if creating container)
const authContext = await this.config.authenticator.authenticateWebSocket(request);
// Authenticate (returns immediately if container is spinning up)
const { authContext, isSpinningUp } = await this.config.authenticator.authenticateWebSocket(request);
if (!authContext) {
logger.warn('WebSocket authentication failed');
socket.send(
@@ -76,18 +101,62 @@ export class WebSocketHandler {
}
logger.info(
{ userId: authContext.userId, sessionId: authContext.sessionId },
{ userId: authContext.userId, sessionId: authContext.sessionId, isSpinningUp },
'WebSocket connection authenticated'
);
// Send workspace starting message
socket.send(
JSON.stringify({
type: 'status',
status: 'initializing',
message: 'Starting your workspace...',
})
);
// If container is spinning up, send status and start background polling
if (isSpinningUp) {
socket.send(
JSON.stringify({
type: 'status',
status: 'spinning_up',
message: 'Your workspace is starting up, please wait...',
})
);
// Start background polling for container readiness
this.pollContainerReadiness(socket, authContext, app).catch((error) => {
logger.error({ error, userId: authContext.userId }, 'Error polling container readiness');
});
// Don't return - continue with session setup so we can receive messages once ready
} else {
// Send workspace starting message
socket.send(
JSON.stringify({
type: 'status',
status: 'initializing',
message: 'Starting your workspace...',
})
);
}
// Create workspace manager for this session
const workspace = new WorkspaceManager({
userId: authContext.userId,
sessionId: authContext.sessionId,
stores: DEFAULT_STORES,
// containerSync will be added when MCP client is implemented
logger,
});
// Create WebSocket channel adapter
const wsAdapter: ChannelAdapter = {
sendSnapshot: (msg: SnapshotMessage) => {
socket.send(JSON.stringify(msg));
},
sendPatch: (msg: PatchMessage) => {
socket.send(JSON.stringify(msg));
},
getCapabilities: (): ChannelCapabilities => ({
supportsSync: true,
supportsImages: true,
supportsMarkdown: true,
supportsStreaming: true,
supportsTradingViewEmbed: true,
}),
};
// Create agent harness
const harness = new AgentHarness({
@@ -99,6 +168,11 @@ export class WebSocketHandler {
});
try {
// Initialize workspace and harness
await workspace.initialize();
workspace.setAdapter(wsAdapter);
this.workspaces.set(authContext.sessionId, workspace);
await harness.initialize();
this.harnesses.set(authContext.sessionId, harness);
@@ -125,23 +199,29 @@ export class WebSocketHandler {
'Session registered for events'
);
// Send connected message
socket.send(
JSON.stringify({
type: 'connected',
sessionId: authContext.sessionId,
userId: authContext.userId,
licenseType: authContext.license.licenseType,
message: 'Connected to Dexorder AI',
})
);
// Send connected message (only if not spinning up - otherwise sent by pollContainerReadiness)
if (!isSpinningUp) {
socket.send(
JSON.stringify({
type: 'connected',
sessionId: authContext.sessionId,
userId: authContext.userId,
licenseType: authContext.license.licenseType,
message: 'Connected to Dexorder AI',
})
);
}
// Handle messages
socket.on('message', async (data: Buffer) => {
try {
logger.info({ rawMessage: data.toString().substring(0, 500) }, 'WebSocket message received');
const payload = JSON.parse(data.toString());
logger.info({ type: payload.type, request_id: payload.request_id }, 'WebSocket message parsed');
// Route based on message type
if (payload.type === 'message') {
// Chat message - send to agent harness
const inboundMessage: InboundMessage = {
messageId: randomUUID(),
userId: authContext.userId,
@@ -159,6 +239,20 @@ export class WebSocketHandler {
...response,
})
);
} else if (payload.type === 'hello') {
// Workspace sync: hello message
logger.debug({ seqs: payload.seqs }, 'Handling workspace hello');
await workspace.handleHello(payload.seqs || {});
} else if (payload.type === 'patch') {
// Workspace sync: patch message
logger.debug({ store: payload.store, seq: payload.seq }, 'Handling workspace patch');
await workspace.handlePatch(payload.store, payload.seq, payload.patch || []);
} else if (this.isDatafeedMessage(payload)) {
// Historical data request - send to OHLC service
logger.info({ type: payload.type }, 'Routing to datafeed handler');
await this.handleDatafeedMessage(socket, payload, logger);
} else {
logger.warn({ type: payload.type }, 'Unknown message type received');
}
} catch (error) {
logger.error({ error }, 'Error handling WebSocket message');
@@ -181,6 +275,10 @@ export class WebSocketHandler {
await this.config.eventSubscriber.onSessionDisconnect(removedSession);
}
// Cleanup workspace
await workspace.shutdown();
this.workspaces.delete(authContext.sessionId);
// Cleanup harness
await harness.cleanup();
this.harnesses.delete(authContext.sessionId);
@@ -190,12 +288,76 @@ export class WebSocketHandler {
logger.error({ error, sessionId: authContext.sessionId }, 'WebSocket error');
});
} catch (error) {
logger.error({ error }, 'Failed to initialize agent harness');
logger.error({ error }, 'Failed to initialize session');
socket.close(1011, 'Internal server error');
await workspace.shutdown();
this.workspaces.delete(authContext.sessionId);
await harness.cleanup();
}
}
/**
 * Poll for container readiness in the background.
 * Sends notification to client when container is ready.
 *
 * Invoked when the WebSocket authenticated while the user's container was
 * still spinning up. Waits up to 2 minutes for readiness, then sends a
 * 'status: ready' message followed by the 'connected' handshake message
 * that was deferred during connection setup. On timeout or internal
 * failure an 'error' message is sent instead.
 *
 * NOTE(review): sends on the socket without checking it is still open;
 * verify the ws library tolerates send-after-close here.
 */
private async pollContainerReadiness(
  socket: WebSocket,
  authContext: any,
  app: FastifyInstance
): Promise<void> {
  const logger = app.log;
  const userId = authContext.userId;
  logger.info({ userId }, 'Starting background poll for container readiness');
  try {
    // Wait for container to become ready (2 minute timeout)
    const ready = await this.config.containerManager.waitForContainerReady(userId, 120000);
    if (ready) {
      logger.info({ userId }, 'Container is now ready, notifying client');
      // Send ready notification
      socket.send(
        JSON.stringify({
          type: 'status',
          status: 'ready',
          message: 'Your workspace is ready!',
        })
      );
      // Also send the 'connected' message
      socket.send(
        JSON.stringify({
          type: 'connected',
          sessionId: authContext.sessionId,
          userId: authContext.userId,
          licenseType: authContext.license.licenseType,
          message: 'Connected to Dexorder AI',
        })
      );
    } else {
      // waitForContainerReady resolved false: timed out without readiness.
      logger.warn({ userId }, 'Container failed to become ready within timeout');
      socket.send(
        JSON.stringify({
          type: 'error',
          message: 'Workspace failed to start. Please try again later.',
        })
      );
    }
  } catch (error) {
    logger.error({ error, userId }, 'Error waiting for container readiness');
    socket.send(
      JSON.stringify({
        type: 'error',
        message: 'Error starting workspace. Please try again later.',
      })
    );
  }
}
/**
* Derive the container's XPUB event endpoint from the MCP server URL.
*
@@ -212,4 +374,173 @@ export class WebSocketHandler {
return mcpServerUrl.replace('http://', 'tcp://').replace(':3000', ':5570');
}
}
/**
 * Check if message is a datafeed message (TradingView protocol).
 * Matches the fixed set of request types handled by the datafeed path.
 */
private isDatafeedMessage(payload: any): boolean {
  switch (payload.type) {
    case 'get_config':
    case 'search_symbols':
    case 'resolve_symbol':
    case 'get_bars':
    case 'subscribe_bars':
    case 'unsubscribe_bars':
      return true;
    default:
      return false;
  }
}
/**
 * Handle datafeed messages (TradingView protocol).
 *
 * Dispatches chart-data requests (config, symbol search/resolve,
 * historical bars, bar subscriptions) to the optional OHLCService and
 * SymbolIndexService and writes the response back on the same socket.
 * Every reply echoes payload.request_id (or a freshly generated UUID
 * when the client omitted one) so the client can correlate responses.
 *
 * NOTE(review): when neither service is configured the request is
 * dropped with only a server-side warning — no error reply is sent.
 */
private async handleDatafeedMessage(
  socket: WebSocket,
  payload: any,
  logger: any
): Promise<void> {
  logger.info({ type: payload.type, payload }, 'handleDatafeedMessage called');
  const ohlcService = this.config.ohlcService;
  const symbolIndexService = this.config.symbolIndexService;
  logger.info({
    hasOhlcService: !!ohlcService,
    hasSymbolIndexService: !!symbolIndexService
  }, 'Service availability');
  if (!ohlcService && !symbolIndexService) {
    logger.warn('No datafeed services available');
    return;
  }
  const requestId = payload.request_id || randomUUID();
  try {
    switch (payload.type) {
      case 'get_config': {
        // Fall back to a static resolution list when no OHLC service is wired in.
        const config = ohlcService ? await ohlcService.getConfig() : { supported_resolutions: ['1', '5', '15', '60', '1D'] };
        socket.send(
          JSON.stringify({
            type: 'get_config_response',
            request_id: requestId,
            config,
          })
        );
        break;
      }
      case 'search_symbols': {
        logger.info({ query: payload.query, limit: payload.limit }, 'Handling search_symbols');
        // Use SymbolIndexService if available, otherwise fallback to OHLCService stub.
        // NOTE(review): re-declares symbolIndexService, shadowing the identical
        // outer binding — harmless but redundant.
        const symbolIndexService = this.config.symbolIndexService;
        logger.info({ hasSymbolIndexService: !!symbolIndexService }, 'Service check for search');
        const results = symbolIndexService
          ? await symbolIndexService.search(payload.query, payload.limit || 30)
          : (ohlcService ? await ohlcService.searchSymbols(
              payload.query,
              payload.symbol_type,
              payload.exchange,
              payload.limit || 30
            ) : []);
        logger.info({ resultsCount: results.length }, 'Search complete');
        socket.send(
          JSON.stringify({
            type: 'search_symbols_response',
            request_id: requestId,
            results,
          })
        );
        break;
      }
      case 'resolve_symbol': {
        logger.info({ symbol: payload.symbol }, 'Handling resolve_symbol');
        // Use SymbolIndexService if available, otherwise fallback to OHLCService stub.
        // NOTE(review): same redundant shadowing as in 'search_symbols' above.
        const symbolIndexService = this.config.symbolIndexService;
        logger.info({ hasSymbolIndexService: !!symbolIndexService }, 'Service check for resolve');
        const symbolInfo = symbolIndexService
          ? await symbolIndexService.resolveSymbol(payload.symbol)
          : (ohlcService ? await ohlcService.resolveSymbol(payload.symbol) : null);
        logger.info({ found: !!symbolInfo }, 'Symbol resolution complete');
        if (!symbolInfo) {
          logger.warn({ symbol: payload.symbol }, 'Symbol not found');
          socket.send(
            JSON.stringify({
              type: 'error',
              request_id: requestId,
              error_message: `Symbol not found: ${payload.symbol}`,
            })
          );
        } else {
          logger.info({ symbolInfo }, 'Sending symbol_info response');
          socket.send(
            JSON.stringify({
              type: 'resolve_symbol_response',
              request_id: requestId,
              symbol_info: symbolInfo,
            })
          );
        }
        break;
      }
      case 'get_bars': {
        if (!ohlcService) {
          socket.send(JSON.stringify({
            type: 'error',
            request_id: requestId,
            error_message: 'OHLC service not available'
          }));
          break;
        }
        const history = await ohlcService.fetchOHLC(
          payload.symbol,
          payload.resolution,
          payload.from_time,
          payload.to_time,
          payload.countback
        );
        // jsonStringifySafe: bar data may contain BigInt values that plain
        // JSON.stringify would reject.
        socket.send(
          jsonStringifySafe({
            type: 'get_bars_response',
            request_id: requestId,
            history,
          })
        );
        break;
      }
      case 'subscribe_bars':
      case 'unsubscribe_bars':
        // TODO: Implement real-time subscriptions
        socket.send(
          JSON.stringify({
            type: `${payload.type}_response`,
            request_id: requestId,
            subscription_id: payload.subscription_id,
            success: false,
            message: 'Real-time subscriptions not yet implemented',
          })
        );
        break;
      default:
        logger.warn({ type: payload.type }, 'Unknown datafeed message type');
    }
  } catch (error: any) {
    logger.error({ error, type: payload.type }, 'Error handling datafeed message');
    socket.send(
      jsonStringifySafe({
        type: 'error',
        request_id: requestId,
        error_code: 'INTERNAL_ERROR',
        error_message: error.message || 'Internal server error',
      })
    );
  }
}
}

View File

@@ -0,0 +1,546 @@
/**
* DuckDB Client for querying Apache Iceberg tables
*
* Uses DuckDB's native Iceberg and Parquet support to query data
* directly from S3/MinIO without needing catalog-only libraries.
*/
import duckdb from 'duckdb';
import type { FastifyBaseLogger } from 'fastify';
import { promisify } from 'util';
type Database = duckdb.Database;
type Connection = duckdb.Connection;
const { Database, Connection } = duckdb;
/**
 * Connection settings for the DuckDB/Iceberg client.
 */
export interface DuckDBConfig {
  // Iceberg REST catalog endpoint for gateway data.
  catalogUri: string;
  // Namespace within the main catalog.
  namespace: string;
  // Optional dedicated catalog for OHLC data; defaults to catalogUri.
  ohlcCatalogUri?: string;
  // Optional namespace for OHLC tables; defaults to 'trading'.
  ohlcNamespace?: string;
  // S3/MinIO access — all three must be provided for S3 settings
  // to be applied during initialize().
  s3Endpoint?: string;
  s3AccessKey?: string;
  s3SecretKey?: string;
}
/**
* DuckDB Client with Iceberg support
*
* Provides SQL-based queries against Iceberg tables stored in S3/MinIO.
*/
export class DuckDBClient {
private db: Database | null = null;
private conn: Connection | null = null;
private namespace: string;
private ohlcNamespace: string;
private catalogUri: string;
private ohlcCatalogUri: string;
private s3Config: {
endpoint?: string;
accessKey?: string;
secretKey?: string;
};
private logger: FastifyBaseLogger;
private initialized = false;
/**
 * @param config - catalog endpoints, namespaces, and optional S3 credentials
 * @param logger - structured logger used for all client diagnostics
 */
constructor(config: DuckDBConfig, logger: FastifyBaseLogger) {
  this.logger = logger;
  // Primary (gateway) catalog settings.
  this.namespace = config.namespace;
  this.catalogUri = config.catalogUri;
  // OHLC data may live in a dedicated catalog; fall back to the primary
  // catalog URI and the 'trading' namespace when not configured.
  this.ohlcCatalogUri = config.ohlcCatalogUri || config.catalogUri;
  this.ohlcNamespace = config.ohlcNamespace || 'trading';
  const { s3Endpoint, s3AccessKey, s3SecretKey } = config;
  this.s3Config = {
    endpoint: s3Endpoint,
    accessKey: s3AccessKey,
    secretKey: s3SecretKey,
  };
}
/**
 * Initialize DuckDB connection and configure S3/Iceberg extensions.
 *
 * Idempotent: returns immediately after the first successful call.
 * Installs and loads the httpfs and iceberg extensions, then applies S3
 * settings when endpoint, access key, and secret key are all present.
 *
 * Fix: credential and hostname values are now escaped (single quotes
 * doubled) before interpolation into the SET statements, so a value
 * containing an apostrophe can no longer break out of the SQL literal.
 *
 * @throws rethrows any DuckDB error after logging it
 */
async initialize(): Promise<void> {
  if (this.initialized) {
    return;
  }
  // Double single quotes so interpolated values cannot terminate the
  // surrounding SQL string literal in the SET statements below.
  const sqlQuote = (v: string): string => v.replace(/'/g, "''");
  try {
    this.db = new Database(':memory:');
    this.conn = this.db.connect();
    const all = promisify(this.conn.all.bind(this.conn));
    // Install and load required extensions
    await all('INSTALL httpfs;');
    await all('LOAD httpfs;');
    await all('INSTALL iceberg;');
    await all('LOAD iceberg;');
    // Configure S3 credentials only when the full triple is provided.
    if (this.s3Config.endpoint && this.s3Config.accessKey && this.s3Config.secretKey) {
      const s3Url = new URL(this.s3Config.endpoint);
      const useSSL = s3Url.protocol === 'https:';
      // Default port: 443 for https, otherwise MinIO's conventional 9000.
      const port = s3Url.port || (useSSL ? 443 : 9000);
      await all(`SET s3_endpoint='${sqlQuote(s3Url.hostname)}:${port}';`);
      await all(`SET s3_access_key_id='${sqlQuote(this.s3Config.accessKey)}';`);
      await all(`SET s3_secret_access_key='${sqlQuote(this.s3Config.secretKey)}';`);
      await all(`SET s3_use_ssl=${useSSL};`);
      await all(`SET s3_url_style='path';`);
      await all(`SET s3_region='us-east-1';`);
      await all(`SET s3_url_compatibility_mode=true;`);
      this.logger.info({
        endpoint: this.s3Config.endpoint,
        useSSL,
      }, 'Configured DuckDB S3 settings');
    }
    this.initialized = true;
    this.logger.info({
      catalogUri: this.catalogUri,
      namespace: this.namespace,
      ohlcCatalogUri: this.ohlcCatalogUri,
      ohlcNamespace: this.ohlcNamespace,
    }, 'DuckDB client initialized');
  } catch (error) {
    this.logger.error({ error }, 'Failed to initialize DuckDB');
    throw error;
  }
}
/**
* Execute a SQL query and return all rows
*/
private async query<T = any>(sql: string, params?: any[]): Promise<T[]> {
if (!this.conn) {
throw new Error('DuckDB connection not initialized');
}
try {
const all = promisify(this.conn.all.bind(this.conn)) as (sql: string, ...params: any[]) => Promise<any[]>;
const rows = params && params.length > 0 ? await all(sql, ...params) : await all(sql);
return rows as T[];
} catch (error) {
this.logger.error({ error, sql, params }, 'DuckDB query failed');
throw error;
}
}
/**
* Get the Iceberg table path from REST catalog
*/
private async getTablePath(namespace: string, tableName: string, catalogUri: string): Promise<string | null> {
try {
const tableUrl = `${catalogUri}/v1/namespaces/${namespace}/tables/${tableName}`;
this.logger.debug({ tableUrl }, 'Fetching Iceberg table metadata');
const response = await fetch(tableUrl, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (!response.ok) {
if (response.status === 404) {
this.logger.debug({ namespace, tableName }, 'Table not found in catalog');
return null;
}
throw new Error(`Failed to fetch table metadata: ${response.status} ${response.statusText}`);
}
const metadata = await response.json() as any;
// Extract metadata location (S3 path to metadata.json)
const metadataLocation = metadata['metadata-location'] || metadata.location;
if (!metadataLocation) {
this.logger.warn({ metadata }, 'No metadata-location found in table response');
return null;
}
this.logger.debug({ metadataLocation }, 'Found Iceberg table location');
return metadataLocation;
} catch (error: any) {
this.logger.error({ error: error.message, namespace, tableName }, 'Failed to get table path');
return null;
}
}
/**
* Query messages from gateway.conversations table
*/
async queryMessages(
userId: string,
sessionId: string,
options?: {
startTime?: number;
endTime?: number;
limit?: number;
}
): Promise<any[]> {
await this.initialize();
try {
const tablePath = await this.getTablePath(
this.namespace,
'conversations',
this.catalogUri
);
if (!tablePath) {
this.logger.warn('Conversations table not found');
return [];
}
// Build SQL query with optional filters
let sql = `
SELECT
id,
user_id,
session_id,
role,
content,
metadata,
timestamp
FROM iceberg_scan('${tablePath}')
WHERE user_id = ?
AND session_id = ?
`;
const params: any[] = [userId, sessionId];
if (options?.startTime) {
sql += ' AND timestamp >= ?';
params.push(options.startTime.toString());
}
if (options?.endTime) {
sql += ' AND timestamp <= ?';
params.push(options.endTime.toString());
}
sql += ' ORDER BY timestamp ASC';
if (options?.limit) {
sql += ' LIMIT ?';
params.push(options.limit);
}
this.logger.debug({ userId, sessionId, options }, 'Querying conversation messages');
const rows = await this.query(sql, params);
this.logger.info({
userId,
sessionId,
count: rows.length
}, 'Loaded conversation messages from Iceberg');
// Convert timestamp strings back to numbers
return rows.map((row: any) => ({
...row,
timestamp: Number(row.timestamp)
}));
} catch (error: any) {
this.logger.error({
error: error.message,
userId,
sessionId
}, 'Failed to query conversation messages');
return [];
}
}
/**
* Query checkpoint from gateway.checkpoints table
*/
async queryCheckpoint(
userId: string,
sessionId: string,
checkpointId?: string
): Promise<any | null> {
await this.initialize();
try {
const tablePath = await this.getTablePath(
this.namespace,
'checkpoints',
this.catalogUri
);
if (!tablePath) {
this.logger.warn('Checkpoints table not found');
return null;
}
let sql = `
SELECT
user_id,
session_id,
checkpoint_id,
checkpoint_data,
metadata,
timestamp
FROM iceberg_scan('${tablePath}')
WHERE user_id = ?
AND session_id = ?
`;
const params: any[] = [userId, sessionId];
if (checkpointId) {
sql += ' AND checkpoint_id = ?';
params.push(checkpointId);
}
sql += ' ORDER BY timestamp DESC LIMIT 1';
this.logger.debug({ userId, sessionId, checkpointId }, 'Querying checkpoint');
const rows = await this.query(sql, params);
if (rows.length === 0) {
return null;
}
const row = rows[0];
this.logger.info({
userId,
sessionId,
checkpointId: row.checkpoint_id
}, 'Loaded checkpoint from Iceberg');
// Convert timestamp string back to number
return {
...row,
timestamp: Number(row.timestamp)
};
} catch (error: any) {
this.logger.error({
error: error.message,
userId,
sessionId,
checkpointId
}, 'Failed to query checkpoint');
return null;
}
}
/**
* Query symbol metadata from trading.symbol_metadata table
*/
async queryAllSymbols(): Promise<any[]> {
await this.initialize();
try {
const tablePath = await this.getTablePath(
this.ohlcNamespace,
'symbol_metadata',
this.ohlcCatalogUri
);
if (!tablePath) {
this.logger.warn('Symbol metadata table not found');
return [];
}
// Query the Iceberg table using DuckDB
const sql = `SELECT * FROM iceberg_scan('${tablePath}')`;
this.logger.debug({ sql }, 'Querying symbol metadata');
const rows = await this.query(sql);
this.logger.info({ count: rows.length }, 'Loaded symbol metadata from Iceberg');
return rows;
} catch (error: any) {
this.logger.error({ error: error.message }, 'Failed to query symbol metadata');
return [];
}
}
/**
* Query OHLC data from trading.ohlc table
*/
async queryOHLC(
ticker: string,
period_seconds: number,
start_time: bigint, // microseconds
end_time: bigint // microseconds
): Promise<any[]> {
await this.initialize();
try {
const tablePath = await this.getTablePath(
this.ohlcNamespace,
'ohlc',
this.ohlcCatalogUri
);
if (!tablePath) {
this.logger.warn('OHLC table not found');
return [];
}
// Query the Iceberg table with filters
const sql = `
SELECT
timestamp,
ticker,
period_seconds,
open,
high,
low,
close,
volume
FROM iceberg_scan('${tablePath}')
WHERE ticker = ?
AND period_seconds = ?
AND timestamp >= ?
AND timestamp <= ?
ORDER BY timestamp ASC
`;
const params = [
ticker,
period_seconds,
start_time.toString(),
end_time.toString()
];
this.logger.debug({ ticker, period_seconds, start_time, end_time }, 'Querying OHLC data');
const rows = await this.query(sql, params);
this.logger.info({
ticker,
period_seconds,
count: rows.length
}, 'Loaded OHLC data from Iceberg');
// Convert timestamp strings to numbers (microseconds as Number is fine for display)
return rows.map((row: any) => ({
...row,
timestamp: Number(row.timestamp)
}));
} catch (error: any) {
this.logger.error({
error: error.message,
ticker,
period_seconds
}, 'Failed to query OHLC data');
return [];
}
}
/**
* Check if OHLC data exists for the given parameters
*/
async hasOHLCData(
ticker: string,
period_seconds: number,
start_time: bigint,
end_time: bigint
): Promise<boolean> {
await this.initialize();
try {
const tablePath = await this.getTablePath(
this.ohlcNamespace,
'ohlc',
this.ohlcCatalogUri
);
if (!tablePath) {
return false;
}
const sql = `
SELECT COUNT(*) as count
FROM iceberg_scan('${tablePath}')
WHERE ticker = ?
AND period_seconds = ?
AND timestamp >= ?
AND timestamp <= ?
`;
const params = [
ticker,
period_seconds,
start_time.toString(),
end_time.toString()
];
const rows = await this.query<{ count: number }>(sql, params);
return rows.length > 0 && rows[0].count > 0;
} catch (error: any) {
this.logger.error({ error: error.message }, 'Failed to check OHLC data existence');
return false;
}
}
/**
* Find missing OHLC data ranges
*/
async findMissingOHLCRanges(
ticker: string,
period_seconds: number,
start_time: bigint,
end_time: bigint
): Promise<Array<[bigint, bigint]>> {
await this.initialize();
try {
const data = await this.queryOHLC(ticker, period_seconds, start_time, end_time);
if (data.length === 0) {
// All data is missing
return [[start_time, end_time]];
}
// Check if we have continuous data
// For now, simple check: if we have any data, assume complete
// TODO: Implement proper gap detection by checking for missing periods
const periodMicros = BigInt(period_seconds) * 1000000n;
const expectedBars = Number((end_time - start_time) / periodMicros);
if (data.length < expectedBars * 0.95) { // Allow 5% tolerance
this.logger.debug({
ticker,
expected: expectedBars,
actual: data.length,
}, 'Incomplete OHLC data detected');
return [[start_time, end_time]]; // Request full range
}
// Data appears complete
return [];
} catch (error: any) {
this.logger.error({ error: error.message }, 'Failed to find missing OHLC ranges');
// Return full range on error (safe default)
return [[start_time, end_time]];
}
}
/**
* Close the DuckDB connection
*/
async close(): Promise<void> {
if (this.conn) {
const close = promisify(this.conn.close.bind(this.conn));
await close();
this.conn = null;
}
if (this.db) {
const close = promisify(this.db.close.bind(this.db));
await close();
this.db = null;
}
this.initialized = false;
this.logger.info('DuckDB client closed');
}
}

View File

@@ -1,15 +1,32 @@
import { IcebergRestCatalog } from 'iceberg-js';
import type { FastifyBaseLogger } from 'fastify';
import { DuckDBClient } from './duckdb-client.js';
/**
* Iceberg client configuration
*
* Supports separate catalog/warehouse configurations for:
* 1. Conversation data (catalogUri + namespace, typically 'gateway')
* 2. OHLC/Trading data (ohlcCatalogUri + ohlcNamespace, typically 'trading')
*
* This allows for:
* - Different S3 buckets/warehouses per data type
* - Different retention policies
* - Independent scaling and management
* - Cost optimization (e.g., cheaper storage class for old conversations)
*/
export interface IcebergConfig {
  // Conversation/Gateway data catalog (tables: conversations, checkpoints)
  catalogUri: string;
  namespace: string;
  // S3 configuration for conversation data; all three settings are applied together
  s3Endpoint?: string;
  s3AccessKey?: string;
  s3SecretKey?: string;
  // OHLC/Trading data catalog (can be same or different from conversation catalog);
  // defaults to catalogUri and namespace 'trading' when omitted
  ohlcCatalogUri?: string;
  ohlcNamespace?: string;
}
/**
@@ -40,79 +57,73 @@ export interface IcebergCheckpoint {
/**
* Iceberg REST client wrapper for durable storage
*
* Uses Iceberg REST Catalog API to:
* - Query conversation history from gateway.conversations
* - Query checkpoints from gateway.checkpoints
* - Note: Writes are handled by Flink; this is read-only
* Architecture:
* - Uses DuckDB with Iceberg extension for querying Parquet data
* - Supports SEPARATE catalogs/warehouses for conversation vs OHLC data
* - Writes are handled by Flink via Kafka; this client is READ-ONLY
*
* For writes, we'll send to a Kafka topic that Flink consumes
* (or implement direct REST catalog write if needed)
* Data separation:
* 1. Conversation data: catalogUri + namespace (e.g., http://catalog:8181 + 'gateway')
* - Tables: conversations, checkpoints
* - Can use different warehouse/S3 bucket in the future
*
* 2. OHLC/Trading data: ohlcCatalogUri + ohlcNamespace (e.g., http://catalog:8181 + 'trading')
* - Tables: ohlc, symbol_metadata
* - Can use different warehouse/S3 bucket for cost optimization
*
* To use separate warehouses in production:
* 1. Deploy two Iceberg REST catalog instances (or configure multi-warehouse)
* 2. Point catalogUri to conversations warehouse
* 3. Point ohlcCatalogUri to trading warehouse
* 4. Update Flink configuration to write to the correct catalogs
*/
export class IcebergClient {
private namespace: string;
private duckdb: DuckDBClient;
private logger: FastifyBaseLogger;
private namespace: string;
private ohlcNamespace: string;
constructor(config: IcebergConfig, logger: FastifyBaseLogger) {
this.logger = logger;
this.namespace = config.namespace;
this.ohlcNamespace = config.ohlcNamespace || 'trading';
// Initialize Iceberg REST client
const clientConfig: any = {
uri: config.catalogUri,
};
if (config.s3Endpoint) {
clientConfig.s3 = {
endpoint: config.s3Endpoint,
'access-key-id': config.s3AccessKey,
'secret-access-key': config.s3SecretKey,
'path-style-access': 'true',
};
}
// TODO: Store client for queries when needed
new IcebergRestCatalog(clientConfig);
// Initialize DuckDB client for querying Iceberg tables
// DuckDB will query tables from the appropriate catalog based on the data type
this.duckdb = new DuckDBClient(
{
catalogUri: config.catalogUri,
namespace: config.namespace,
ohlcCatalogUri: config.ohlcCatalogUri,
ohlcNamespace: config.ohlcNamespace,
s3Endpoint: config.s3Endpoint,
s3AccessKey: config.s3AccessKey,
s3SecretKey: config.s3SecretKey,
},
logger
);
this.logger.info({
catalogUri: config.catalogUri,
namespace: this.namespace,
}, 'Iceberg client initialized');
ohlcCatalogUri: config.ohlcCatalogUri || config.catalogUri,
ohlcNamespace: this.ohlcNamespace,
}, 'Iceberg client initialized with separate conversation and OHLC catalogs');
}
/**
* Query messages from gateway.conversations table
*
* Note: This is a simplified interface. The actual Iceberg REST API
* returns table metadata, and you'd need to query the underlying
* Parquet files via S3 or use a query engine like DuckDB/Trino.
*
* For now, we'll document the expected schema and leave actual
* implementation as TODO since Flink handles writes.
*/
async queryMessages(
userId: string,
sessionId: string,
_options?: {
options?: {
startTime?: number;
endTime?: number;
limit?: number;
}
): Promise<IcebergMessage[]> {
this.logger.debug({
userId,
sessionId,
table: `${this.namespace}.conversations`,
}, 'Querying messages from Iceberg');
// TODO: Implement actual Iceberg query
// Options:
// 1. Use iceberg-js to get table metadata and Parquet file locations
// 2. Query Parquet files directly via S3 + parquet-wasm
// 3. Use external query engine (DuckDB, Trino, Presto)
// 4. Use Flink SQL REST endpoint for queries
this.logger.warn('Iceberg query not yet implemented - returning empty array');
return [];
return this.duckdb.queryMessages(userId, sessionId, options);
}
/**
@@ -123,16 +134,7 @@ export class IcebergClient {
sessionId: string,
checkpointId?: string
): Promise<IcebergCheckpoint | null> {
this.logger.debug({
userId,
sessionId,
checkpointId,
table: `${this.namespace}.checkpoints`,
}, 'Querying checkpoint from Iceberg');
// TODO: Implement actual Iceberg query
this.logger.warn('Iceberg query not yet implemented - returning null');
return null;
return this.duckdb.queryCheckpoint(userId, sessionId, checkpointId);
}
/**
@@ -188,6 +190,49 @@ export class IcebergClient {
return false;
}
}
/**
* Query OHLC data from trading.ohlc table
*/
async queryOHLC(
ticker: string,
period_seconds: number,
start_time: bigint, // microseconds
end_time: bigint // microseconds
): Promise<any[]> {
return this.duckdb.queryOHLC(ticker, period_seconds, start_time, end_time);
}
/**
* Check if OHLC data exists for the given parameters
*/
async hasOHLCData(
ticker: string,
period_seconds: number,
start_time: bigint,
end_time: bigint
): Promise<boolean> {
return this.duckdb.hasOHLCData(ticker, period_seconds, start_time, end_time);
}
/**
* Find missing OHLC data ranges
*/
async findMissingOHLCRanges(
ticker: string,
period_seconds: number,
start_time: bigint,
end_time: bigint
): Promise<Array<[bigint, bigint]>> {
return this.duckdb.findMissingOHLCRanges(ticker, period_seconds, start_time, end_time);
}
/**
* Query all symbols from symbol_metadata table
*/
async queryAllSymbols(): Promise<any[]> {
return this.duckdb.queryAllSymbols();
}
}
/**

View File

@@ -0,0 +1,180 @@
/**
* ZMQ Protocol encoding/decoding using Protobuf
*
* Protocol format (as defined in protobuf/ingestor.proto):
* Frame 1: [1 byte: protocol version]
* Frame 2: [1 byte: message type ID][N bytes: protobuf message]
*
* For PUB/SUB: [topic frame][version frame][message frame]
*/
import protobuf from 'protobufjs';
import { readFileSync } from 'fs';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import type {
SubmitHistoricalRequest,
SubmitResponse,
HistoryReadyNotification,
SubmitStatus,
NotificationStatus,
} from '../types/ohlc.js';
// ESM modules have no __filename/__dirname; derive them from import.meta.url
// so the proto directory can be located relative to this file.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Protocol constants
 */
export const PROTOCOL_VERSION = 0x01;
// One-byte message type IDs carried as the first byte of the message frame.
export enum MessageType {
  SUBMIT_HISTORICAL_REQUEST = 0x10,
  SUBMIT_RESPONSE = 0x11,
  HISTORY_READY_NOTIFICATION = 0x12,
}
// Load protobuf types at runtime (same pattern as ingestor)
// Proto files are copied to /app/protobuf/ in the Docker image.
// NOTE: this runs synchronously at module import time; a missing proto file
// throws immediately, which fails fast on a misdeployed image.
const protoDir = join(__dirname, '../..', 'protobuf');
const root = new protobuf.Root();
// Load proto file and parse it into the shared root
const ingestorProto = readFileSync(join(protoDir, 'ingestor.proto'), 'utf8');
protobuf.parse(ingestorProto, root);
// Message type handles resolved from the parsed schema
const SubmitHistoricalRequestType = root.lookupType('SubmitHistoricalRequest');
const SubmitResponseType = root.lookupType('SubmitResponse');
const HistoryReadyNotificationType = root.lookupType('HistoryReadyNotification');
/**
* Encode SubmitHistoricalRequest to ZMQ frames
*
* Returns: [version_frame, message_frame]
*/
/**
 * Encode SubmitHistoricalRequest to ZMQ frames.
 *
 * Returns: [version_frame, message_frame] where the message frame is
 * [1 byte message type ID][protobuf payload].
 */
export function encodeSubmitHistoricalRequest(req: SubmitHistoricalRequest): Buffer[] {
  const versionFrame = Buffer.from([PROTOCOL_VERSION]);
  // Convert to protobuf-compatible format (pbjs uses camelCase).
  // 64-bit timestamps are passed as decimal strings: fromObject() converts
  // them to Longs for uint64 fields, avoiding the silent precision loss that
  // Number(bigint) would incur for values at or above 2^53.
  const protoMessage = {
    requestId: req.request_id,
    ticker: req.ticker,
    startTime: req.start_time.toString(),
    endTime: req.end_time.toString(),
    periodSeconds: req.period_seconds,
    limit: req.limit,
    clientId: req.client_id,
  };
  // fromObject performs field conversion (string -> Long) before encoding
  const message = SubmitHistoricalRequestType.fromObject(protoMessage);
  const payloadBuffer = SubmitHistoricalRequestType.encode(message).finish();
  const messageFrame = Buffer.concat([
    Buffer.from([MessageType.SUBMIT_HISTORICAL_REQUEST]),
    Buffer.from(payloadBuffer),
  ]);
  return [versionFrame, messageFrame];
}
/**
* Decode SubmitResponse from ZMQ frames
*
* Input: [version_frame, message_frame]
*/
/**
 * Decode SubmitResponse from ZMQ frames.
 *
 * Expects [version_frame, message_frame]; validates the protocol version and
 * message type byte before decoding the protobuf payload. On failure, logs
 * diagnostic frame dumps and re-throws.
 */
export function decodeSubmitResponse(frames: Buffer[]): SubmitResponse {
  try {
    if (frames.length < 2) {
      throw new Error(`Expected 2 frames, got ${frames.length}`);
    }
    const [versionFrame, messageFrame] = frames;
    // Protocol version must match before we interpret anything else.
    if (versionFrame[0] !== PROTOCOL_VERSION) {
      throw new Error(`Unsupported protocol version: ${versionFrame[0]}`);
    }
    // First payload byte is the message type ID.
    const messageType = messageFrame[0];
    if (messageType !== MessageType.SUBMIT_RESPONSE) {
      throw new Error(`Expected SUBMIT_RESPONSE (0x11), got 0x${messageType.toString(16)}`);
    }
    // Decode the protobuf payload (everything after the type byte) and
    // convert to a plain object with stringified longs and numeric enums.
    const obj = SubmitResponseType.toObject(
      SubmitResponseType.decode(messageFrame.slice(1)),
      { longs: String, enums: Number, defaults: true }
    );
    return {
      request_id: obj.requestId,
      status: obj.status as SubmitStatus,
      error_message: obj.errorMessage || undefined,
      notification_topic: obj.notificationTopic,
    };
  } catch (error) {
    console.error('Error decoding SubmitResponse:', error);
    console.error('Frame count:', frames.length);
    if (frames.length >= 2) {
      console.error('Version frame:', frames[0].toString('hex'));
      console.error('Message frame (first 100 bytes):', frames[1].slice(0, 100).toString('hex'));
    }
    throw error;
  }
}
/**
* Decode HistoryReadyNotification from ZMQ frames
*
* Input: [topic_frame, version_frame, message_frame] (for SUB socket)
*/
/**
 * Decode HistoryReadyNotification from ZMQ frames.
 *
 * Expects [topic_frame, version_frame, message_frame] as delivered on a SUB
 * socket; the topic frame is ignored here (the caller has already routed on it).
 */
export function decodeHistoryReadyNotification(frames: Buffer[]): HistoryReadyNotification {
  if (frames.length < 3) {
    throw new Error(`Expected 3 frames (topic, version, message), got ${frames.length}`);
  }
  const [, versionFrame, messageFrame] = frames;
  // Protocol version check first.
  if (versionFrame[0] !== PROTOCOL_VERSION) {
    throw new Error(`Unsupported protocol version: ${versionFrame[0]}`);
  }
  // Then the one-byte message type ID.
  const messageType = messageFrame[0];
  if (messageType !== MessageType.HISTORY_READY_NOTIFICATION) {
    throw new Error(`Expected HISTORY_READY_NOTIFICATION (0x12), got 0x${messageType.toString(16)}`);
  }
  // Decode the protobuf payload following the type byte; longs come back as
  // strings so they can be converted losslessly to bigint below.
  const obj = HistoryReadyNotificationType.toObject(
    HistoryReadyNotificationType.decode(messageFrame.slice(1)),
    { longs: String, enums: Number, defaults: true }
  );
  return {
    request_id: obj.requestId,
    ticker: obj.ticker,
    period_seconds: obj.periodSeconds,
    start_time: BigInt(obj.startTime),
    end_time: BigInt(obj.endTime),
    status: obj.status as NotificationStatus,
    error_message: obj.errorMessage || undefined,
    iceberg_namespace: obj.icebergNamespace,
    iceberg_table: obj.icebergTable,
    row_count: obj.rowCount,
    completed_at: BigInt(obj.completedAt),
  };
}

View File

@@ -0,0 +1,356 @@
/**
* ZMQ Relay Client for historical data requests
*
* IMPORTANT: Implements race-condition-free notification subscription
* by subscribing to RESPONSE:{client_id} topic BEFORE sending requests.
*
* Architecture:
* - REQ socket to relay (port 5559) for SubmitHistoricalRequest
* - SUB socket to relay (port 5558) for HistoryReadyNotification
* - Notification topic: RESPONSE:{client_id} (deterministic, client-generated)
*/
import * as zmq from 'zeromq';
import type { FastifyBaseLogger } from 'fastify';
import { randomUUID } from 'crypto';
import {
encodeSubmitHistoricalRequest,
decodeSubmitResponse,
decodeHistoryReadyNotification,
} from './zmq-protocol.js';
import type {
SubmitHistoricalRequest,
HistoryReadyNotification,
} from '../types/ohlc.js';
import {
SubmitStatus,
NotificationStatus,
} from '../types/ohlc.js';
/** Connection settings for the ZMQ relay client. */
export interface ZMQRelayConfig {
  relayRequestEndpoint: string; // REQ socket target, e.g. "tcp://relay:5559"
  relayNotificationEndpoint: string; // SUB socket target, e.g. "tcp://relay:5558"
  clientId?: string; // Optional client ID; a "gateway-<uuid8>" ID is generated if not provided
  requestTimeout?: number; // Per-request timeout in ms (default: 30000)
  onMetadataUpdate?: () => Promise<void>; // Invoked when a METADATA_UPDATE notification arrives
}
/** Bookkeeping for one in-flight request awaiting its HistoryReadyNotification. */
interface PendingRequest {
  resolve: (notification: HistoryReadyNotification) => void;
  reject: (error: Error) => void;
  timeoutHandle: NodeJS.Timeout; // cleared whenever the request settles, to avoid stray timers
}
/**
* ZMQ Relay Client
*
* Provides async API for submitting historical data requests and waiting for
* completion notifications.
*/
export class ZMQRelayClient {
  // onMetadataUpdate must stay optional: the previous Required<ZMQRelayConfig>
  // type did not type-check against a literal lacking the callback, and the
  // callback was never stored, so METADATA_UPDATE handling could never fire.
  private config: Required<Omit<ZMQRelayConfig, 'onMetadataUpdate'>> &
    Pick<ZMQRelayConfig, 'onMetadataUpdate'>;
  private logger: FastifyBaseLogger;
  private reqSocket?: zmq.Request;
  private subSocket?: zmq.Subscriber;
  // Deterministic, client-generated topic: RESPONSE:{client_id}
  private notificationTopic: string;
  private pendingRequests: Map<string, PendingRequest> = new Map();
  private connected = false;
  private notificationListenerRunning = false;

  constructor(config: ZMQRelayConfig, logger: FastifyBaseLogger) {
    this.config = {
      relayRequestEndpoint: config.relayRequestEndpoint,
      relayNotificationEndpoint: config.relayNotificationEndpoint,
      clientId: config.clientId || `gateway-${randomUUID().slice(0, 8)}`,
      requestTimeout: config.requestTimeout || 30000,
      // Forward the metadata callback so the notification listener can call it.
      onMetadataUpdate: config.onMetadataUpdate,
    };
    this.logger = logger;
    this.notificationTopic = `RESPONSE:${this.config.clientId}`;
  }

  /**
   * Connect to relay and start notification listener.
   *
   * CRITICAL: This MUST be called before making any requests.
   * The notification listener subscribes to RESPONSE:{client_id} topic
   * BEFORE any requests are sent, preventing race conditions.
   */
  async connect(): Promise<void> {
    if (this.connected) {
      return;
    }
    this.logger.info({
      requestEndpoint: this.config.relayRequestEndpoint,
      notificationEndpoint: this.config.relayNotificationEndpoint,
      clientId: this.config.clientId,
      notificationTopic: this.notificationTopic,
    }, 'Connecting to ZMQ relay');
    // Create REQ socket for requests
    this.reqSocket = new zmq.Request();
    this.reqSocket.connect(this.config.relayRequestEndpoint);
    // Create SUB socket for notifications
    this.subSocket = new zmq.Subscriber();
    this.subSocket.connect(this.config.relayNotificationEndpoint);
    // Subscribe to our notification topic BEFORE sending any requests
    this.subSocket.subscribe(this.notificationTopic);
    // Subscribe to system metadata update notifications
    this.subSocket.subscribe('METADATA_UPDATE');
    this.logger.info({
      topics: [this.notificationTopic, 'METADATA_UPDATE']
    }, 'Subscribed to notification topics');
    // Start notification listener
    this.startNotificationListener();
    // Give sockets a moment to connect (SUB subscriptions propagate asynchronously)
    await new Promise(resolve => setTimeout(resolve, 100));
    this.connected = true;
    this.logger.info('ZMQ relay client connected');
  }

  /**
   * Request historical OHLC data.
   *
   * IMPORTANT: Call connect() before using this method.
   *
   * NOTE(review): zmq REQ sockets expect strict send/receive alternation;
   * concurrent calls to this method may interleave on the socket — confirm
   * callers serialize requests or switch to a Dealer socket if not.
   *
   * @param ticker Market identifier (e.g., "BINANCE:BTC/USDT")
   * @param period_seconds OHLC period in seconds
   * @param start_time Start timestamp in MICROSECONDS
   * @param end_time End timestamp in MICROSECONDS
   * @param limit Optional limit on number of candles
   * @returns Promise that resolves when data is ready in Iceberg
   * @throws When not connected, the relay rejects the request, or the
   *         configured requestTimeout elapses without a notification.
   */
  async requestHistoricalOHLC(
    ticker: string,
    period_seconds: number,
    start_time: bigint,
    end_time: bigint,
    limit?: number
  ): Promise<HistoryReadyNotification> {
    if (!this.connected || !this.reqSocket) {
      throw new Error('Client not connected. Call connect() first.');
    }
    const request_id = randomUUID();
    this.logger.debug({
      request_id,
      ticker,
      period_seconds,
      start_time: start_time.toString(),
      end_time: end_time.toString(),
    }, 'Submitting historical OHLC request');
    const request: SubmitHistoricalRequest = {
      request_id,
      ticker,
      start_time,
      end_time,
      period_seconds,
      limit,
      client_id: this.config.clientId,
    };
    // Register pending request BEFORE sending (notification listener is already running)
    const resultPromise = new Promise<HistoryReadyNotification>((resolve, reject) => {
      const timeoutHandle = setTimeout(() => {
        this.pendingRequests.delete(request_id);
        reject(new Error(`Request ${request_id} timed out after ${this.config.requestTimeout}ms`));
      }, this.config.requestTimeout);
      this.pendingRequests.set(request_id, { resolve, reject, timeoutHandle });
    });
    // Encode and send request
    const frames = encodeSubmitHistoricalRequest(request);
    try {
      // Send two frames: version, then message
      await this.reqSocket.send(frames);
      // Wait for immediate acknowledgment
      const responseFrames = await this.reqSocket.receive();
      this.logger.debug({
        frameCount: responseFrames.length,
        frameLengths: Array.from(responseFrames).map(f => f.length),
      }, 'Received response frames from relay');
      const response = decodeSubmitResponse(Array.from(responseFrames));
      this.logger.debug({
        request_id,
        response,
      }, 'Decoded SubmitResponse');
      if (response.status !== SubmitStatus.QUEUED) {
        // Request was rejected - clean up pending request
        const pending = this.pendingRequests.get(request_id);
        if (pending) {
          clearTimeout(pending.timeoutHandle);
          this.pendingRequests.delete(request_id);
        }
        throw new Error(`Request rejected: ${response.error_message || 'Unknown error'}`);
      }
      this.logger.debug({ request_id }, 'Request queued, waiting for notification');
      // Wait for notification (already subscribed to topic)
      return await resultPromise;
    } catch (error) {
      // Clean up pending request on error
      const pending = this.pendingRequests.get(request_id);
      if (pending) {
        clearTimeout(pending.timeoutHandle);
        this.pendingRequests.delete(request_id);
      }
      this.logger.error({
        error,
        request_id,
        ticker,
        errorMessage: error instanceof Error ? error.message : String(error),
        errorStack: error instanceof Error ? error.stack : undefined,
      }, 'Failed to submit historical OHLC request');
      throw error;
    }
  }

  /**
   * Start notification listener.
   *
   * CRITICAL: This runs BEFORE any requests are submitted to prevent race condition.
   * We're already subscribed to RESPONSE:{client_id} and METADATA_UPDATE, so we'll
   * receive all notifications. Individual decode failures are logged and skipped
   * so one bad frame does not kill the listener loop.
   */
  private startNotificationListener(): void {
    if (this.notificationListenerRunning || !this.subSocket) {
      return;
    }
    this.notificationListenerRunning = true;
    // Listen for notifications asynchronously (fire-and-forget loop)
    (async () => {
      try {
        for await (const frames of this.subSocket!) {
          try {
            // First frame is the topic
            const topic = frames[0].toString();
            // Handle metadata update notifications
            if (topic === 'METADATA_UPDATE') {
              this.logger.info('Received METADATA_UPDATE notification');
              // Call the onMetadataUpdate callback if configured
              if (this.config.onMetadataUpdate) {
                try {
                  await this.config.onMetadataUpdate();
                } catch (error) {
                  this.logger.error({ error }, 'Failed to handle metadata update');
                }
              }
              continue;
            }
            // Handle history ready notifications
            const notification = decodeHistoryReadyNotification(Array.from(frames));
            this.logger.debug({
              request_id: notification.request_id,
              status: NotificationStatus[notification.status],
              row_count: notification.row_count,
            }, 'Received history ready notification');
            // Check if we're waiting for this request
            const pending = this.pendingRequests.get(notification.request_id);
            if (pending) {
              clearTimeout(pending.timeoutHandle);
              this.pendingRequests.delete(notification.request_id);
              if (notification.status === NotificationStatus.OK) {
                pending.resolve(notification);
              } else {
                pending.reject(new Error(
                  `Historical data request failed: ${notification.error_message || NotificationStatus[notification.status]}`
                ));
              }
            } else {
              // Can happen when a request already timed out locally
              this.logger.warn({
                request_id: notification.request_id,
              }, 'Received notification for unknown request');
            }
          } catch (error) {
            this.logger.error({ error }, 'Failed to process notification');
          }
        }
      } catch (error) {
        // Socket iteration ends with an error when close() is called; only
        // log if we were not deliberately shutting down.
        if (this.notificationListenerRunning) {
          this.logger.error({ error }, 'Notification listener error');
        }
      } finally {
        this.notificationListenerRunning = false;
      }
    })();
    this.logger.debug('Notification listener started');
  }

  /**
   * Close the client and cleanup resources.
   * Rejects every pending request with 'Client closed' and closes both sockets.
   */
  async close(): Promise<void> {
    if (!this.connected) {
      return;
    }
    this.logger.info('Closing ZMQ relay client');
    // Clear the flag first so the listener loop treats socket closure as expected.
    this.notificationListenerRunning = false;
    // Reject all pending requests
    for (const [, pending] of this.pendingRequests) {
      clearTimeout(pending.timeoutHandle);
      pending.reject(new Error('Client closed'));
    }
    this.pendingRequests.clear();
    // Close sockets
    if (this.subSocket) {
      this.subSocket.close();
      this.subSocket = undefined;
    }
    if (this.reqSocket) {
      this.reqSocket.close();
      this.reqSocket = undefined;
    }
    this.connected = false;
    this.logger.info('ZMQ relay client closed');
  }

  /**
   * Check if client is connected
   */
  isConnected(): boolean {
    return this.connected;
  }

  /**
   * Get the client ID used for notifications
   */
  getClientId(): string {
    return this.config.clientId;
  }
}

View File

@@ -94,7 +94,8 @@ export class KubernetesClient {
});
return true;
} catch (error: any) {
if (error.response?.statusCode === 404) {
// @kubernetes/client-node v1.x throws errors with either .code or .response.statusCode
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
return false;
}
throw error;
@@ -171,7 +172,8 @@ export class KubernetesClient {
}
} catch (error: any) {
// If resource already exists, log warning but continue
if (error.response?.statusCode === 409) {
const is409 = error.code === 409 || error.response?.statusCode === 409 || error.statusCode === 409;
if (is409) {
this.config.logger.warn(
{ kind: doc.kind, name: doc.metadata?.name },
'Resource already exists, skipping'
@@ -246,7 +248,7 @@ export class KubernetesClient {
await new Promise((resolve) => setTimeout(resolve, pollInterval));
} catch (error: any) {
if (error.response?.statusCode === 404) {
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
this.config.logger.warn({ deploymentName }, 'Deployment not found');
return false;
}
@@ -281,7 +283,7 @@ export class KubernetesClient {
);
return null;
} catch (error: any) {
if (error.response?.statusCode === 404) {
if (error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404) {
this.config.logger.warn({ serviceName }, 'Service not found');
return null;
}
@@ -308,7 +310,8 @@ export class KubernetesClient {
});
this.config.logger.info({ deploymentName }, 'Deleted deployment');
} catch (error: any) {
if (error.response?.statusCode !== 404) {
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
if (!is404) {
this.config.logger.warn({ deploymentName, error }, 'Failed to delete deployment');
}
}
@@ -321,7 +324,8 @@ export class KubernetesClient {
});
this.config.logger.info({ serviceName }, 'Deleted service');
} catch (error: any) {
if (error.response?.statusCode !== 404) {
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
if (!is404) {
this.config.logger.warn({ serviceName, error }, 'Failed to delete service');
}
}
@@ -334,7 +338,8 @@ export class KubernetesClient {
});
this.config.logger.info({ pvcName }, 'Deleted PVC');
} catch (error: any) {
if (error.response?.statusCode !== 404) {
const is404 = error.code === 404 || error.response?.statusCode === 404 || error.statusCode === 404;
if (!is404) {
this.config.logger.warn({ pvcName, error }, 'Failed to delete PVC');
}
}

View File

@@ -17,6 +17,12 @@ export interface ContainerStatus {
mcpEndpoint: string;
}
export interface EnsureContainerResult {
mcpEndpoint: string;
wasCreated: boolean;
isSpinningUp: boolean;
}
/**
* Container manager orchestrates agent container lifecycle
*/
@@ -30,11 +36,13 @@ export class ContainerManager {
/**
* Ensure user's container is running and ready
* Returns the MCP endpoint URL
* If waitForReady is false, will return immediately after creating the deployment
*/
async ensureContainerRunning(
userId: string,
license: UserLicense
): Promise<{ mcpEndpoint: string; wasCreated: boolean }> {
license: UserLicense,
waitForReady: boolean = true
): Promise<EnsureContainerResult> {
const deploymentName = KubernetesClient.getDeploymentName(userId);
const mcpEndpoint = KubernetesClient.getMcpEndpoint(userId, this.config.namespace);
@@ -49,18 +57,20 @@ export class ContainerManager {
if (exists) {
this.config.logger.info({ userId, deploymentName }, 'Container deployment already exists');
// Wait for it to be ready (in case it's starting up)
const ready = await this.config.k8sClient.waitForDeploymentReady(deploymentName, 30000);
if (waitForReady) {
// Wait for it to be ready (in case it's starting up)
const ready = await this.config.k8sClient.waitForDeploymentReady(deploymentName, 30000);
if (!ready) {
this.config.logger.warn(
{ userId, deploymentName },
'Existing deployment not ready within timeout'
);
// Continue anyway - might be an image pull or other transient issue
if (!ready) {
this.config.logger.warn(
{ userId, deploymentName },
'Existing deployment not ready within timeout'
);
// Continue anyway - might be an image pull or other transient issue
}
}
return { mcpEndpoint, wasCreated: false };
return { mcpEndpoint, wasCreated: false, isSpinningUp: false };
}
// Create new deployment
@@ -76,6 +86,12 @@ export class ContainerManager {
await this.config.k8sClient.createAgentDeployment(spec);
// If not waiting for ready, return immediately with spinning up status
if (!waitForReady) {
this.config.logger.info({ userId, deploymentName }, 'Container created, spinning up...');
return { mcpEndpoint, wasCreated: true, isSpinningUp: true };
}
// Wait for deployment to be ready
const ready = await this.config.k8sClient.waitForDeploymentReady(deploymentName, 120000);
@@ -87,7 +103,16 @@ export class ContainerManager {
this.config.logger.info({ userId, mcpEndpoint }, 'Container is ready');
return { mcpEndpoint, wasCreated: true };
return { mcpEndpoint, wasCreated: true, isSpinningUp: false };
}
/**
* Wait for a deployment to become ready
* Used for background polling after initial creation
*/
async waitForContainerReady(userId: string, timeoutMs: number = 120000): Promise<boolean> {
const deploymentName = KubernetesClient.getDeploymentName(userId);
return await this.config.k8sClient.waitForDeploymentReady(deploymentName, timeoutMs);
}
/**

View File

@@ -84,11 +84,14 @@ spec:
volumeMounts:
- name: agent-data
mountPath: /app/data
- name: agent-config
mountPath: /app/config
readOnly: true
- name: tmp
mountPath: /tmp
- name: shared-run
mountPath: /var/run/agent
livenessProbe:
httpGet:
path: /health
@@ -96,14 +99,14 @@ spec:
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /ready
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: Always
@@ -148,6 +151,9 @@ spec:
- name: agent-data
persistentVolumeClaim:
claimName: {{pvcName}}
- name: agent-config
configMap:
name: agent-config
- name: tmp
emptyDir:
medium: Memory
@@ -156,7 +162,7 @@ spec:
emptyDir:
medium: Memory
sizeLimit: 1Mi
restartPolicy: Always
terminationGracePeriodSeconds: 30
---

View File

@@ -83,11 +83,14 @@ spec:
volumeMounts:
- name: agent-data
mountPath: /app/data
- name: agent-config
mountPath: /app/config
readOnly: true
- name: tmp
mountPath: /tmp
- name: shared-run
mountPath: /var/run/agent
livenessProbe:
httpGet:
path: /health
@@ -147,6 +150,9 @@ spec:
- name: agent-data
persistentVolumeClaim:
claimName: {{pvcName}}
- name: agent-config
configMap:
name: agent-config
- name: tmp
emptyDir:
medium: Memory
@@ -155,7 +161,7 @@ spec:
emptyDir:
medium: Memory
sizeLimit: 1Mi
restartPolicy: Always
terminationGracePeriodSeconds: 30
---

View File

@@ -83,11 +83,14 @@ spec:
volumeMounts:
- name: agent-data
mountPath: /app/data
- name: agent-config
mountPath: /app/config
readOnly: true
- name: tmp
mountPath: /tmp
- name: shared-run
mountPath: /var/run/agent
livenessProbe:
httpGet:
path: /health
@@ -95,14 +98,14 @@ spec:
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /ready
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: Always
@@ -147,6 +150,9 @@ spec:
- name: agent-data
persistentVolumeClaim:
claimName: {{pvcName}}
- name: agent-config
configMap:
name: agent-config
- name: tmp
emptyDir:
medium: Memory
@@ -155,7 +161,7 @@ spec:
emptyDir:
medium: Memory
sizeLimit: 1Mi
restartPolicy: Always
terminationGracePeriodSeconds: 30
---

View File

@@ -13,6 +13,11 @@ import { WebSocketHandler } from './channels/websocket-handler.js';
import { TelegramHandler } from './channels/telegram-handler.js';
import { KubernetesClient } from './k8s/client.js';
import { ContainerManager } from './k8s/container-manager.js';
import { ZMQRelayClient } from './clients/zmq-relay-client.js';
import { IcebergClient } from './clients/iceberg-client.js';
import { OHLCService } from './services/ohlc-service.js';
import { SymbolIndexService } from './services/symbol-index-service.js';
import { SymbolRoutes } from './routes/symbol-routes.js';
// Catch unhandled promise rejections for better debugging
process.on('unhandledRejection', (reason: any, promise) => {
@@ -114,11 +119,19 @@ function loadConfig() {
iceberg: {
catalogUri: configData.iceberg?.catalog_uri || process.env.ICEBERG_CATALOG_URI || 'http://iceberg-catalog:8181',
namespace: configData.iceberg?.namespace || process.env.ICEBERG_NAMESPACE || 'gateway',
ohlcCatalogUri: configData.iceberg?.ohlc_catalog_uri || process.env.ICEBERG_OHLC_CATALOG_URI,
ohlcNamespace: configData.iceberg?.ohlc_namespace || process.env.ICEBERG_OHLC_NAMESPACE || 'trading',
s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT,
s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY,
s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY,
},
// Relay configuration (for historical data)
relay: {
requestEndpoint: configData.relay?.request_endpoint || process.env.RELAY_REQUEST_ENDPOINT || 'tcp://relay:5559',
notificationEndpoint: configData.relay?.notification_endpoint || process.env.RELAY_NOTIFICATION_ENDPOINT || 'tcp://relay:5558',
},
// Embedding configuration (for RAG)
embedding: {
provider: (configData.embedding?.provider || process.env.EMBEDDING_PROVIDER || 'ollama') as 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none',
@@ -224,10 +237,18 @@ const qdrantClient = new QdrantClient(config.qdrant, app.log);
// Initialize Iceberg client (for durable storage)
// const icebergClient = new IcebergClient(config.iceberg, app.log);
// Initialize ZMQ Relay client (for historical data)
// Note: onMetadataUpdate callback will be set after symbolIndexService is initialized
const zmqRelayClient = new ZMQRelayClient({
relayRequestEndpoint: config.relay.requestEndpoint,
relayNotificationEndpoint: config.relay.notificationEndpoint,
}, app.log);
app.log.info({
redis: config.redisUrl,
qdrant: config.qdrant.url,
iceberg: config.iceberg.catalogUri,
relay: config.relay.requestEndpoint,
embeddingProvider: config.embedding.provider,
}, 'Harness storage clients configured');
@@ -280,12 +301,32 @@ const eventRouter = new EventRouter({
});
app.log.debug('Event router initialized');
// Initialize OHLC service (optional - only if relay is available)
let ohlcService: OHLCService | undefined;
try {
const icebergClient = new IcebergClient(config.iceberg, app.log);
ohlcService = new OHLCService({
icebergClient,
relayClient: zmqRelayClient,
logger: app.log,
});
app.log.info('OHLC service initialized');
} catch (error) {
app.log.warn({ error }, 'Failed to initialize OHLC service - historical data will not be available');
}
// Initialize Symbol Index Service (deferred to after server starts)
let symbolIndexService: SymbolIndexService | undefined;
// Initialize channel handlers
const websocketHandler = new WebSocketHandler({
authenticator,
containerManager,
providerConfig: config.providerConfig,
sessionRegistry,
eventSubscriber,
ohlcService, // Optional
symbolIndexService, // Optional
});
app.log.debug('WebSocket handler initialized');
@@ -317,6 +358,13 @@ app.log.debug('Registering websocket handler...');
websocketHandler.register(app);
app.log.debug('Registering telegram handler...');
telegramHandler.register(app);
// Register symbol routes (service may not be ready yet, but routes will handle this)
app.log.debug('Registering symbol routes...');
const getSymbolService = () => symbolIndexService;
const symbolRoutes = new SymbolRoutes({ getSymbolIndexService: getSymbolService });
symbolRoutes.register(app);
app.log.debug('All routes registered');
// Health check
@@ -408,6 +456,11 @@ const shutdown = async () => {
await eventSubscriber.stop();
await eventRouter.stop();
// Close ZMQ relay client
if (zmqRelayClient.isConnected()) {
await zmqRelayClient.close();
}
// Disconnect Redis
redis.disconnect();
@@ -433,6 +486,15 @@ try {
await redis.connect();
app.log.info('Redis connected');
// Connect to ZMQ Relay
app.log.debug('Connecting to ZMQ Relay...');
try {
await zmqRelayClient.connect();
app.log.info('ZMQ Relay connected');
} catch (error) {
app.log.warn({ error }, 'ZMQ Relay connection failed - historical data will not be available');
}
// Initialize Qdrant collection
app.log.debug('Initializing Qdrant...');
try {
@@ -496,6 +558,34 @@ try {
},
'Gateway server started'
);
// Initialize Symbol Index Service (after server is running)
// This is done asynchronously to not block server startup
(async () => {
try {
const icebergClient = new IcebergClient(config.iceberg, app.log);
const indexService = new SymbolIndexService({
icebergClient,
logger: app.log,
});
await indexService.initialize();
symbolIndexService = indexService;
// Update websocket handler's config so it can use the service
(websocketHandler as any).config.symbolIndexService = indexService;
// Configure ZMQ relay to reload symbol metadata on updates
(zmqRelayClient as any).config.onMetadataUpdate = async () => {
app.log.info('Reloading symbol metadata from Iceberg');
await indexService.initialize();
app.log.info({ stats: indexService.getStats() }, 'Symbol metadata reloaded');
};
app.log.info({ stats: symbolIndexService.getStats() }, 'Symbol index service initialized');
} catch (error) {
app.log.warn({ error }, 'Failed to initialize symbol index service - symbol search will not be available');
}
})();
} catch (error) {
app.log.error({ error }, 'Failed to start server');
process.exit(1);

View File

@@ -0,0 +1,115 @@
/**
* Symbol routes for HTTP API
*
* Provides REST endpoints for symbol search and resolution.
*/
import type { FastifyInstance } from 'fastify';
import type { SymbolIndexService } from '../services/symbol-index-service.js';
export interface SymbolRoutesConfig {
  // Getter rather than a direct reference: the index service is initialized
  // asynchronously after routes are registered, so it is resolved per-request
  // and may still be undefined (handlers answer 503 until it exists).
  getSymbolIndexService: () => SymbolIndexService | undefined;
}
export class SymbolRoutes {
  private getSymbolIndexService: () => SymbolIndexService | undefined;

  constructor(config: SymbolRoutesConfig) {
    this.getSymbolIndexService = config.getSymbolIndexService;
  }

  /**
   * Register symbol routes with Fastify.
   *
   * Routes:
   * - GET /symbols/search?q=...&limit=...  — substring search over the index
   * - GET /symbols/stats                   — index statistics
   * - GET /symbols/*                       — resolve a ticker; wildcard so
   *   tickers containing slashes (e.g. "BINANCE:BTC/USDT") are captured whole
   *
   * The index service is looked up per-request because it is created
   * asynchronously after the server starts; until then every route
   * responds 503.
   */
  register(app: FastifyInstance): void {
    // Search symbols
    app.get('/symbols/search', async (request, reply) => {
      const symbolIndexService = this.getSymbolIndexService();
      if (!symbolIndexService) {
        return reply.code(503).send({
          error: 'Symbol index service not ready',
          message: 'Service is still initializing, please try again in a moment',
        });
      }
      const { q, limit } = request.query as { q?: string; limit?: string };
      if (!q) {
        return reply.code(400).send({
          error: 'Query parameter "q" is required',
        });
      }
      // Guard against non-numeric or non-positive limit values (previously a
      // NaN from parseInt was passed straight through to search()).
      const parsedLimit = limit ? parseInt(limit, 10) : NaN;
      const limitNum = Number.isFinite(parsedLimit) && parsedLimit > 0 ? parsedLimit : 30;
      try {
        // BUG FIX: search() is async — the missing await previously caused a
        // pending Promise to be serialized into the response body.
        const results = await symbolIndexService.search(q, limitNum);
        return { results };
      } catch (error: any) {
        app.log.error({ error: error.message }, 'Symbol search failed');
        return reply.code(500).send({
          error: 'Symbol search failed',
          message: error.message,
        });
      }
    });

    // Get symbol index stats.
    // Safe alongside the wildcard below: Fastify's router matches static
    // routes before wildcards, so /symbols/stats is not shadowed.
    app.get('/symbols/stats', async (_request, reply) => {
      const symbolIndexService = this.getSymbolIndexService();
      if (!symbolIndexService) {
        return reply.code(503).send({
          error: 'Symbol index service not ready',
          message: 'Service is still initializing, please try again in a moment',
        });
      }
      try {
        const stats = symbolIndexService.getStats();
        return stats;
      } catch (error: any) {
        app.log.error({ error: error.message }, 'Failed to get symbol stats');
        return reply.code(500).send({
          error: 'Failed to get symbol stats',
          message: error.message,
        });
      }
    });

    // Resolve symbol (use wildcard to capture ticker with slashes like BINANCE:BTC/USDT)
    app.get('/symbols/*', async (request, reply) => {
      const symbolIndexService = this.getSymbolIndexService();
      if (!symbolIndexService) {
        return reply.code(503).send({
          error: 'Symbol index service not ready',
          message: 'Service is still initializing, please try again in a moment',
        });
      }
      // Extract ticker from wildcard path (everything after /symbols/)
      const ticker = (request.params as any)['*'];
      try {
        // BUG FIX: resolveSymbol() is async — without await the Promise was
        // always truthy, so the 404 branch below could never fire and a
        // Promise was returned as the response body.
        const symbolInfo = await symbolIndexService.resolveSymbol(ticker);
        if (!symbolInfo) {
          return reply.code(404).send({
            error: 'Symbol not found',
            ticker,
          });
        }
        return symbolInfo;
      } catch (error: any) {
        app.log.error({ error: error.message, ticker }, 'Symbol resolution failed');
        return reply.code(500).send({
          error: 'Symbol resolution failed',
          message: error.message,
        });
      }
    });
  }
}

View File

@@ -0,0 +1,249 @@
/**
* OHLC Service - High-level API for historical market data
*
* Workflow (mirroring client-py/dexorder/ohlc_client.py):
* 1. Check Iceberg for existing data
* 2. Identify missing ranges
* 3. If complete, return immediately
* 4. Otherwise, request missing data via relay
* 5. Wait for completion notification
* 6. Query Iceberg again for complete dataset
* 7. Return results
*
* This provides transparent caching - clients don't need to know
* whether data came from cache or was fetched on-demand.
*/
import type { FastifyBaseLogger } from 'fastify';
import type { IcebergClient } from '../clients/iceberg-client.js';
import type { ZMQRelayClient } from '../clients/zmq-relay-client.js';
import type {
HistoryResult,
SymbolInfo,
SearchResult,
DatafeedConfig,
TradingViewBar,
} from '../types/ohlc.js';
import {
secondsToMicros,
backendToTradingView,
resolutionToSeconds,
DEFAULT_SUPPORTED_RESOLUTIONS,
} from '../types/ohlc.js';
export interface OHLCServiceConfig {
  icebergClient: IcebergClient; // durable OHLC storage / read-through cache
  relayClient: ZMQRelayClient; // on-demand historical data fetches
  logger: FastifyBaseLogger;
  requestTimeout?: number; // Request timeout in ms (default: 30000)
  // NOTE(review): requestTimeout is not read by OHLCService as written —
  // presumably the relay client enforces its own timeout; confirm before
  // relying on this setting.
}
/**
* OHLC Service
*
* Provides high-level API for fetching OHLC data with smart caching.
*/
export class OHLCService {
  private icebergClient: IcebergClient;
  private relayClient: ZMQRelayClient;
  private logger: FastifyBaseLogger;

  constructor(config: OHLCServiceConfig) {
    this.icebergClient = config.icebergClient;
    this.relayClient = config.relayClient;
    this.logger = config.logger;
    // NOTE(review): config.requestTimeout is accepted but never read here —
    // presumably the relay client applies its own timeout; confirm.
  }

  /**
   * Fetch OHLC data with smart caching
   *
   * @param ticker     Exchange-prefixed ticker (e.g. "BINANCE:BTC/USDT")
   * @param resolution TradingView resolution string ("1", "60", "1D", ...)
   * @param from_time  Range start — Unix timestamp in SECONDS
   * @param to_time    Range end — Unix timestamp in SECONDS
   * @param countback  Optional cap: keep only the newest N bars
   * @returns bars sorted ascending by time; on relay failure returns
   *          { bars: [], noData: true } rather than throwing
   *
   * Steps:
   * 1. Query Iceberg for existing data
   * 2. If complete, return immediately
   * 3. If missing data, request via relay
   * 4. Wait for completion notification
   * 5. Query Iceberg again for complete dataset
   * 6. Return results
   */
  async fetchOHLC(
    ticker: string,
    resolution: string,
    from_time: number, // Unix timestamp in SECONDS
    to_time: number, // Unix timestamp in SECONDS
    countback?: number
  ): Promise<HistoryResult> {
    this.logger.debug({
      ticker,
      resolution,
      from_time,
      to_time,
      countback,
    }, 'Fetching OHLC data');
    // Convert resolution to period_seconds
    const period_seconds = resolutionToSeconds(resolution);
    // Convert times to microseconds (the backend/Iceberg native unit)
    const start_time = secondsToMicros(from_time);
    const end_time = secondsToMicros(to_time);
    // Step 1: Check Iceberg for existing data
    let data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
    // Step 2: Identify missing ranges
    const missingRanges = await this.icebergClient.findMissingOHLCRanges(
      ticker,
      period_seconds,
      start_time,
      end_time
    );
    if (missingRanges.length === 0 && data.length > 0) {
      // All data exists in Iceberg — serve straight from cache
      this.logger.debug({ ticker, resolution, cached: true }, 'OHLC data found in cache');
      return this.formatHistoryResult(data, countback);
    }
    // Step 3: Request missing data via relay
    // NOTE(review): the relay is asked for the FULL [start, end] range, not
    // only the missing sub-ranges — presumably it dedups internally; confirm.
    this.logger.debug({ ticker, resolution, missingRanges: missingRanges.length }, 'Requesting missing OHLC data');
    try {
      // Resolves once the relay publishes the completion notification
      const notification = await this.relayClient.requestHistoricalOHLC(
        ticker,
        period_seconds,
        start_time,
        end_time,
        countback
      );
      this.logger.info({
        ticker,
        resolution,
        row_count: notification.row_count,
        status: notification.status,
      }, 'Historical data request completed');
      // Step 4: Query Iceberg again for complete dataset
      data = await this.icebergClient.queryOHLC(ticker, period_seconds, start_time, end_time);
      return this.formatHistoryResult(data, countback);
    } catch (error: any) {
      this.logger.error({
        error,
        ticker,
        resolution,
      }, 'Failed to fetch historical data');
      // Return empty result on error — caller sees "no data", not a 500
      return {
        bars: [],
        noData: true,
      };
    }
  }

  /**
   * Format OHLC data as TradingView history result
   *
   * Converts microsecond backend rows to second-based bars, sorts them
   * ascending by time, and — when countback is given — keeps only the
   * newest `countback` bars.
   */
  private formatHistoryResult(data: any[], countback?: number): HistoryResult {
    if (data.length === 0) {
      return {
        bars: [],
        noData: true,
      };
    }
    // Convert to TradingView format (microseconds -> seconds)
    let bars: TradingViewBar[] = data.map(backendToTradingView);
    // Sort by time ascending
    bars.sort((a, b) => a.time - b.time);
    // Apply countback limit if specified — slice(-n) keeps the most recent bars
    if (countback && bars.length > countback) {
      bars = bars.slice(-countback);
    }
    return {
      bars,
      noData: false,
    };
  }

  /**
   * Get datafeed configuration
   *
   * Static capability flags advertised to the TradingView datafeed.
   */
  async getConfig(): Promise<DatafeedConfig> {
    return {
      supported_resolutions: DEFAULT_SUPPORTED_RESOLUTIONS,
      supports_search: true,
      supports_group_request: false,
      supports_marks: false,
      supports_timescale_marks: false,
      supports_time: false,
    };
  }

  /**
   * Search symbols
   *
   * For now, stub with default symbol — only queries containing "btc" or
   * "binance" match; everything else returns an empty list.
   */
  async searchSymbols(
    query: string,
    type?: string,
    exchange?: string,
    limit: number = 30
  ): Promise<SearchResult[]> {
    this.logger.debug({ query, type, exchange, limit }, 'Searching symbols');
    // TODO: Implement central symbol registry
    // For now, return default symbol if query matches
    if (query.toLowerCase().includes('btc') || query.toLowerCase().includes('binance')) {
      return [{
        symbol: 'BINANCE:BTC/USDT',
        full_name: 'BINANCE:BTC/USDT',
        description: 'Bitcoin / Tether USD',
        exchange: 'BINANCE',
        ticker: 'BINANCE:BTC/USDT',
        type: 'crypto',
      }];
    }
    return [];
  }

  /**
   * Resolve symbol metadata
   *
   * For now, stub with default symbol.
   *
   * @throws Error when the symbol is not the hard-coded BTC/USDT default
   */
  async resolveSymbol(symbol: string): Promise<SymbolInfo> {
    this.logger.debug({ symbol }, 'Resolving symbol');
    // TODO: Implement central symbol registry
    // For now, return default symbol info for BINANCE:BTC/USDT
    if (symbol === 'BINANCE:BTC/USDT' || symbol === 'BTC/USDT') {
      return {
        symbol: 'BINANCE:BTC/USDT',
        name: 'BINANCE:BTC/USDT',
        ticker: 'BINANCE:BTC/USDT',
        description: 'Bitcoin / Tether USD',
        type: 'crypto',
        session: '24x7',
        timezone: 'Etc/UTC',
        exchange: 'BINANCE',
        minmov: 1,
        pricescale: 100,
        has_intraday: true,
        has_daily: true,
        has_weekly_and_monthly: false,
        supported_resolutions: DEFAULT_SUPPORTED_RESOLUTIONS,
        data_status: 'streaming',
      };
    }
    throw new Error(`Symbol not found: ${symbol}`);
  }
}

View File

@@ -0,0 +1,266 @@
/**
* Symbol Index Service
*
* Provides fast in-memory search for symbol metadata.
* Loads initial data from Iceberg and stays synced via Kafka subscription.
*/
import type { FastifyBaseLogger } from 'fastify';
import type { IcebergClient } from '../clients/iceberg-client.js';
import type { SearchResult, SymbolInfo, SymbolMetadata } from '../types/ohlc.js';
import { DEFAULT_SUPPORTED_RESOLUTIONS } from '../types/ohlc.js';
export interface SymbolIndexServiceConfig {
  icebergClient: IcebergClient; // source of the symbol_metadata table
  logger: FastifyBaseLogger;
}
/**
* Symbol Index Service
*
* Maintains an in-memory index of all available symbols for fast search.
*/
export class SymbolIndexService {
  private icebergClient: IcebergClient;
  private logger: FastifyBaseLogger;
  // Index keyed by "EXCHANGE:MARKET_ID" (e.g. "BINANCE:BTC/USDT")
  private symbols: Map<string, SymbolMetadata> = new Map();
  private initialized: boolean = false;

  constructor(config: SymbolIndexServiceConfig) {
    this.icebergClient = config.icebergClient;
    this.logger = config.logger;
  }

  /**
   * Initialize (or re-initialize) the index by loading symbols from the
   * Iceberg symbol_metadata table.
   *
   * Fixes vs previous version:
   * - builds the index into a fresh Map and swaps it in, so a reload never
   *   exposes a half-populated index and symbols deleted upstream no longer
   *   linger in memory;
   * - guards against a non-array result from queryAllSymbols() (the debug
   *   log's symbols[0] previously threw on undefined).
   *
   * Never throws — on failure the index stays uninitialized and the load is
   * retried lazily on the next request (see ensureInitialized).
   */
  async initialize(): Promise<void> {
    this.logger.info('Initializing symbol index from Iceberg');
    try {
      const rows = await this.icebergClient.queryAllSymbols();
      // Normalize to an array so the logging below cannot throw
      const symbols: SymbolMetadata[] = Array.isArray(rows) ? rows : [];
      this.logger.info({
        symbolsType: typeof rows,
        symbolsIsArray: Array.isArray(rows),
        symbolsLength: symbols.length,
        firstSymbol: symbols[0],
      }, 'Loaded symbols debug info');
      // Track unique keys for debugging; build into a fresh map, then swap
      const uniqueKeys = new Set<string>();
      const next = new Map<string, SymbolMetadata>();
      for (const symbol of symbols) {
        const key = `${symbol.exchange_id}:${symbol.market_id}`;
        uniqueKeys.add(key);
        next.set(key, symbol);
      }
      this.symbols = next;
      this.initialized = true;
      this.logger.info({
        count: this.symbols.size,
        totalRows: symbols.length,
        uniqueKeys: uniqueKeys.size,
        sampleKeys: Array.from(uniqueKeys).slice(0, 5),
      }, 'Symbol index initialized');
    } catch (error: any) {
      this.logger.warn({ error: error.message }, 'Failed to initialize symbol index (will retry on first request)');
      // Don't throw - allow lazy loading
    }
  }

  /**
   * Ensure index is initialized, retrying the Iceberg load on first use if
   * the startup initialization failed.
   */
  private async ensureInitialized(): Promise<void> {
    if (this.initialized) {
      return;
    }
    this.logger.info('Lazy-loading symbol index');
    await this.initialize();
  }

  /**
   * Update or add a single symbol in the index (e.g. from a live metadata
   * update) without a full reload.
   */
  updateSymbol(symbol: SymbolMetadata): void {
    const key = `${symbol.exchange_id}:${symbol.market_id}`;
    this.symbols.set(key, symbol);
    this.logger.debug({ key }, 'Updated symbol in index');
  }

  /**
   * Search for symbols matching a query.
   *
   * Case-insensitive substring matching across ticker (EXCHANGE:MARKET_ID),
   * base asset, quote asset, description and market id. Returns at most
   * `limit` results in index iteration order (no relevance ranking).
   */
  async search(query: string, limit: number = 30): Promise<SearchResult[]> {
    await this.ensureInitialized();
    if (!this.initialized) {
      this.logger.warn('Symbol index not initialized, returning empty results');
      return [];
    }
    const queryLower = query.toLowerCase();
    const results: SearchResult[] = [];
    for (const [key, metadata] of this.symbols) {
      // Match against various fields
      const ticker = key;
      const base = metadata.base_asset || '';
      const quote = metadata.quote_asset || '';
      const desc = metadata.description || '';
      const marketId = metadata.market_id || '';
      if (
        ticker.toLowerCase().includes(queryLower) ||
        base.toLowerCase().includes(queryLower) ||
        quote.toLowerCase().includes(queryLower) ||
        desc.toLowerCase().includes(queryLower) ||
        marketId.toLowerCase().includes(queryLower)
      ) {
        results.push(this.metadataToSearchResult(metadata));
        if (results.length >= limit) {
          break;
        }
      }
    }
    this.logger.debug({ query, count: results.length }, 'Symbol search completed');
    return results;
  }

  /**
   * Resolve symbol metadata by ticker.
   *
   * Accepts "EXCHANGE:MARKET_ID" or a bare "MARKET_ID" (first exchange with
   * a matching market wins). Returns null when the symbol is unknown.
   */
  async resolveSymbol(ticker: string): Promise<SymbolInfo | null> {
    await this.ensureInitialized();
    if (!this.initialized) {
      this.logger.warn('Symbol index not initialized after retry');
      return null;
    }
    // ticker format: "EXCHANGE:MARKET_ID" or just "MARKET_ID"
    let key = ticker;
    // If no exchange prefix, search for first match
    if (!ticker.includes(':')) {
      for (const [k, metadata] of this.symbols) {
        if (metadata.market_id === ticker) {
          key = k;
          break;
        }
      }
    }
    const metadata = this.symbols.get(key);
    if (!metadata) {
      this.logger.debug({ ticker }, 'Symbol not found');
      return null;
    }
    return this.metadataToSymbolInfo(metadata);
  }

  /** Convert SymbolMetadata to a TradingView SearchResult. */
  private metadataToSearchResult(metadata: SymbolMetadata): SearchResult {
    const symbol = metadata.market_id; // Clean format: "BTC/USDT"
    const ticker = `${metadata.exchange_id}:${metadata.market_id}`; // "BINANCE:BTC/USDT"
    const fullName = `${metadata.market_id} (${metadata.exchange_id})`;
    return {
      symbol,
      ticker,
      full_name: fullName,
      description: metadata.description || `${metadata.base_asset}/${metadata.quote_asset} ${metadata.market_type || 'spot'} pair on ${metadata.exchange_id}`,
      exchange: metadata.exchange_id,
      type: metadata.market_type || 'spot',
    };
  }

  /** Convert SymbolMetadata to a full TradingView SymbolInfo. */
  private metadataToSymbolInfo(metadata: SymbolMetadata): SymbolInfo {
    const symbol = metadata.market_id;
    const ticker = `${metadata.exchange_id}:${metadata.market_id}`;
    // Convert supported_period_seconds to resolution strings
    const supportedResolutions = this.periodSecondsToResolutions(metadata.supported_period_seconds || []);
    // tick_denom is 10^n where n is the number of decimal places; TradingView's
    // pricescale is the same value. Default to 100 (2 decimals) when absent.
    const pricescale = metadata.tick_denom ? Number(metadata.tick_denom) : 100;
    return {
      symbol,
      ticker,
      name: symbol,
      description: metadata.description || `${metadata.base_asset}/${metadata.quote_asset} ${metadata.market_type || 'spot'} pair on ${metadata.exchange_id}`,
      type: metadata.market_type || 'spot',
      exchange: metadata.exchange_id,
      timezone: 'Etc/UTC',
      session: '24x7',
      supported_resolutions: supportedResolutions.length > 0 ? supportedResolutions : DEFAULT_SUPPORTED_RESOLUTIONS,
      has_intraday: true,
      has_daily: true,
      has_weekly_and_monthly: false,
      pricescale,
      minmov: 1,
      base_currency: metadata.base_asset,
      quote_currency: metadata.quote_asset,
      data_status: 'streaming',
      tick_denominator: metadata.tick_denom ? Number(metadata.tick_denom) : undefined,
      base_denominator: metadata.base_denom ? Number(metadata.base_denom) : undefined,
      quote_denominator: metadata.quote_denom ? Number(metadata.quote_denom) : undefined,
    };
  }

  /**
   * Convert a period_seconds array to TradingView resolution strings.
   *
   * Generalized from a fixed whitelist (3600, 14400, 86400, 604800, 2592000
   * plus sub-hour values): any whole-minute period under a day maps to a
   * minute string and any whole-day period to "ND", covering all original
   * outputs plus values like 7200 ('120') that were silently dropped.
   * Sub-minute periods are now skipped — previously e.g. 30 produced the
   * invalid fractional resolution '0.5'.
   */
  private periodSecondsToResolutions(periods: number[]): string[] {
    const resolutions: string[] = [];
    for (const seconds of periods) {
      if (seconds === 604800) {
        resolutions.push('1W');
      } else if (seconds === 2592000) {
        resolutions.push('1M');
      } else if (seconds >= 86400 && seconds % 86400 === 0) {
        resolutions.push(`${seconds / 86400}D`);
      } else if (seconds >= 60 && seconds < 86400 && seconds % 60 === 0) {
        // Minutes (covers the original 3600 -> '60' and 14400 -> '240' cases)
        resolutions.push(String(seconds / 60));
      }
      // else: no TradingView mapping for this period — skip
    }
    return resolutions;
  }

  /** Get statistics about the symbol index (size + readiness). */
  getStats() {
    return {
      symbolCount: this.symbols.size,
      initialized: this.initialized,
    };
  }
}

221
gateway/src/types/ohlc.ts Normal file
View File

@@ -0,0 +1,221 @@
/**
* OHLC data types and utilities
*
* Handles conversion between:
* - TradingView datafeed format (seconds, OHLCV structure)
* - Backend/Iceberg format (microseconds, ticker prefix)
* - ZMQ protocol format (protobuf messages)
*/
/**
 * TradingView bar format (used by web frontend)
 *
 * Note: time here is in Unix SECONDS, unlike BackendOHLC which carries
 * microseconds — see backendToTradingView for the conversion.
 */
export interface TradingViewBar {
  time: number; // Unix timestamp in SECONDS
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
}
/**
 * Backend OHLC format (from Iceberg)
 */
export interface BackendOHLC {
  timestamp: number; // Unix timestamp in MICROSECONDS
  ticker: string; // presumably exchange-prefixed, e.g. "BINANCE:BTC/USDT" — confirm
  period_seconds: number; // bar period in seconds (e.g. 60 for 1-minute bars)
  open: number;
  high: number;
  low: number;
  close: number;
  volume: number;
}
/**
 * Datafeed configuration (TradingView format)
 *
 * Shape of the capabilities object returned to the TradingView datafeed
 * (see OHLCService.getConfig for the values this gateway advertises).
 */
export interface DatafeedConfig {
  supported_resolutions: string[]; // e.g. ['1', '5', '60', '1D']
  supports_search: boolean; // whether a symbol-search endpoint is available
  supports_group_request: boolean;
  supports_marks: boolean;
  supports_timescale_marks: boolean;
  supports_time: boolean;
}
/**
 * Symbol info (TradingView format)
 * Matches backend.old/src/datasource/schema.py SymbolInfo
 */
export interface SymbolInfo {
  symbol: string; // Clean format (e.g., "BTC/USDT")
  ticker: string; // With exchange prefix (e.g., "BINANCE:BTC/USDT")
  name: string; // Display name
  description: string; // Human-readable description
  type: string; // "crypto", "spot", "futures", etc.
  exchange: string; // Exchange identifier
  timezone: string; // IANA timezone
  session: string; // Trading session (e.g., "24x7")
  supported_resolutions: string[]; // Supported time resolutions
  has_intraday: boolean;
  has_daily: boolean;
  has_weekly_and_monthly: boolean;
  pricescale: number; // Price scale factor (derived from tick_denom — see SymbolIndexService)
  minmov: number; // Minimum price movement
  base_currency?: string; // Base asset (e.g., "BTC")
  quote_currency?: string; // Quote asset (e.g., "USDT")
  data_status?: string; // "streaming", "delayed", etc.
  tick_denominator?: number; // Denominator for price scaling (e.g., 1e6)
  base_denominator?: number; // Denominator for base asset
  quote_denominator?: number; // Denominator for quote asset
}
/**
 * History result (TradingView format)
 */
export interface HistoryResult {
  bars: TradingViewBar[]; // producers sort these ascending by time (see OHLCService.formatHistoryResult)
  noData: boolean; // true when no bars exist for the requested range
  nextTime?: number; // Unix timestamp in SECONDS for pagination
}
/**
 * Search result (TradingView format)
 * Matches backend.old/src/datasource/schema.py SearchResult
 */
export interface SearchResult {
  symbol: string; // Clean format (e.g., "BTC/USDT")
  ticker: string; // With exchange prefix for routing (e.g., "BINANCE:BTC/USDT")
  full_name: string; // Full display name (e.g., "BTC/USDT (BINANCE)")
  description: string; // Human-readable description
  exchange: string; // Exchange identifier
  type: string; // Instrument type ("spot", "futures", etc.)
}
/**
 * ZMQ Protocol Messages (simplified TypeScript representations)
 */
export enum SubmitStatus {
  // NOTE(review): numeric values are assumed to mirror the wire-level
  // protobuf enum — confirm against the protobuf definitions.
  QUEUED = 0,
  DUPLICATE = 1,
  INVALID = 2,
  ERROR = 3,
}
// Outcome of a historical-data request, carried in
// HistoryReadyNotification.status.
// NOTE(review): values assumed to mirror the wire-level protobuf enum — confirm.
export enum NotificationStatus {
  OK = 0,
  NOT_FOUND = 1,
  ERROR = 2,
  TIMEOUT = 3,
}
// Request to fetch historical OHLC data via the relay.
export interface SubmitHistoricalRequest {
  request_id: string; // correlation id, echoed back in SubmitResponse
  ticker: string;
  start_time: bigint; // microseconds
  end_time: bigint; // microseconds
  period_seconds: number;
  limit?: number; // optional row cap — TODO confirm relay semantics
  client_id?: string; // presumably used for notification routing — confirm
}
// Relay's immediate acknowledgement of a SubmitHistoricalRequest.
export interface SubmitResponse {
  request_id: string; // echoes SubmitHistoricalRequest.request_id
  status: SubmitStatus;
  error_message?: string; // populated when status indicates failure — confirm
  notification_topic: string; // presumably where HistoryReadyNotification arrives — confirm
}
// Notification published when a historical-data request has completed
// (successfully or otherwise); identifies the Iceberg table holding the rows.
export interface HistoryReadyNotification {
  request_id: string; // correlates with the originating request
  ticker: string;
  period_seconds: number;
  start_time: bigint; // microseconds
  end_time: bigint; // microseconds
  status: NotificationStatus;
  error_message?: string;
  iceberg_namespace: string;
  iceberg_table: string;
  row_count: number; // rows written for this request
  completed_at: bigint; // microseconds
}
/**
 * Conversion utilities
 */
export function secondsToMicros(seconds: number): bigint {
  // Drop any fractional part first, then scale to microseconds as a bigint.
  const wholeSeconds = Math.floor(seconds);
  return 1000000n * BigInt(wholeSeconds);
}
export function microsToSeconds(micros: bigint | number): number {
  // Normalize to bigint, then use truncating integer division to get whole
  // seconds. Number inputs must already be integral (BigInt() throws otherwise).
  const asBigint = typeof micros === 'bigint' ? micros : BigInt(micros);
  return Number(asBigint / 1000000n);
}
/**
 * Map a backend OHLC record (microsecond timestamp) to a TradingView bar
 * (second timestamp). Price and volume fields are copied through unchanged.
 */
export function backendToTradingView(backend: BackendOHLC): TradingViewBar {
  const { timestamp, open, high, low, close, volume } = backend;
  return { time: microsToSeconds(timestamp), open, high, low, close, volume };
}
/**
 * Convert a TradingView resolution string to a bar period in seconds.
 * Examples: "1" -> 60, "5" -> 300, "60" -> 3600, "1D" -> 86400, "D" -> 86400.
 *
 * TradingView also uses bare "D"/"W"/"M" (no numeric prefix) to mean a count
 * of 1; previously those produced NaN because parseInt('') is NaN. A
 * malformed prefix (e.g. "xD") now throws instead of silently returning NaN.
 *
 * @param resolution TradingView resolution string.
 * @returns Bar period in seconds (months are approximated as 30 days).
 * @throws Error when the resolution is not a plain number of minutes or a
 *   D/W/M-suffixed string with an empty or numeric prefix.
 */
export function resolutionToSeconds(resolution: string): number {
  // Handle numeric resolutions (minutes)
  if (/^\d+$/.test(resolution)) {
    return parseInt(resolution, 10) * 60;
  }
  // Parse the numeric prefix of a D/W/M resolution; bare suffix means 1.
  const parseCount = (prefix: string): number => {
    if (prefix === '') return 1;
    if (!/^\d+$/.test(prefix)) {
      throw new Error(`Unsupported resolution: ${resolution}`);
    }
    return parseInt(prefix, 10);
  };
  // Handle day/week/month resolutions
  if (resolution.endsWith('D')) {
    return parseCount(resolution.slice(0, -1)) * 86400;
  }
  if (resolution.endsWith('W')) {
    return parseCount(resolution.slice(0, -1)) * 7 * 86400;
  }
  if (resolution.endsWith('M')) {
    return parseCount(resolution.slice(0, -1)) * 30 * 86400; // Approximate
  }
  throw new Error(`Unsupported resolution: ${resolution}`);
}
/**
 * Default supported resolutions
 *
 * TradingView resolution strings: bare numbers are minutes, 'D'/'W' suffixes
 * are days/weeks (see resolutionToSeconds). Presumably used as the fallback
 * list when a datasource does not supply its own — confirm at call sites.
 */
export const DEFAULT_SUPPORTED_RESOLUTIONS = [
  '1', '5', '15', '30', '60', '240', '1D', '1W'
];
/**
 * Symbol metadata from Iceberg (backend format)
 * Maps to Market protobuf and Iceberg symbol_metadata table
 *
 * bigint timestamp fields are presumably microseconds since epoch, matching
 * the other ZMQ/Iceberg timestamps in this module — confirm against the
 * Market protobuf definition.
 */
export interface SymbolMetadata {
  exchange_id: string;
  market_id: string;
  market_type?: string; // e.g. "spot"/"futures" — confirm against protobuf
  description?: string;
  base_asset?: string;
  quote_asset?: string;
  tick_denom?: bigint; // denominator for price ticks
  base_denom?: bigint; // denominator for base-asset amounts
  quote_denom?: bigint; // denominator for quote-asset amounts
  supported_period_seconds?: number[]; // bar periods available for this market
  earliest_time?: bigint; // earliest available data (microseconds — confirm)
  updated_at: bigint; // last metadata update (microseconds — confirm)
}

View File

@@ -32,11 +32,18 @@ export const UserLicenseSchema = z.object({
maxTokensPerMessage: z.number(),
rateLimitPerMinute: z.number(),
}),
mcpServerUrl: z.string().url(),
mcpServerUrl: z.string(), // Allow any string including 'pending', URL validation happens later
preferredModel: ModelPreferenceSchema.optional(),
expiresAt: z.date().optional(),
createdAt: z.date(),
updatedAt: z.date(),
expiresAt: z.union([z.date(), z.string(), z.null()]).optional().transform(val => {
if (!val || val === null) return undefined;
return val instanceof Date ? val : new Date(val);
}),
createdAt: z.union([z.date(), z.string()]).transform(val =>
val instanceof Date ? val : new Date(val)
),
updatedAt: z.union([z.date(), z.string()]).transform(val =>
val instanceof Date ? val : new Date(val)
),
});
export type UserLicense = z.infer<typeof UserLicenseSchema>;

View File

@@ -0,0 +1,190 @@
/**
* Container Sync
*
* Handles synchronization of persistent workspace stores with the user container
* via MCP tools. Persistent stores (chartStore, userPreferences, etc.) are
* stored in the container and loaded/saved via MCP tool calls.
*
* Container-side storage: /data/workspace/{store_name}.json
*
* MCP Tools used:
* - workspace_read(store_name) -> dict
* - workspace_write(store_name, data) -> None
* - workspace_patch(store_name, patch) -> dict (new state)
*/
import type { FastifyBaseLogger } from 'fastify';
import type { Operation as JsonPatchOp } from 'fast-json-patch';
import type { MCPClientConnector } from '../harness/mcp-client.js';
/**
 * Result of loading a store from the container.
 */
export interface LoadResult {
  /** True when the store exists in the container. */
  exists: boolean;
  /** The stored state; only set when exists is true. */
  state?: unknown;
  /** Failure reason when the load could not complete. */
  error?: string;
}
/**
 * Result of saving a store to the container.
 */
export interface SaveResult {
  success: boolean;
  /** Failure reason when success is false. */
  error?: string;
}
/**
 * Result of patching a store in the container.
 */
export interface PatchResult {
  success: boolean;
  /** Full store state after the patch was applied container-side. */
  newState?: unknown;
  /** Failure reason when success is false. */
  error?: string;
}
/**
 * Persists workspace stores to the user's container through MCP tool calls.
 *
 * Each public method guards on MCP connectivity, invokes the matching
 * workspace_* tool, and maps both tool-reported errors and thrown exceptions
 * into result objects — these methods never throw.
 */
export class ContainerSync {
  private readonly mcp: MCPClientConnector;
  private readonly log: FastifyBaseLogger;

  constructor(mcpClient: MCPClientConnector, logger: FastifyBaseLogger) {
    this.mcp = mcpClient;
    this.log = logger.child({ component: 'ContainerSync' });
  }

  /**
   * Load a workspace store from the container.
   * Returns the stored state, or exists=false when the store is missing,
   * the MCP client is disconnected, or the tool call fails.
   */
  async loadStore(storeName: string): Promise<LoadResult> {
    if (!this.mcp.isConnected()) {
      this.log.warn({ store: storeName }, 'MCP client not connected, cannot load store');
      return { exists: false, error: 'MCP client not connected' };
    }
    try {
      this.log.debug({ store: storeName }, 'Loading store from container');
      const reply = (await this.mcp.callTool('workspace_read', {
        store_name: storeName,
      })) as { exists: boolean; data?: unknown; error?: string };
      if (reply.error) {
        this.log.warn({ store: storeName, error: reply.error }, 'Container returned error');
        return { exists: false, error: reply.error };
      }
      if (!reply.exists) {
        this.log.debug({ store: storeName }, 'Store does not exist in container');
        return { exists: false };
      }
      this.log.debug({ store: storeName }, 'Loaded store from container');
      return { exists: true, state: reply.data };
    } catch (err) {
      const reason = err instanceof Error ? err.message : 'Unknown error';
      this.log.error({ store: storeName, error: reason }, 'Failed to load store from container');
      return { exists: false, error: reason };
    }
  }

  /**
   * Save a workspace store to the container, overwriting any existing state.
   */
  async saveStore(storeName: string, state: unknown): Promise<SaveResult> {
    if (!this.mcp.isConnected()) {
      this.log.warn({ store: storeName }, 'MCP client not connected, cannot save store');
      return { success: false, error: 'MCP client not connected' };
    }
    try {
      this.log.debug({ store: storeName }, 'Saving store to container');
      const reply = (await this.mcp.callTool('workspace_write', {
        store_name: storeName,
        data: state,
      })) as { success: boolean; error?: string };
      if (reply.error || !reply.success) {
        this.log.warn({ store: storeName, error: reply.error }, 'Failed to save store');
        return { success: false, error: reply.error || 'Unknown error' };
      }
      this.log.debug({ store: storeName }, 'Saved store to container');
      return { success: true };
    } catch (err) {
      const reason = err instanceof Error ? err.message : 'Unknown error';
      this.log.error({ store: storeName, error: reason }, 'Failed to save store to container');
      return { success: false, error: reason };
    }
  }

  /**
   * Apply a JSON patch to a store in the container.
   * On success, newState holds the container's state after the patch.
   */
  async patchStore(storeName: string, patch: JsonPatchOp[]): Promise<PatchResult> {
    if (!this.mcp.isConnected()) {
      this.log.warn({ store: storeName }, 'MCP client not connected, cannot patch store');
      return { success: false, error: 'MCP client not connected' };
    }
    try {
      this.log.debug({ store: storeName, patchOps: patch.length }, 'Patching store in container');
      const reply = (await this.mcp.callTool('workspace_patch', {
        store_name: storeName,
        patch,
      })) as { success: boolean; data?: unknown; error?: string };
      if (reply.error || !reply.success) {
        this.log.warn({ store: storeName, error: reply.error }, 'Failed to patch store');
        return { success: false, error: reply.error || 'Unknown error' };
      }
      this.log.debug({ store: storeName }, 'Patched store in container');
      return { success: true, newState: reply.data };
    } catch (err) {
      const reason = err instanceof Error ? err.message : 'Unknown error';
      this.log.error({ store: storeName, error: reason }, 'Failed to patch store in container');
      return { success: false, error: reason };
    }
  }

  /**
   * Load several stores sequentially. Stores that are missing or fail to
   * load are simply omitted from the returned map.
   */
  async loadAllStores(storeNames: string[]): Promise<Map<string, unknown>> {
    const loaded = new Map<string, unknown>();
    for (const name of storeNames) {
      const outcome = await this.loadStore(name);
      if (outcome.exists && outcome.state !== undefined) {
        loaded.set(name, outcome.state);
      }
    }
    return loaded;
  }

  /**
   * Save several stores sequentially. Individual failures are logged by
   * saveStore and otherwise ignored.
   */
  async saveAllStores(stores: Map<string, unknown>): Promise<void> {
    for (const [name, state] of stores) {
      await this.saveStore(name, state);
    }
  }

  /** Check if MCP client is connected. */
  isConnected(): boolean {
    return this.mcp.isConnected();
  }
}

View File

@@ -0,0 +1,86 @@
/**
* Workspace Module
*
* Provides two-way state synchronization between web clients, gateway, and user containers.
*
* Key components:
* - WorkspaceManager: Per-session state manager with channel-agnostic interface
* - SyncRegistry: Handles JSON patch sync protocol
* - ContainerSync: Persists state to user containers via MCP
*
* Usage:
* ```typescript
* import { WorkspaceManager, ContainerSync, DEFAULT_STORES } from './workspace/index.js';
*
* // Create container sync (optional, for persistent stores)
* const containerSync = new ContainerSync(mcpClient, logger);
*
* // Create workspace manager for session
* const workspace = new WorkspaceManager({
* userId: 'user-123',
* sessionId: 'session-456',
* stores: DEFAULT_STORES,
* containerSync,
* logger,
* });
*
* // Initialize (loads persistent stores from container)
* await workspace.initialize();
*
* // Attach channel adapter
* workspace.setAdapter({
* sendSnapshot: (msg) => socket.send(JSON.stringify(msg)),
* sendPatch: (msg) => socket.send(JSON.stringify(msg)),
* getCapabilities: () => ({ supportsSync: true, ... }),
* });
*
* // Handle sync messages from client
* workspace.handleHello(clientSeqs);
* workspace.handlePatch(storeName, seq, patch);
*
* // Access state
* const chartState = workspace.getState('chartState');
* await workspace.setState('chartState', newState);
*
* // Register triggers (future use)
* const unsub = workspace.onPathChange('/chartState/symbol', (old, new, ctx) => {
* console.log('Symbol changed:', old, '->', new);
* });
*
* // Cleanup
* await workspace.shutdown();
* ```
*/
// Types (type-only re-exports; erased at compile time)
export type {
  SnapshotMessage,
  PatchMessage,
  HelloMessage,
  InboundSyncMessage,
  OutboundSyncMessage,
  StoreConfig,
  ChannelAdapter,
  ChannelCapabilities,
  PathTrigger,
  PathTriggerHandler,
  PathTriggerContext,
  ChartState,
  ChartStore,
  ChannelState,
  ChannelInfo,
  WorkspaceStores,
} from './types.js';
// Runtime value: default store configurations
export { DEFAULT_STORES } from './types.js';
// Sync registry (JSON patch sync protocol)
export { SyncRegistry } from './sync-registry.js';
// Container sync (persistence to user containers via MCP)
export { ContainerSync } from './container-sync.js';
export type { LoadResult, SaveResult, PatchResult } from './container-sync.js';
// Workspace manager (per-session entry point)
export { WorkspaceManager } from './workspace-manager.js';
export type { WorkspaceManagerConfig } from './workspace-manager.js';

View File

@@ -0,0 +1,407 @@
/**
* Sync Registry
*
* Manages synchronized state stores with JSON patch-based updates.
* Ported from backend.old/src/sync/registry.py.
*
* Key features:
* - Sequence-numbered patches for reliable sync
* - History buffer for catchup patches
* - Conflict resolution (frontend wins)
* - Optimistic updates with rollback on conflict
*/
import type { Operation as JsonPatchOp } from 'fast-json-patch';
import fastJsonPatch from 'fast-json-patch';
const { applyPatch, compare: computePatch, deepClone } = fastJsonPatch;
import type { FastifyBaseLogger } from 'fastify';
import type { SnapshotMessage, PatchMessage, StoreConfig } from './types.js';
/**
 * History entry: sequence number and the patch that produced it.
 */
interface HistoryEntry {
  /** Sequence number assigned when this patch was committed. */
  seq: number;
  /** JSON Patch (RFC 6902) operations that advanced the state to seq. */
  patch: JsonPatchOp[];
}
/**
 * Entry for a single synchronized store.
 *
 * Tracks the authoritative state, a monotonically increasing sequence number
 * (bumped once per committed patch), and a bounded history of recent patches
 * used to catch reconnecting clients up without resending full snapshots.
 */
class SyncEntry {
  readonly storeName: string;
  // Authoritative current state (only ever exposed as deep clones).
  private state: unknown;
  // Sequence number of the last committed patch; 0 = initial state.
  private seq: number = 0;
  // Copy of state as of the last commit; diffed against to build patches.
  private lastSnapshot: unknown;
  // Recent committed patches, oldest first, trimmed to historySize.
  private history: HistoryEntry[] = [];
  private readonly historySize: number;
  constructor(storeName: string, initialState: unknown, historySize: number = 50) {
    this.storeName = storeName;
    this.state = deepClone(initialState);
    this.lastSnapshot = deepClone(initialState);
    this.historySize = historySize;
  }
  /**
   * Get current state (deep clone to prevent mutation).
   */
  getState(): unknown {
    return deepClone(this.state);
  }
  /**
   * Get current sequence number.
   */
  getSeq(): number {
    return this.seq;
  }
  /**
   * Set state directly (used for loading from container).
   * Resets sequence to 0 and discards patch history, so any connected
   * client will need a fresh snapshot afterwards.
   */
  setState(newState: unknown): void {
    this.state = deepClone(newState);
    this.lastSnapshot = deepClone(newState);
    this.seq = 0;
    this.history = [];
  }
  /**
   * Compute patch from last snapshot to current state.
   * Returns null if no changes.
   */
  computePatch(): JsonPatchOp[] | null {
    const currentState = deepClone(this.state);
    const patch = computePatch(this.lastSnapshot as any, currentState as any);
    return patch.length > 0 ? patch : null;
  }
  /**
   * Commit a patch to history and update snapshot.
   * Bumps the sequence number and trims history to historySize entries.
   */
  commitPatch(patch: JsonPatchOp[]): void {
    this.seq += 1;
    this.history.push({ seq: this.seq, patch });
    // Trim history if needed
    while (this.history.length > this.historySize) {
      this.history.shift();
    }
    this.lastSnapshot = deepClone(this.state);
  }
  /**
   * Get catchup patches since a given sequence.
   * Returns null if catchup not possible (need full snapshot).
   *
   * NOTE(review): if sinceSeq > this.seq (client ahead of server) while
   * history is non-empty, this returns [] rather than null, so the caller
   * sends nothing and the client stays desynced — confirm whether that case
   * should force a snapshot instead.
   */
  getCatchupPatches(sinceSeq: number): HistoryEntry[] | null {
    if (sinceSeq === this.seq) {
      return [];
    }
    // Check if we have all patches needed
    if (this.history.length === 0 || this.history[0].seq > sinceSeq + 1) {
      return null; // Need full snapshot
    }
    return this.history.filter((entry) => entry.seq > sinceSeq);
  }
  /**
   * Apply a patch to state (used when applying local changes).
   * Does not bump the sequence — callers pair this with commitPatch().
   */
  applyPatch(patch: JsonPatchOp[]): void {
    const result = applyPatch(deepClone(this.state), patch, false, false);
    this.state = result.newDocument;
  }
  /**
   * Apply client patch with conflict resolution.
   * Returns the resolved state and any patches to send back.
   *
   * Three cases, keyed on the client's base sequence:
   * - equal to ours: no conflict, apply and commit directly;
   * - behind ours: conflict — the frontend wins on any path it touched,
   *   non-overlapping backend patches are re-applied on top, and a snapshot
   *   is returned so the client converges;
   * - ahead of ours: should not happen; a snapshot is returned.
   */
  applyClientPatch(
    clientBaseSeq: number,
    patch: JsonPatchOp[],
    logger?: FastifyBaseLogger
  ): { needsSnapshot: boolean; resolvedState?: unknown } {
    try {
      if (clientBaseSeq === this.seq) {
        // No conflict - apply directly
        const currentState = deepClone(this.state);
        const result = applyPatch(currentState, patch, false, false);
        this.state = result.newDocument;
        this.commitPatch(patch);
        logger?.debug(
          { store: this.storeName, seq: this.seq },
          'Applied client patch without conflict'
        );
        return { needsSnapshot: false };
      }
      if (clientBaseSeq < this.seq) {
        // Conflict! Frontend wins.
        logger?.debug(
          { store: this.storeName, clientSeq: clientBaseSeq, serverSeq: this.seq },
          'Conflict detected, frontend wins'
        );
        // Get backend patches since client's base
        const backendPatches: JsonPatchOp[][] = [];
        for (const entry of this.history) {
          if (entry.seq > clientBaseSeq) {
            backendPatches.push(entry.patch);
          }
        }
        // Get paths modified by frontend
        // NOTE(review): exact-path comparison only — a backend op on a child
        // path (e.g. '/a/b' vs frontend '/a') is not treated as overlapping.
        const frontendPaths = new Set(patch.map((op) => op.path));
        // Apply frontend patch first
        const currentState = deepClone(this.state);
        let newState: unknown;
        try {
          const result = applyPatch(currentState, patch, false, false);
          newState = result.newDocument;
        } catch (e) {
          logger?.warn(
            { store: this.storeName, error: e },
            'Failed to apply client patch during conflict resolution'
          );
          return { needsSnapshot: true, resolvedState: this.state };
        }
        // Re-apply backend patches that don't overlap with frontend
        for (const bPatch of backendPatches) {
          const filteredPatch = bPatch.filter((op) => !frontendPaths.has(op.path));
          if (filteredPatch.length > 0) {
            try {
              const result = applyPatch(deepClone(newState), filteredPatch, false, false);
              newState = result.newDocument;
            } catch (e) {
              // Best-effort: a backend patch that no longer applies is dropped.
              logger?.debug(
                { store: this.storeName, error: e },
                'Skipping backend patch during conflict resolution'
              );
            }
          }
        }
        this.state = newState;
        // Compute final patch from last snapshot
        const finalPatch = computePatch(this.lastSnapshot as any, newState as any);
        if (finalPatch.length > 0) {
          this.commitPatch(finalPatch);
        }
        // Send snapshot to converge
        return { needsSnapshot: true, resolvedState: this.state };
      }
      // clientBaseSeq > this.seq - client is ahead, shouldn't happen
      logger?.warn(
        { store: this.storeName, clientSeq: clientBaseSeq, serverSeq: this.seq },
        'Client ahead of server, sending snapshot'
      );
      return { needsSnapshot: true, resolvedState: this.state };
    } catch (e) {
      logger?.error(
        { store: this.storeName, error: e },
        'Unexpected error applying client patch'
      );
      return { needsSnapshot: true, resolvedState: this.state };
    }
  }
}
/**
 * Registry managing multiple synchronized stores.
 *
 * Thin facade over per-store SyncEntry instances: registration, state
 * access, local updates, client catchup, and client-patch application.
 */
export class SyncRegistry {
  private stores = new Map<string, SyncEntry>();
  private log?: FastifyBaseLogger;

  constructor(logger?: FastifyBaseLogger) {
    this.log = logger?.child({ component: 'SyncRegistry' });
  }

  /** Register a store with initial state. */
  register(config: StoreConfig): void {
    this.stores.set(config.name, new SyncEntry(config.name, config.initialState()));
    this.log?.debug({ store: config.name }, 'Registered store');
  }

  /** Check if a store is registered. */
  has(storeName: string): boolean {
    return this.stores.has(storeName);
  }

  /** Get current state of a store (deep-cloned), or undefined if unknown. */
  getState<T = unknown>(storeName: string): T | undefined {
    return this.stores.get(storeName)?.getState() as T | undefined;
  }

  /** Get current sequence number for a store (0 if unknown). */
  getSeq(storeName: string): number {
    return this.stores.get(storeName)?.getSeq() ?? 0;
  }

  /** Set state directly (used for loading from container); resets the sequence. */
  setState(storeName: string, state: unknown): void {
    const entry = this.stores.get(storeName);
    if (!entry) {
      return;
    }
    entry.setState(state);
    this.log?.debug({ store: storeName }, 'Set store state');
  }

  /**
   * Update state locally via an updater function and compute the patch.
   * Returns the committed patch, or null when nothing changed (or the
   * store is unknown).
   */
  updateState(storeName: string, updater: (state: unknown) => unknown): JsonPatchOp[] | null {
    const entry = this.stores.get(storeName);
    if (!entry) {
      return null;
    }
    const before = entry.getState();
    const after = updater(before);
    const patch = computePatch(before as any, after as any);
    if (patch.length === 0) {
      return null;
    }
    // Apply and commit in one step so seq/history/snapshot stay consistent.
    entry.applyPatch(patch);
    entry.commitPatch(patch);
    this.log?.debug(
      { store: storeName, seq: entry.getSeq(), patchOps: patch.length },
      'Updated store state'
    );
    return patch;
  }

  /**
   * Sync a client from its reported sequence numbers.
   * Emits a snapshot per store when catchup is impossible, otherwise the
   * catchup patches it missed.
   */
  syncClient(clientSeqs: Record<string, number>): (SnapshotMessage | PatchMessage)[] {
    const out: (SnapshotMessage | PatchMessage)[] = [];
    for (const [name, entry] of this.stores) {
      const sinceSeq = clientSeqs[name] ?? -1;
      const catchup = entry.getCatchupPatches(sinceSeq);
      if (catchup === null) {
        // History cannot bridge the gap - fall back to a full snapshot.
        out.push({ type: 'snapshot', store: name, seq: entry.getSeq(), state: entry.getState() });
        this.log?.debug(
          { store: name, clientSeq: sinceSeq, serverSeq: entry.getSeq() },
          'Sending snapshot'
        );
        continue;
      }
      for (const { seq, patch } of catchup) {
        out.push({ type: 'patch', store: name, seq, patch });
      }
      if (catchup.length > 0) {
        this.log?.debug({ store: name, patchCount: catchup.length }, 'Sending catchup patches');
      }
    }
    return out;
  }

  /**
   * Apply a patch from the client.
   * Returns a snapshot message when the client must resync, null otherwise.
   */
  applyClientPatch(
    storeName: string,
    clientBaseSeq: number,
    patch: JsonPatchOp[]
  ): SnapshotMessage | null {
    const entry = this.stores.get(storeName);
    if (!entry) {
      this.log?.warn({ store: storeName }, 'Store not found');
      return null;
    }
    const outcome = entry.applyClientPatch(clientBaseSeq, patch, this.log);
    if (!outcome.needsSnapshot) {
      return null;
    }
    return {
      type: 'snapshot',
      store: storeName,
      seq: entry.getSeq(),
      state: outcome.resolvedState ?? entry.getState(),
    };
  }

  /** Get all registered store names. */
  getStoreNames(): string[] {
    return [...this.stores.keys()];
  }

  /** Get all current sequences (for persistence). */
  getAllSeqs(): Record<string, number> {
    const seqs: Record<string, number> = {};
    for (const [name, entry] of this.stores) {
      seqs[name] = entry.getSeq();
    }
    return seqs;
  }

  /** Get all current states (for persistence). */
  getAllStates(): Record<string, unknown> {
    const states: Record<string, unknown> = {};
    for (const [name, entry] of this.stores) {
      states[name] = entry.getState();
    }
    return states;
  }
}

View File

@@ -0,0 +1,239 @@
/**
* Workspace Sync Types
*
* Defines the protocol messages and abstractions for two-way state sync
* between web clients, gateway, and user containers.
*
* The workspace is a unified namespace that:
* - Syncs transient state (chartState) between client and gateway
* - Syncs persistent state (chartStore) between client, gateway, and container
* - Provides triggers for path changes (future use)
* - Is channel-agnostic (works with WebSocket, Telegram, Slack, etc.)
*/
import type { Operation as JsonPatchOp } from 'fast-json-patch';
// =============================================================================
// Protocol Messages
// =============================================================================
/**
 * Full state snapshot for a store.
 * Sent when client connects or when catchup patches are unavailable.
 */
export interface SnapshotMessage {
  type: 'snapshot';
  store: string;
  /** Server sequence number the snapshot corresponds to. */
  seq: number;
  /** Complete store state; replaces anything the client holds. */
  state: unknown;
}
/**
 * Incremental patch for a store.
 * Uses JSON Patch (RFC 6902) format.
 */
export interface PatchMessage {
  type: 'patch';
  store: string;
  /** Server→client: seq after applying; client→server: the client's base seq. */
  seq: number;
  patch: JsonPatchOp[];
}
/**
 * Client hello message with current sequence numbers.
 * Sent on connect to request catchup patches or snapshots.
 */
export interface HelloMessage {
  type: 'hello';
  /** Map of store name -> last sequence number the client has applied. */
  seqs: Record<string, number>;
}
/** Messages from client to gateway */
export type InboundSyncMessage = HelloMessage | PatchMessage;
/** Messages from gateway to client */
export type OutboundSyncMessage = SnapshotMessage | PatchMessage;
// =============================================================================
// Store Configuration
// =============================================================================
/**
 * Configuration for a workspace store.
 * Registered via SyncRegistry.register(); initialState() is invoked once
 * at registration to seed the store.
 */
export interface StoreConfig {
  /** Unique store name (e.g., 'chartState', 'chartStore') */
  name: string;
  /** If true, store is persisted to user container via MCP */
  persistent: boolean;
  /** Factory function returning initial state for new sessions */
  initialState: () => unknown;
}
/**
 * Default store configurations.
 * Additional stores can be registered at runtime.
 */
export const DEFAULT_STORES: StoreConfig[] = [
  {
    // Transient view state for the active chart (not persisted).
    name: 'chartState',
    persistent: false,
    initialState: () => ({
      symbol: 'BINANCE:BTC/USDT',
      start_time: null, // null = let the chart pick its own visible range
      end_time: null,
      interval: '15', // TradingView resolution string (minutes)
      selected_shapes: [],
    }),
  },
  {
    // Drawings and templates, persisted to the user container via MCP.
    name: 'chartStore',
    persistent: true,
    initialState: () => ({
      drawings: {},
      templates: {},
    }),
  },
  {
    // Transient map of currently connected channels.
    name: 'channelState',
    persistent: false,
    initialState: () => ({
      connected: {},
    }),
  },
];
// =============================================================================
// Channel Adapter Interface
// =============================================================================
/**
 * Capabilities that a channel may support.
 */
export interface ChannelCapabilities {
  /** Channel supports sync protocol (snapshot/patch messages) */
  supportsSync: boolean;
  /** Channel supports sending images */
  supportsImages: boolean;
  /** Channel supports markdown formatting */
  supportsMarkdown: boolean;
  /** Channel supports streaming responses */
  supportsStreaming: boolean;
  /** Channel supports TradingView chart embeds */
  supportsTradingViewEmbed: boolean;
}
/**
 * Adapter interface for communication channels.
 * Implemented by WebSocket handler, Telegram handler, etc.
 *
 * Send methods return void (fire-and-forget), so delivery failures must be
 * handled inside the adapter implementation.
 */
export interface ChannelAdapter {
  /** Send a full state snapshot to the client */
  sendSnapshot(msg: SnapshotMessage): void;
  /** Send an incremental patch to the client */
  sendPatch(msg: PatchMessage): void;
  /** Get channel capabilities */
  getCapabilities(): ChannelCapabilities;
}
// =============================================================================
// Path Triggers (Future Use)
// =============================================================================
/**
 * Trigger handler function type.
 * Called when a watched path changes; may return a promise for async work.
 */
export type PathTriggerHandler = (
  oldValue: unknown,
  newValue: unknown,
  context: PathTriggerContext
) => void | Promise<void>;
/**
 * Context passed to trigger handlers.
 */
export interface PathTriggerContext {
  /** Store name where change occurred */
  store: string;
  /** Full path that changed (JSON pointer) */
  path: string;
  /** Current sequence number after change */
  seq: number;
  /** User ID for this workspace */
  userId: string;
  /** Session ID for this workspace */
  sessionId: string;
}
/**
 * Registered path trigger.
 */
export interface PathTrigger {
  /** JSON pointer path to watch (e.g., '/chartState/symbol') */
  path: string;
  /** Handler called when path changes */
  handler: PathTriggerHandler;
}
// =============================================================================
// Store State Types (for type-safe access)
// =============================================================================
/**
 * Chart state - transient, tracks current view.
 */
export interface ChartState {
  /** Exchange-prefixed ticker, e.g. 'BINANCE:BTC/USDT' (see DEFAULT_STORES). */
  symbol: string;
  /** Visible range start; null lets the chart choose (units not shown here — confirm seconds vs ms). */
  start_time: number | null;
  end_time: number | null;
  /** TradingView resolution string, e.g. '15'. */
  interval: string;
  /** IDs of currently selected drawing shapes. */
  selected_shapes: string[];
}
/**
 * Chart store - persistent, stores drawings and templates.
 */
export interface ChartStore {
  drawings: Record<string, unknown>;
  templates: Record<string, unknown>;
}
/**
 * Channel state - transient, tracks connected channels.
 */
export interface ChannelState {
  /** Keyed by channel identifier — confirm key format at call sites. */
  connected: Record<string, ChannelInfo>;
}
/**
 * Information about a connected channel.
 */
export interface ChannelInfo {
  type: string;
  /** Connection time (epoch units not shown here — presumably ms; confirm). */
  connectedAt: number;
  capabilities: ChannelCapabilities;
}
/**
 * Map of store names to their state types.
 */
export interface WorkspaceStores {
  chartState: ChartState;
  chartStore: ChartStore;
  channelState: ChannelState;
  [key: string]: unknown;
}

View File

@@ -0,0 +1,460 @@
/**
* Workspace Manager
*
* Central manager for workspace state synchronization across channels.
* Provides a channel-agnostic interface for:
* - Two-way sync of transient state (client ↔ gateway)
* - Two-way sync of persistent state (client ↔ gateway ↔ container)
* - Path-based change triggers (future use)
*
* Each user session gets one WorkspaceManager instance.
* Multiple channels (WebSocket, Telegram, etc.) can attach to the same workspace.
*/
import type { FastifyBaseLogger } from 'fastify';
import type { Operation as JsonPatchOp } from 'fast-json-patch';
import { SyncRegistry } from './sync-registry.js';
import type { ContainerSync } from './container-sync.js';
import type {
StoreConfig,
ChannelAdapter,
PathTrigger,
PathTriggerHandler,
PathTriggerContext,
WorkspaceStores,
} from './types.js';
import { DEFAULT_STORES } from './types.js';
/** Configuration for constructing a WorkspaceManager. */
export interface WorkspaceManagerConfig {
  userId: string;
  sessionId: string;
  /** Stores to register for this session (e.g. DEFAULT_STORES). */
  stores: StoreConfig[];
  /** Optional container persistence; without it, persistent stores keep their initial state. */
  containerSync?: ContainerSync;
  logger: FastifyBaseLogger;
}
/**
* Manages workspace state for a user session.
*/
export class WorkspaceManager {
private userId: string;
private sessionId: string;
private registry: SyncRegistry;
private containerSync?: ContainerSync;
private logger: FastifyBaseLogger;
private stores: StoreConfig[];
// Current channel adapter (WebSocket, Telegram, etc.)
private adapter: ChannelAdapter | null = null;
// Path triggers for change notifications
private triggers: PathTrigger[] = [];
// Track which stores are dirty (changed since last container sync)
private dirtyStores = new Set<string>();
// Track initialization state
private initialized = false;
constructor(config: WorkspaceManagerConfig) {
this.userId = config.userId;
this.sessionId = config.sessionId;
this.stores = config.stores;
this.containerSync = config.containerSync;
this.logger = config.logger.child({ component: 'WorkspaceManager', sessionId: config.sessionId });
this.registry = new SyncRegistry(this.logger);
// Register all stores
for (const store of this.stores) {
this.registry.register(store);
}
}
/**
* Initialize workspace - load persistent stores from container.
*/
async initialize(): Promise<void> {
if (this.initialized) {
return;
}
this.logger.info('Initializing workspace');
// Load persistent stores from container
if (this.containerSync?.isConnected()) {
const persistentStores = this.stores.filter((s) => s.persistent).map((s) => s.name);
if (persistentStores.length > 0) {
this.logger.debug({ stores: persistentStores }, 'Loading persistent stores from container');
const states = await this.containerSync.loadAllStores(persistentStores);
for (const [storeName, state] of states) {
this.registry.setState(storeName, state);
this.logger.debug({ store: storeName }, 'Loaded persistent store');
}
}
} else {
this.logger.debug('Container sync not available, using initial state for persistent stores');
}
this.initialized = true;
this.logger.info('Workspace initialized');
}
/**
* Shutdown workspace - save dirty persistent stores to container.
*/
async shutdown(): Promise<void> {
if (!this.initialized) {
return;
}
this.logger.info('Shutting down workspace');
// Save dirty persistent stores
await this.saveDirtyStores();
this.adapter = null;
this.initialized = false;
this.logger.info('Workspace shut down');
}
// ===========================================================================
// Channel Adapter Management
// ===========================================================================
/**
* Set the channel adapter for sending messages.
* Only one adapter can be active at a time.
*/
setAdapter(adapter: ChannelAdapter): void {
this.adapter = adapter;
this.logger.debug('Channel adapter set');
}
/**
* Clear the channel adapter.
*/
clearAdapter(): void {
this.adapter = null;
this.logger.debug('Channel adapter cleared');
}
/**
* Check if an adapter is connected.
*/
hasAdapter(): boolean {
return this.adapter !== null;
}
// ===========================================================================
// Sync Protocol Handlers (called by channel adapters)
// ===========================================================================
/**
* Handle hello message from client.
* Sends snapshots or catchup patches for all stores.
*/
async handleHello(clientSeqs: Record<string, number>): Promise<void> {
if (!this.adapter) {
this.logger.warn('No adapter connected, cannot respond to hello');
return;
}
this.logger.debug({ clientSeqs }, 'Handling hello');
const messages = this.registry.syncClient(clientSeqs);
for (const msg of messages) {
if (msg.type === 'snapshot') {
this.adapter.sendSnapshot(msg);
} else {
this.adapter.sendPatch(msg);
}
}
this.logger.debug({ messageCount: messages.length }, 'Sent sync messages');
}
/**
* Handle patch message from client.
* Applies patch and may send snapshot back on conflict.
*/
async handlePatch(storeName: string, clientSeq: number, patch: JsonPatchOp[]): Promise<void> {
this.logger.debug({ store: storeName, clientSeq, patchOps: patch.length }, 'Handling client patch');
// Get old state for triggers
const oldState = this.registry.getState(storeName);
// Apply patch
const response = this.registry.applyClientPatch(storeName, clientSeq, patch);
// Mark as dirty if persistent
const storeConfig = this.stores.find((s) => s.name === storeName);
if (storeConfig?.persistent) {
this.dirtyStores.add(storeName);
}
// Send response if needed
if (response && this.adapter) {
this.adapter.sendSnapshot(response);
}
// Fire triggers
const newState = this.registry.getState(storeName);
await this.fireTriggers(storeName, oldState, newState, patch);
}
// ===========================================================================
// State Access (for gateway code)
// ===========================================================================
/**
* Get current state of a store.
*/
getState<K extends keyof WorkspaceStores>(storeName: K): WorkspaceStores[K] | undefined;
getState<T = unknown>(storeName: string): T | undefined;
getState<T = unknown>(storeName: string): T | undefined {
return this.registry.getState<T>(storeName);
}
/**
* Update state of a store and notify client.
*/
async setState(storeName: string, state: unknown): Promise<void> {
// Get old state for triggers
const oldState = this.registry.getState(storeName);
// Update state (this computes and commits a patch)
const patch = this.registry.updateState(storeName, () => state);
if (patch) {
// Mark as dirty if persistent
const storeConfig = this.stores.find((s) => s.name === storeName);
if (storeConfig?.persistent) {
this.dirtyStores.add(storeName);
}
// Send patch to client
if (this.adapter) {
this.adapter.sendPatch({
type: 'patch',
store: storeName,
seq: this.registry.getSeq(storeName),
patch,
});
}
// Fire triggers
await this.fireTriggers(storeName, oldState, state, patch);
}
}
/**
* Update state with a partial merge.
*/
async updateState<T extends Record<string, unknown>>(
storeName: string,
updates: Partial<T>
): Promise<void> {
const current = this.registry.getState<T>(storeName);
if (current && typeof current === 'object') {
await this.setState(storeName, { ...current, ...updates });
}
}
/**
 * Get all store names known to the underlying registry.
 */
getStoreNames(): string[] {
return this.registry.getStoreNames();
}
// ===========================================================================
// Path Triggers
// ===========================================================================
/**
 * Register a trigger that fires when a matching path changes.
 *
 * @param path - Exact path or prefix pattern ending in '/*'.
 * @param handler - Invoked with (oldValue, newValue, context) on each match.
 * @returns Unsubscribe function; safe to call more than once.
 */
onPathChange(path: string, handler: PathTriggerHandler): () => void {
  const entry: PathTrigger = { path, handler };
  this.triggers.push(entry);
  this.logger.debug({ path }, 'Registered path trigger');
  return () => {
    // indexOf returns -1 after the first removal, making repeat calls no-ops
    const at = this.triggers.indexOf(entry);
    if (at >= 0) {
      this.triggers.splice(at, 1);
      this.logger.debug({ path }, 'Unregistered path trigger');
    }
  };
}
/**
 * Fire triggers for paths affected by a patch.
 *
 * For each patch operation, matches the full path '/<store><op.path>'
 * against every registered trigger pattern and awaits matching handlers
 * sequentially. Handler errors are logged and do not stop later triggers.
 */
private async fireTriggers(
  storeName: string,
  oldState: unknown,
  newState: unknown,
  patch: JsonPatchOp[]
): Promise<void> {
  if (this.triggers.length === 0) {
    return;
  }
  // seq is the same for every op in this patch; read it once
  const seq = this.registry.getSeq(storeName);
  for (const op of patch) {
    const fullPath = `/${storeName}${op.path}`;
    for (const trigger of this.triggers) {
      if (!this.pathMatches(fullPath, trigger.path)) {
        continue;
      }
      // Build a fresh context per invocation: handlers are async and may
      // retain the object, so a single shared mutable context would leak
      // later paths into earlier handlers.
      const context: PathTriggerContext = {
        store: storeName,
        path: fullPath,
        seq,
        userId: this.userId,
        sessionId: this.sessionId,
      };
      // Extract old and new values at the op's path (undefined if absent)
      const oldValue = this.getValueAtPath(oldState, op.path);
      const newValue = this.getValueAtPath(newState, op.path);
      try {
        await trigger.handler(oldValue, newValue, context);
      } catch (error) {
        // A failing handler must not block other triggers or the patch flow
        this.logger.error(
          { path: trigger.path, error },
          'Error in path trigger handler'
        );
      }
    }
  }
}
/**
 * Check whether a concrete path matches a trigger pattern.
 *
 * Supports exact equality and trailing-wildcard prefix patterns:
 * '/chartState/*' matches '/chartState/symbol' (and the literal pattern
 * string itself), but not '/chartState' alone.
 */
private pathMatches(path: string, pattern: string): boolean {
  if (pattern.endsWith('/*')) {
    // Wildcard: match the pattern itself or anything strictly below the prefix
    const prefix = pattern.slice(0, -2);
    return path === pattern || path.startsWith(prefix + '/');
  }
  return path === pattern;
}
/**
 * Get the value at a JSON Pointer path (RFC 6901), e.g. '/a/b/0'.
 *
 * @param obj - Root value to descend into.
 * @param path - JSON Pointer; '' or '/' returns obj itself.
 * @returns The value at the path, or undefined when any segment is missing.
 */
private getValueAtPath(obj: unknown, path: string): unknown {
  if (!path || path === '/') {
    return obj;
  }
  const parts = path.split('/').filter(Boolean);
  let current: unknown = obj;
  for (const part of parts) {
    if (current === null || current === undefined) {
      return undefined;
    }
    // Decode RFC 6901 escapes: '~1' encodes '/', '~0' encodes '~'.
    // '~1' must be decoded before '~0' so '~01' yields '~1', not '/'.
    const key = part.replace(/~1/g, '/').replace(/~0/g, '~');
    current = (current as Record<string, unknown>)[key];
  }
  return current;
}
// ===========================================================================
// Container Persistence
// ===========================================================================
/**
 * Flush all dirty persistent stores to the container in one batch.
 *
 * Skips silently (debug log only) when the container sync is unavailable or
 * nothing is dirty. Only stores configured as persistent with defined state
 * are written; the dirty set is cleared after a successful batch save.
 */
async saveDirtyStores(): Promise<void> {
  if (!this.containerSync?.isConnected()) {
    this.logger.debug('Container sync not available, skipping save');
    return;
  }
  if (this.dirtyStores.size === 0) {
    this.logger.debug('No dirty stores to save');
    return;
  }
  // Collect current state for each dirty store that is marked persistent
  const pending = new Map<string, unknown>();
  for (const name of this.dirtyStores) {
    const config = this.stores.find((s) => s.name === name);
    if (!config?.persistent) {
      continue;
    }
    const snapshot = this.registry.getState(name);
    if (snapshot !== undefined) {
      pending.set(name, snapshot);
    }
  }
  if (pending.size === 0) {
    return;
  }
  this.logger.debug({ stores: Array.from(pending.keys()) }, 'Saving dirty stores to container');
  await this.containerSync.saveAllStores(pending);
  this.dirtyStores.clear();
}
/**
 * Force-save a single persistent store to the container.
 *
 * Warns and returns when the container sync is unavailable or the store is
 * not configured as persistent; a store with undefined state is skipped.
 * On success the store is removed from the dirty set.
 */
async saveStore(storeName: string): Promise<void> {
  if (!this.containerSync?.isConnected()) {
    this.logger.warn({ store: storeName }, 'Container sync not available');
    return;
  }
  const config = this.stores.find((s) => s.name === storeName);
  if (!config?.persistent) {
    this.logger.warn({ store: storeName }, 'Store is not persistent');
    return;
  }
  const snapshot = this.registry.getState(storeName);
  if (snapshot === undefined) {
    return;
  }
  await this.containerSync.saveStore(storeName, snapshot);
  this.dirtyStores.delete(storeName);
}
// ===========================================================================
// Accessors
// ===========================================================================
/** ID of the user this workspace/session belongs to. */
getUserId(): string {
return this.userId;
}
/** ID of the current session. */
getSessionId(): string {
return this.sessionId;
}
/** Whether initialization has completed (see the initialized flag). */
isInitialized(): boolean {
return this.initialized;
}
}
// Re-export DEFAULT_STORES for convenience
export { DEFAULT_STORES };