redesign fully scaffolded and web login works

This commit is contained in:
2026-03-17 20:10:47 -04:00
parent b9cc397e05
commit f6bd22a8ef
143 changed files with 17317 additions and 693 deletions

View File

@@ -35,5 +35,27 @@ AGENT_IMAGE=ghcr.io/dexorder/agent:latest
SIDECAR_IMAGE=ghcr.io/dexorder/lifecycle-sidecar:latest
AGENT_STORAGE_CLASS=standard
# Redis (for session management - future)
# REDIS_URL=redis://localhost:6379
# Redis (for hot storage and session management)
REDIS_URL=redis://localhost:6379
# Qdrant (for RAG vector search)
QDRANT_URL=http://localhost:6333
# QDRANT_API_KEY is optional; leave empty for local dev
# (kept on its own line: many dotenv parsers treat trailing "# ..." as part of the value)
QDRANT_API_KEY=
# Iceberg (for durable storage via REST catalog)
ICEBERG_CATALOG_URI=http://iceberg-catalog:8181
ICEBERG_NAMESPACE=gateway
S3_ENDPOINT=http://minio:9000
S3_ACCESS_KEY=minioadmin
S3_SECRET_KEY=minioadmin
# Event router (ZeroMQ)
EVENT_ROUTER_BIND=tcp://*:5571
# Embeddings (for RAG vector search)
# Recommended: ollama with all-minilm (90MB model, CPU-friendly, ~100MB RAM)
EMBEDDING_PROVIDER=ollama
EMBEDDING_MODEL=all-minilm
OLLAMA_URL=http://localhost:11434
# Alternative models: nomic-embed-text (8K context), mxbai-embed-large (higher accuracy)
# For OpenAI embeddings, set: EMBEDDING_PROVIDER=openai, EMBEDDING_MODEL=text-embedding-3-small

View File

@@ -1,4 +1,4 @@
FROM node:22-alpine AS builder
FROM node:22-slim AS builder
WORKDIR /app
@@ -7,7 +7,7 @@ COPY package*.json ./
COPY tsconfig.json ./
# Install dependencies
RUN npm ci
RUN npm install
# Copy source
COPY src ./src
@@ -16,25 +16,52 @@ COPY src ./src
RUN npm run build
# Production image
FROM node:22-alpine
FROM node:22-slim
WORKDIR /app
# Install dependencies for Ollama (early in the build for caching)
RUN apt-get update && apt-get install -y curl bash zstd ca-certificates && rm -rf /var/lib/apt/lists/*
# Install Ollama (before npm dependencies for better caching)
RUN curl -fsSL https://ollama.com/install.sh | sh
# Create non-root user early (before pulling model)
RUN groupadd --gid 1001 nodejs && \
useradd --uid 1001 --gid nodejs --shell /bin/bash --create-home nodejs && \
chown -R nodejs:nodejs /app
# Pull embedding model (all-minilm: 90MB, CPU-friendly) as nodejs user
# This is the most expensive operation, so do it early
USER nodejs
RUN ollama serve & \
OLLAMA_PID=$! && \
sleep 10 && \
ollama pull all-minilm && \
kill $OLLAMA_PID && \
wait $OLLAMA_PID || true
# Switch back to root for remaining setup
USER root
# Copy package files
COPY package*.json ./
# Install production dependencies only
RUN npm ci --omit=dev
RUN npm install --omit=dev
# Copy built application
COPY --from=builder /app/dist ./dist
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
adduser -S nodejs -u 1001
# Copy entrypoint script
COPY entrypoint.sh ./
RUN chmod +x entrypoint.sh
# Ensure nodejs user owns everything
RUN chown -R nodejs:nodejs /app
USER nodejs
EXPOSE 3000
CMD ["node", "dist/main.js"]
ENTRYPOINT ["./entrypoint.sh"]

View File

@@ -91,6 +91,10 @@ Containers self-manage their lifecycle using the lifecycle sidecar (see `../life
- OpenAI GPT
- Google Gemini
- OpenRouter (one key for 300+ models)
- Ollama (for embeddings): https://ollama.com/download
- Redis (for session/hot storage)
- Qdrant (for RAG vector search)
- Kafka + Flink + Iceberg (for durable storage)
### Development
@@ -119,7 +123,20 @@ DEFAULT_MODEL_PROVIDER=anthropic
DEFAULT_MODEL=claude-3-5-sonnet-20241022
```
4. Run development server:
4. Start Ollama and pull embedding model:
```bash
# Install Ollama (one-time): https://ollama.com/download
# Or with Docker: docker run -d -p 11434:11434 ollama/ollama
# Pull the all-minilm embedding model (90MB, CPU-friendly)
ollama pull all-minilm
# Alternative models:
# ollama pull nomic-embed-text # 8K context length
# ollama pull mxbai-embed-large # Higher accuracy, slower
```
5. Run development server:
```bash
npm run dev
```
@@ -200,11 +217,143 @@ ws.send(JSON.stringify({
**`GET /health`**
- Returns server health status
## Ollama Deployment Options
The gateway requires Ollama for embedding generation in RAG queries. You have two deployment options:
### Option 1: Ollama in Gateway Container (Recommended for simplicity)
Install Ollama directly in the gateway container. This keeps all dependencies local and simplifies networking.
**Dockerfile additions:**
```dockerfile
FROM node:22-slim
# Install Ollama
RUN curl -fsSL https://ollama.com/install.sh | sh
# Pull embedding model at build time
RUN ollama serve & \
sleep 5 && \
ollama pull all-minilm && \
pkill ollama
# ... rest of your gateway Dockerfile
```
**Start script (entrypoint.sh):**
```bash
#!/bin/bash
# Start Ollama in background
ollama serve &
# Start gateway
node dist/main.js
```
**Pros:**
- Simple networking (localhost:11434)
- No extra K8s resources
- Self-contained deployment
**Cons:**
- Larger container image (~200MB extra)
- CPU/memory shared with gateway process
**Resource requirements:**
- Add +200MB memory
- Add +0.2 CPU cores for embedding inference
### Option 2: Ollama as Separate Pod/Sidecar
Deploy Ollama as a separate container in the same pod (sidecar) or as its own deployment.
**K8s Deployment (sidecar pattern):**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: gateway
spec:
template:
spec:
containers:
- name: gateway
image: ghcr.io/dexorder/gateway:latest
env:
- name: OLLAMA_URL
value: http://localhost:11434
- name: ollama
image: ollama/ollama:latest
command: ["/bin/sh", "-c"]
args:
- |
ollama serve &
sleep 5
ollama pull all-minilm
wait
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
```
**K8s Deployment (separate service):**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
replicas: 1
template:
spec:
containers:
- name: ollama
image: ollama/ollama:latest
# ... same as above
---
apiVersion: v1
kind: Service
metadata:
name: ollama
spec:
selector:
app: ollama
ports:
- port: 11434
```
Gateway `.env`:
```bash
OLLAMA_URL=http://ollama:11434
```
**Pros:**
- Isolated resource limits
- Can scale separately
- Easier to monitor/debug
**Cons:**
- More K8s resources
- Network hop (minimal latency)
- More complex deployment
### Recommendation
For most deployments: **Use Option 1 (in-container)** for simplicity, unless you need to:
- Share Ollama across multiple services
- Scale embedding inference independently
- Run Ollama on GPU nodes (gateway on CPU nodes)
## TODO
- [ ] Implement JWT verification with JWKS
- [ ] Implement MCP HTTP/SSE transport
- [ ] Add Redis for session persistence
- [ ] Add rate limiting per user license
- [ ] Add message usage tracking
- [ ] Add streaming responses for WebSocket

View File

@@ -0,0 +1,61 @@
# Gateway Configuration
# Server configuration
server:
port: 3000
host: 0.0.0.0
log_level: info
  cors_origin: "*" # dev-only wildcard — restrict to trusted_origins in production
base_url: http://localhost:3000
trusted_origins:
- http://localhost:3000
- http://localhost:5173
- http://localhost:8080
# Database
database:
url: postgresql://postgres:postgres@localhost:5432/dexorder
# Default model (if user has no preference)
defaults:
model_provider: anthropic
model: claude-3-5-sonnet-20241022
# Kubernetes configuration
kubernetes:
namespace: dexorder-agents
in_cluster: false
context: minikube
agent_image: ghcr.io/dexorder/agent:latest
sidecar_image: ghcr.io/dexorder/lifecycle-sidecar:latest
storage_class: standard
# DragonflyDB (Redis-compatible, for hot storage and session management)
redis:
url: redis://localhost:6379
# Qdrant (for RAG vector search)
qdrant:
url: http://localhost:6333
collection: gateway_memory
# Iceberg (for durable storage via REST catalog)
iceberg:
catalog_uri: http://iceberg-catalog:8181
namespace: gateway
s3_endpoint: http://minio:9000
# Event router (ZeroMQ)
events:
router_bind: tcp://*:5571
# Embeddings (for RAG vector search)
# Recommended: ollama with all-minilm (90MB model, CPU-friendly, ~100MB RAM)
embedding:
provider: ollama
model: all-minilm
ollama_url: http://localhost:11434
# Email service configuration
email:
from_address: noreply@dexorder.com

8
gateway/db-dev.sql Normal file
View File

@@ -0,0 +1,8 @@
-- Development seed data
-- This file contains sample data for local development and testing
--
-- Dev user (LOCAL DEVELOPMENT ONLY — never reuse these credentials elsewhere): cryptochimp@dexorder.ai / moon2the
-- User is created via Better Auth API in bin/dev script
-- License is also created in bin/dev script
--
-- This file is kept for future dev seed data that may be needed

25
gateway/entrypoint.sh Normal file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -e

# Launch the Ollama server as a background process and remember its PID.
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!

# Poll the Ollama HTTP API until it answers, giving up after 30 attempts
# (one second apart).
echo "Waiting for Ollama to be ready..."
attempt=1
while [ "$attempt" -le 30 ]; do
  if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
    echo "Ollama is ready!"
    break
  fi
  if [ "$attempt" -eq 30 ]; then
    echo "Ollama failed to start within 30 seconds"
    exit 1
  fi
  sleep 1
  attempt=$((attempt + 1))
done

# Replace this shell with the gateway so node receives signals directly.
echo "Starting gateway..."
exec node dist/main.js

View File

@@ -0,0 +1,94 @@
# Dexorder Knowledge Base
This directory contains global knowledge documents that are automatically loaded into the RAG system as platform-wide knowledge (user_id="0").
## Structure
- **platform/**: Platform architecture and capabilities
- **trading/**: Trading concepts and fundamentals
- **indicators/**: Indicator development and usage
- **strategies/**: Strategy development and patterns
## Document Format
Documents should be in Markdown format with:
- Clear headings for chunking
- Optional YAML frontmatter for tags
- Code examples where relevant
- Cross-references to other docs
### Example with Frontmatter
```markdown
---
tags: [trading, risk-management, position-sizing]
---
# Risk Management
Content here...
```
## How It Works
1. At gateway startup, the DocumentLoader scans this directory
2. Each markdown file is chunked by headers (max ~1000 tokens per chunk)
3. Chunks are embedded using the configured embedding service
4. Embeddings are stored in Qdrant with user_id="0" (global namespace)
5. Content hash tracking enables incremental updates
## Updating Documents
### During Development
- Edit markdown files
- Restart gateway or call reload endpoint: `POST /admin/reload-knowledge`
### In Production
- Update markdown files in git
- Deploy new version
- Gateway will detect changes and update vectors automatically
## RAG Integration
When users query the agent:
1. Their query is embedded
2. Qdrant searches both global (user_id="0") and user-specific vectors
3. Relevant chunks from these docs are included in context
4. LLM generates response with platform knowledge
## Adding New Documents
1. Create markdown file in appropriate subdirectory
2. Use clear section headers (##, ###) for automatic chunking
3. Include practical examples and code samples
4. Add tags in frontmatter if using complex categorization
5. Restart gateway or reload knowledge
## Best Practices
- **Keep chunks focused**: Each section should cover one topic
- **Use examples**: Code samples and practical examples help
- **Link concepts**: Reference other docs for deeper dives
- **Update regularly**: Keep knowledge current with platform changes
- **Test queries**: Verify RAG retrieves relevant chunks
## Maintenance
The DocumentLoader tracks:
- Content hashes for change detection
- Number of chunks per document
- Last update timestamps
Check logs for load statistics:
```
Knowledge documents loaded: { loaded: 5, updated: 2, skipped: 3 }
```
Monitor Qdrant collection stats:
```
GET /health
{
"qdrantVectors": 1234,
"qdrantIndexed": 1234
}
```

View File

@@ -0,0 +1,142 @@
# Indicator Development Guide
Custom indicators in Dexorder are Python functions that process OHLCV data and return signals or values.
## Indicator Structure
```python
def my_indicator(df, **params):
"""
Calculate custom indicator
Args:
df: DataFrame with columns [open, high, low, close, volume]
**params: Indicator parameters
Returns:
Series or DataFrame with indicator values
"""
# Implementation
return result
```
## Common Patterns
### Simple Moving Average
```python
def sma(df, period=20):
return df['close'].rolling(window=period).mean()
```
### Exponential Moving Average
```python
def ema(df, period=20):
return df['close'].ewm(span=period, adjust=False).mean()
```
### RSI (Relative Strength Index)
```python
def rsi(df, period=14):
delta = df['close'].diff()
gain = delta.where(delta > 0, 0).rolling(window=period).mean()
loss = -delta.where(delta < 0, 0).rolling(window=period).mean()
rs = gain / loss
return 100 - (100 / (1 + rs))
```
### MACD
```python
def macd(df, fast=12, slow=26, signal=9):
ema_fast = df['close'].ewm(span=fast).mean()
ema_slow = df['close'].ewm(span=slow).mean()
macd_line = ema_fast - ema_slow
signal_line = macd_line.ewm(span=signal).mean()
histogram = macd_line - signal_line
return pd.DataFrame({
'macd': macd_line,
'signal': signal_line,
'histogram': histogram
})
```
## Best Practices
### Data Handling
- Always validate input DataFrame has required columns
- Handle NaN values appropriately
- Use `.copy()` to avoid modifying original data
- Consider edge cases (not enough data, etc.)
### Performance
- Vectorize operations when possible (avoid loops)
- Use pandas/numpy built-in functions
- Cache expensive calculations
- Test on large datasets
### Parameters
- Provide sensible defaults
- Document parameter ranges
- Validate parameter values
- Consider optimization bounds
### Testing
```python
def test_indicator():
# Create sample data
df = pd.DataFrame({
'close': [100, 102, 101, 103, 105]
})
# Test calculation
result = my_indicator(df, param=10)
# Validate output
assert not result.isna().all()
assert len(result) == len(df)
```
## Common Pitfalls
### Look-Ahead Bias
Never use future data:
```python
# WRONG - uses future data
df['signal'] = df['close'].shift(-1) > df['close']
# CORRECT - only past data
df['signal'] = df['close'] > df['close'].shift(1)
```
### Repainting
Indicator values should not change for closed bars:
```python
# Ensure calculations are based on closed candles
# Avoid using unstable data sources
```
### Overfitting
- Don't optimize on same data you test on
- Use separate train/validation/test sets
- Walk-forward analysis for robustness
- Simple is often better than complex
## Integration with Strategies
Indicators are used in strategy signals:
```python
def my_strategy(df):
# Calculate indicators
df['rsi'] = rsi(df, period=14)
df['sma_fast'] = sma(df, period=20)
df['sma_slow'] = sma(df, period=50)
# Generate signals
df['signal'] = 0
df.loc[(df['rsi'] < 30) & (df['sma_fast'] > df['sma_slow']), 'signal'] = 1
df.loc[(df['rsi'] > 70) & (df['sma_fast'] < df['sma_slow']), 'signal'] = -1
return df
```
Store indicators in your git repository under `indicators/` directory.

View File

@@ -0,0 +1,71 @@
# Agent System Architecture
The Dexorder AI platform uses a sophisticated agent harness that orchestrates between user interactions, LLM models, and user-specific tools.
## Core Components
### Gateway
Multi-channel gateway supporting:
- WebSocket connections for web/mobile
- Telegram integration
- Real-time event streaming
### Agent Harness
Stateless orchestrator that:
1. Fetches context from user's MCP server
2. Routes to appropriate LLM model based on license
3. Calls LLM with embedded context
4. Routes tool calls to user's MCP or platform tools
5. Saves conversation history back to MCP
### Memory Architecture
Three-tier storage system:
- **Redis**: Hot state for active sessions and checkpoints
- **Qdrant**: Vector search for RAG and semantic memory
- **Iceberg**: Cold storage for durable conversations and analytics
### User Context
Every interaction includes:
- User ID and license information
- Active channel (websocket, telegram, etc.)
- Channel capabilities (markdown, images, buttons)
- Conversation history
- Relevant memories from RAG
- Workspace state
## Skills vs Subagents
### Skills
Self-contained capabilities for specific tasks:
- Market analysis
- Strategy validation
- Indicator development
- Defined in markdown + TypeScript
- Use when task is well-defined and scoped
### Subagents
Specialized agents with dedicated memory:
- Code reviewer with review guidelines
- Risk analyzer with risk models
- Multi-file knowledge base
- Custom system prompts
- Use when domain expertise is needed
## Global vs User Memory
### Global Memory (user_id="0")
Platform-wide knowledge available to all users:
- Trading concepts and terminology
- Platform capabilities
- Indicator documentation
- Strategy patterns
- Best practices
### User Memory
Personal context specific to each user:
- Conversation history
- Preferences and trading style
- Custom indicators and strategies
- Workspace state
All RAG queries automatically search both global and user-specific memories.

View File

@@ -0,0 +1,88 @@
# Model Context Protocol (MCP) Integration
Dexorder uses the Model Context Protocol for user-specific tool execution and state management.
## Container Architecture
Each user has a dedicated Kubernetes pod running:
- **Agent Container**: Python environment with conda packages
- **Lifecycle Sidecar**: Manages container lifecycle and communication
- **Persistent Storage**: User's git repository with indicators/strategies
## Authentication Modes
Three MCP authentication modes:
### 1. Public Mode (Free Tier)
- No authentication required
- Container creates anonymous session
- Limited to read-only resources
- Session expires after timeout
### 2. Gateway Auth Mode (Standard)
- Gateway authenticates user
- Passes verified user ID to container
- Container trusts gateway's authentication
- Full access to user's tools and data
### 3. Direct Auth Mode (Enterprise)
- User authenticates directly with container
- Gateway forwards encrypted credentials
- Container validates credentials independently
- Highest security for sensitive operations
## MCP Resources
The container exposes standard resources:
### context://user-profile
User preferences and trading style
### context://conversation-summary
Recent conversation context and history
### context://workspace-state
Current chart, indicators, and analysis state
### context://system-prompt
User's custom agent instructions
### indicators://list
Available indicators with signatures
### strategies://list
User's trading strategies
## Tool Execution Flow
1. User sends message to gateway
2. Gateway queries user's MCP resources for context
3. LLM generates response with tool calls
4. Gateway routes tool calls:
- Platform tools → handled by gateway
- User tools → proxied to MCP container
5. Tool results returned to LLM
6. Final response sent to user
7. Conversation saved to MCP container
## Container Lifecycle
### Startup
1. Gateway receives user connection
2. Checks if container exists
3. Creates pod if needed (cold start ~5-10s)
4. Waits for container ready
5. Establishes MCP connection
### Active
- Container stays alive during active session
- Receives tool calls via MCP
- Maintains workspace state
- Saves files to persistent storage
### Shutdown
- Free users: timeout after 15 minutes idle
- Paid users: longer timeout based on license
- Graceful shutdown saves state
- Persistent storage retained
- Fast restart on next connection

View File

@@ -0,0 +1,188 @@
# Strategy Development Guide
Trading strategies in Dexorder define entry/exit rules and position management logic.
## Strategy Structure
```python
class Strategy:
def __init__(self, **params):
"""Initialize strategy with parameters"""
self.params = params
def generate_signals(self, df):
"""
Generate trading signals
Args:
df: DataFrame with OHLCV + indicator columns
Returns:
DataFrame with 'signal' column:
1 = long entry
-1 = short entry
0 = no action
"""
pass
def calculate_position_size(self, capital, price, risk_pct):
"""Calculate position size based on risk"""
pass
def get_stop_loss(self, entry_price, direction):
"""Calculate stop loss level"""
pass
def get_take_profit(self, entry_price, direction):
"""Calculate take profit level"""
pass
```
## Example: Simple Moving Average Crossover
```python
class SMACrossoverStrategy:
def __init__(self, fast_period=20, slow_period=50, risk_pct=0.02):
self.fast_period = fast_period
self.slow_period = slow_period
self.risk_pct = risk_pct
def generate_signals(self, df):
# Calculate moving averages
df['sma_fast'] = df['close'].rolling(self.fast_period).mean()
df['sma_slow'] = df['close'].rolling(self.slow_period).mean()
# Generate signals
df['signal'] = 0
# Long when fast crosses above slow
df.loc[
(df['sma_fast'] > df['sma_slow']) &
(df['sma_fast'].shift(1) <= df['sma_slow'].shift(1)),
'signal'
] = 1
# Short when fast crosses below slow
df.loc[
(df['sma_fast'] < df['sma_slow']) &
(df['sma_fast'].shift(1) >= df['sma_slow'].shift(1)),
'signal'
] = -1
return df
def calculate_position_size(self, capital, price, atr):
# Risk-based position sizing
risk_amount = capital * self.risk_pct
stop_distance = 2 * atr
position_size = risk_amount / stop_distance
return position_size
def get_stop_loss(self, entry_price, direction, atr):
if direction == 1: # Long
return entry_price - (2 * atr)
else: # Short
return entry_price + (2 * atr)
def get_take_profit(self, entry_price, direction, atr):
if direction == 1: # Long
return entry_price + (4 * atr) # 2:1 risk/reward
else: # Short
return entry_price - (4 * atr)
```
## Strategy Components
### Signal Generation
Entry conditions based on:
- Indicator crossovers
- Price patterns
- Volume confirmation
- Multiple timeframe confluence
### Risk Management
Essential elements:
- **Position Sizing**: Based on account risk percentage
- **Stop Losses**: ATR-based or support/resistance
- **Take Profits**: Multiple targets or trailing stops
- **Max Positions**: Limit concurrent trades
### Filters
Reduce false signals:
- **Trend Filter**: Only trade with the trend
- **Volatility Filter**: Avoid low volatility periods
- **Time Filter**: Specific trading hours
- **Volume Filter**: Minimum volume requirements
### Exit Rules
Multiple exit types:
- **Stop Loss**: Protect capital
- **Take Profit**: Lock in gains
- **Trailing Stop**: Follow profitable moves
- **Time Exit**: Close at end of period
- **Signal Exit**: Opposite signal
## Backtesting Considerations
### Data Quality
- Use clean, validated data
- Handle missing data appropriately
- Account for survivorship bias
- Include realistic spreads and slippage
### Performance Metrics
Track key metrics:
- **Total Return**: Cumulative profit/loss
- **Sharpe Ratio**: Risk-adjusted returns
- **Max Drawdown**: Largest peak-to-trough decline
- **Win Rate**: Percentage of profitable trades
- **Profit Factor**: Gross profit / gross loss
- **Expectancy**: Average $ per trade
### Validation
Prevent overfitting:
- **Train/Test Split**: 70/30 or 60/40
- **Walk-Forward**: Rolling windows
- **Out-of-Sample**: Test on recent unseen data
- **Monte Carlo**: Randomize trade order
- **Paper Trading**: Live validation
## Common Strategy Types
### Trend Following
Follow sustained price movements:
- Moving average crossovers
- Breakout strategies
- Trend channels
- Works best in trending markets
### Mean Reversion
Profit from price returning to average:
- Bollinger Band reversals
- RSI extremes
- Statistical arbitrage
- Works best in ranging markets
### Momentum
Trade in direction of strong moves:
- Relative strength
- Price acceleration
- Volume surges
- Breakout confirmation
### Arbitrage
Exploit price discrepancies:
- Cross-exchange spreads
- Funding rate arbitrage
- Statistical pairs trading
- Requires low latency
## Integration with Platform
Store strategies in your git repository under `strategies/` directory.
Test using the backtesting tools provided by the platform.
Deploy live strategies through the execution engine with proper risk controls.
Monitor performance and adjust parameters as market conditions change.

View File

@@ -0,0 +1,72 @@
# Technical Analysis Fundamentals
Technical analysis is the study of historical price and volume data to identify patterns and predict future market movements.
## Key Concepts
### Price Action
Raw price movement without indicators:
- Support and resistance levels
- Trend lines and channels
- Chart patterns (head and shoulders, double tops, etc.)
- Candlestick patterns
### Trends
Markets move in trends:
- **Uptrend**: Higher highs and higher lows
- **Downtrend**: Lower highs and lower lows
- **Sideways**: Range-bound consolidation
- Trend strength measured by consistency
### Volume
Trading volume confirms price movements:
- High volume confirms trends
- Low volume suggests weak moves
- Volume precedes price
- Divergences signal reversals
## Common Indicators
### Trend Indicators
- **Moving Averages**: SMA, EMA, WMA
- **MACD**: Trend and momentum
- **ADX**: Trend strength
- **Parabolic SAR**: Trend direction
### Momentum Indicators
- **RSI**: Overbought/oversold conditions (0-100)
- **Stochastic**: Fast vs slow momentum
- **CCI**: Cyclical trends
- **Williams %R**: Momentum oscillator
### Volatility Indicators
- **Bollinger Bands**: Price envelope around MA
- **ATR**: Average True Range for volatility
- **Keltner Channels**: ATR-based bands
- **Donchian Channels**: High/low breakouts
### Volume Indicators
- **OBV**: On Balance Volume
- **VWAP**: Volume Weighted Average Price
- **Volume Profile**: Price levels by volume
- **Chaikin Money Flow**: Volume pressure
## Timeframes
Different timeframes for different strategies:
- **Scalping**: 1m, 5m charts
- **Day Trading**: 5m, 15m, 1h charts
- **Swing Trading**: 4h, 1D charts
- **Position Trading**: 1D, 1W charts
Always analyze multiple timeframes for context.
## Risk Management
Essential principles:
- **Position Sizing**: Risk 1-2% per trade
- **Stop Losses**: Define exit before entry
- **Risk/Reward**: Minimum 1:2 ratio
- **Diversification**: Multiple uncorrelated positions
Never trade without a plan and defined risk parameters.

View File

@@ -12,28 +12,33 @@
},
"dependencies": {
"@fastify/cors": "^10.0.1",
"@fastify/jwt": "^9.0.1",
"@fastify/websocket": "^11.0.1",
"@kubernetes/client-node": "^0.21.0",
"@langchain/anthropic": "^0.3.8",
"@langchain/core": "^0.3.24",
"@langchain/google-genai": "^0.1.6",
"@langchain/langgraph": "^0.2.26",
"@langchain/openai": "^0.3.21",
"@langchain/openrouter": "^0.1.2",
"@kubernetes/client-node": "^1.0.0",
"@langchain/anthropic": "latest",
"@langchain/core": "latest",
"@langchain/langgraph": "latest",
"@modelcontextprotocol/sdk": "^1.0.4",
"@qdrant/js-client-rest": "^1.17.0",
"argon2": "^0.41.1",
"better-auth": "^1.5.3",
"fastify": "^5.2.0",
"iceberg-js": "latest",
"ioredis": "^5.4.2",
"js-yaml": "^4.1.0",
"kysely": "^0.27.3",
"ollama": "^0.5.10",
"pg": "^8.13.1",
"pino": "^9.6.0",
"pino-pretty": "^13.0.0",
"zeromq": "^6.0.0-beta.20",
"zod": "^3.24.1"
},
"devDependencies": {
"@types/js-yaml": "^4.0.9",
"@types/node": "^22.10.2",
"@types/pg": "^8.11.10",
"tsx": "^4.19.2",
"tsx": "^4.21.0",
"typescript": "^5.7.2"
},
"engines": {

View File

@@ -0,0 +1,258 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.dexorder.proto";
// User container event system for delivering notifications to users
// via active sessions or external channels (Telegram, email, push).
//
// Two ZMQ patterns:
// - XPUB/SUB (port 5570): Fast path for informational events to active sessions
// - DEALER/ROUTER (port 5571): Guaranteed delivery for critical events with ack
//
// See doc/protocol.md and doc/user_container_events.md for details.
// =============================================================================
// User Event (Container → Gateway)
// Message Type ID: 0x20
// =============================================================================
message UserEvent {
// User ID this event belongs to
string user_id = 1;
// Unique event ID for deduplication and ack tracking (UUID)
string event_id = 2;
// Timestamp when event was generated (Unix milliseconds)
int64 timestamp = 3;
// Type of event
EventType event_type = 4;
// Event payload (JSON or nested protobuf, depending on event_type)
bytes payload = 5;
// Delivery specification (priority and channel preferences)
DeliverySpec delivery = 6;
}
enum EventType {
// Trading events
ORDER_PLACED = 0;
ORDER_FILLED = 1;
ORDER_CANCELLED = 2;
ORDER_REJECTED = 3;
ORDER_EXPIRED = 4;
// Alert events
ALERT_TRIGGERED = 10;
ALERT_CREATED = 11;
ALERT_DELETED = 12;
// Position events
POSITION_OPENED = 20;
POSITION_CLOSED = 21;
POSITION_UPDATED = 22;
POSITION_LIQUIDATED = 23;
// Workspace/chart events
WORKSPACE_CHANGED = 30;
CHART_ANNOTATION_ADDED = 31;
CHART_ANNOTATION_REMOVED = 32;
INDICATOR_UPDATED = 33;
// Strategy events
STRATEGY_STARTED = 40;
STRATEGY_STOPPED = 41;
STRATEGY_LOG = 42;
STRATEGY_ERROR = 43;
BACKTEST_COMPLETED = 44;
// System events
CONTAINER_STARTING = 50;
CONTAINER_READY = 51;
CONTAINER_SHUTTING_DOWN = 52;
ERROR = 53;
}
// =============================================================================
// Delivery Specification
// =============================================================================
message DeliverySpec {
// Priority determines routing behavior
Priority priority = 1;
// Ordered list of channel preferences (try first, then second, etc.)
repeated ChannelPreference channels = 2;
}
enum Priority {
// Drop if no active session (fire-and-forget via XPUB)
// Use for: indicator updates, chart syncs, strategy logs when watching
INFORMATIONAL = 0;
// Best effort delivery - queue briefly, deliver when possible
// Uses XPUB if subscribed, otherwise DEALER
// Use for: alerts, position updates
NORMAL = 1;
// Must deliver - retry until acked, escalate channels
// Always uses DEALER for guaranteed delivery
// Use for: order fills, liquidations, critical errors
CRITICAL = 2;
}
message ChannelPreference {
// Channel to deliver to
ChannelType channel = 1;
// If true, skip this channel if user is not connected to it
// If false, deliver even if user is not actively connected
// (e.g., send Telegram message even if user isn't in Telegram chat)
bool only_if_active = 2;
}
enum ChannelType {
// Whatever channel the user currently has open (WebSocket, Telegram session)
ACTIVE_SESSION = 0;
// Specific channels
WEB = 1; // WebSocket to web UI
TELEGRAM = 2; // Telegram bot message
EMAIL = 3; // Email notification
PUSH = 4; // Mobile push notification (iOS/Android)
DISCORD = 5; // Discord webhook (future)
SLACK = 6; // Slack webhook (future)
}
// =============================================================================
// Event Acknowledgment (Gateway → Container)
// Message Type ID: 0x21
// =============================================================================
message EventAck {
// Event ID being acknowledged
string event_id = 1;
// Delivery status
AckStatus status = 2;
// Error message if status is ERROR
string error_message = 3;
// Channel that successfully delivered (for logging/debugging)
ChannelType delivered_via = 4;
}
enum AckStatus {
// Successfully delivered to at least one channel
DELIVERED = 0;
// Accepted and queued for delivery (e.g., rate limited, will retry)
QUEUED = 1;
// Permanent failure - all channels failed
ERROR = 2;
}
// =============================================================================
// Event Payloads
// These are JSON-encoded in the UserEvent.payload field.
// Defined here for documentation; actual encoding is JSON for flexibility.
// =============================================================================
// Payload for ORDER_PLACED, ORDER_FILLED, ORDER_CANCELLED, etc.
message OrderEventPayload {
string order_id = 1;
string symbol = 2;
string side = 3; // "buy" or "sell"
string order_type = 4; // "market", "limit", "stop_limit", etc.
string quantity = 5; // Decimal string
string price = 6; // Decimal string (for limit orders)
string fill_price = 7; // Decimal string (for fills)
string fill_quantity = 8; // Decimal string (for partial fills)
string status = 9; // "open", "filled", "cancelled", etc.
string exchange = 10;
int64 timestamp = 11; // Unix milliseconds
string strategy_id = 12; // If order was placed by a strategy
string error_message = 13; // If rejected/failed
}
// Payload for ALERT_TRIGGERED
// JSON-encoded into UserEvent.payload; triggered_price is a decimal string
// for the same precision reasons as the order payload fields.
message AlertEventPayload {
  string alert_id = 1;
  string symbol = 2;
  string condition = 3; // Human-readable condition (e.g., "BTC > 50000")
  string triggered_price = 4; // Decimal string
  int64 timestamp = 5;
}
// Payload for POSITION_OPENED, POSITION_CLOSED, POSITION_UPDATED
// JSON-encoded into UserEvent.payload; all sizes/prices/PnL are decimal
// strings to avoid floating-point rounding.
message PositionEventPayload {
  string position_id = 1;
  string symbol = 2;
  string side = 3; // "long" or "short"
  string size = 4; // Decimal string
  string entry_price = 5; // Decimal string
  string current_price = 6; // Decimal string
  string unrealized_pnl = 7; // Decimal string
  string realized_pnl = 8; // Decimal string (for closed positions)
  string leverage = 9; // Decimal string (for margin)
  string liquidation_price = 10;
  string exchange = 11;
  int64 timestamp = 12;
}
// Payload for WORKSPACE_CHANGED, CHART_ANNOTATION_*, INDICATOR_UPDATED
// JSON-encoded into UserEvent.payload. Only the fields relevant to
// change_type are populated; annotation_data and indicator_params carry
// nested JSON strings.
message WorkspaceEventPayload {
  string workspace_id = 1;
  string change_type = 2; // "symbol_changed", "timeframe_changed", "annotation_added", etc.
  string symbol = 3;
  string timeframe = 4;
  // For annotations
  string annotation_id = 5;
  string annotation_type = 6; // "trendline", "horizontal", "rectangle", "text", etc.
  string annotation_data = 7; // JSON string with coordinates, style, etc.
  // For indicators
  string indicator_name = 8;
  string indicator_params = 9; // JSON string with indicator parameters
  int64 timestamp = 10;
}
// Payload for STRATEGY_LOG, STRATEGY_ERROR
// JSON-encoded into UserEvent.payload; log_level mirrors the usual logger
// levels, and details carries a nested JSON string for extra context.
message StrategyEventPayload {
  string strategy_id = 1;
  string strategy_name = 2;
  string log_level = 3; // "debug", "info", "warn", "error"
  string message = 4;
  string details = 5; // JSON string with additional context
  int64 timestamp = 6;
}
// Payload for BACKTEST_COMPLETED
// JSON-encoded into UserEvent.payload. win_rate and max_drawdown are
// fractions in [0, 1]; other metrics are decimal strings.
// NOTE(review): start_time/end_time units are not stated — presumably Unix
// milliseconds like the other event timestamps; confirm.
message BacktestEventPayload {
  string backtest_id = 1;
  string strategy_id = 2;
  string strategy_name = 3;
  string symbol = 4;
  string timeframe = 5;
  int64 start_time = 6;
  int64 end_time = 7;
  // Results summary
  int32 total_trades = 8;
  int32 winning_trades = 9;
  int32 losing_trades = 10;
  string total_pnl = 11; // Decimal string
  string win_rate = 12; // Decimal string (0-1)
  string sharpe_ratio = 13; // Decimal string
  string max_drawdown = 14; // Decimal string (0-1)
  string results_path = 15; // Path to full results file
  int64 completed_at = 16;
}

View File

@@ -1,7 +1,70 @@
-- User license and authorization schema
-- Better Auth Core Schema
-- See: https://better-auth.com/docs/concepts/database
-- Note: Using quoted "user" to avoid SQL keyword issues while keeping Better Auth's expected table name
-- User table (better-auth core)
-- Core identity table managed by Better Auth; camelCase columns are quoted
-- because Better Auth expects those exact names.
CREATE TABLE IF NOT EXISTS "user" (
    id TEXT PRIMARY KEY,
    name TEXT NOT NULL,
    email TEXT UNIQUE NOT NULL,
    "emailVerified" BOOLEAN NOT NULL DEFAULT FALSE,
    image TEXT,
    "createdAt" TIMESTAMP NOT NULL DEFAULT NOW(),
    "updatedAt" TIMESTAMP NOT NULL DEFAULT NOW()
);

-- IF NOT EXISTS keeps schema re-runs idempotent (the tables already use it;
-- a bare CREATE INDEX fails on the second run).
-- NOTE(review): UNIQUE on email already creates an index, so this secondary
-- index is likely redundant — confirm before keeping it.
CREATE INDEX IF NOT EXISTS idx_user_email ON "user"(email);
-- Session table (better-auth core)
-- Session table (better-auth core); rows are removed when the owning user is
-- deleted via ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS session (
    id TEXT PRIMARY KEY,
    "expiresAt" TIMESTAMP NOT NULL,
    token TEXT UNIQUE NOT NULL,
    "createdAt" TIMESTAMP NOT NULL DEFAULT NOW(),
    "updatedAt" TIMESTAMP NOT NULL DEFAULT NOW(),
    "ipAddress" TEXT,
    "userAgent" TEXT,
    "userId" TEXT NOT NULL REFERENCES "user"(id) ON DELETE CASCADE
);

-- IF NOT EXISTS keeps schema re-runs idempotent (the tables already use it).
CREATE INDEX IF NOT EXISTS idx_session_userId ON session("userId");
-- NOTE(review): token is UNIQUE above, which already creates an index, so
-- idx_session_token is likely redundant — confirm before keeping it.
CREATE INDEX IF NOT EXISTS idx_session_token ON session(token);
-- Account table (better-auth core, for OAuth providers)
-- Account table (better-auth core). Holds OAuth provider links and, for the
-- email/password provider, the hashed password column.
CREATE TABLE IF NOT EXISTS account (
    id TEXT PRIMARY KEY,
    "accountId" TEXT NOT NULL,
    "providerId" TEXT NOT NULL,
    "userId" TEXT NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
    "accessToken" TEXT,
    "refreshToken" TEXT,
    "idToken" TEXT,
    "accessTokenExpiresAt" TIMESTAMP,
    "refreshTokenExpiresAt" TIMESTAMP,
    scope TEXT,
    password TEXT,
    "createdAt" TIMESTAMP NOT NULL DEFAULT NOW(),
    "updatedAt" TIMESTAMP NOT NULL DEFAULT NOW()
);

-- IF NOT EXISTS keeps schema re-runs idempotent (the tables already use it).
CREATE INDEX IF NOT EXISTS idx_account_userId ON account("userId");
-- Verification table (better-auth core)
-- Verification table (better-auth core): short-lived tokens for email
-- verification / password reset, keyed by identifier.
CREATE TABLE IF NOT EXISTS verification (
    id TEXT PRIMARY KEY,
    identifier TEXT NOT NULL,
    value TEXT NOT NULL,
    "expiresAt" TIMESTAMP NOT NULL,
    "createdAt" TIMESTAMP,
    "updatedAt" TIMESTAMP
);

-- IF NOT EXISTS keeps schema re-runs idempotent (the tables already use it).
CREATE INDEX IF NOT EXISTS idx_verification_identifier ON verification(identifier);
-- User license and authorization schema (custom tables)
CREATE TABLE IF NOT EXISTS user_licenses (
user_id TEXT PRIMARY KEY,
user_id TEXT PRIMARY KEY REFERENCES "user"(id) ON DELETE CASCADE,
email TEXT,
license_type TEXT NOT NULL CHECK (license_type IN ('free', 'pro', 'enterprise')),
features JSONB NOT NULL DEFAULT '{
@@ -43,37 +106,3 @@ CREATE TABLE IF NOT EXISTS user_channel_links (
-- IF NOT EXISTS keeps schema re-runs idempotent (the tables already use it).
CREATE INDEX IF NOT EXISTS idx_user_channel_links_user_id ON user_channel_links(user_id);
CREATE INDEX IF NOT EXISTS idx_user_channel_links_channel ON user_channel_links(channel_type, channel_user_id);
-- Example data for development.
-- Safe to re-run: ON CONFLICT (user_id) DO NOTHING makes this idempotent.
-- features / resource_limits / preferred_model are JSONB documents.
INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url, features, resource_limits, preferred_model)
VALUES (
    'dev-user-001',
    'dev@example.com',
    'pro',
    'http://localhost:8080/mcp',
    '{
        "maxIndicators": 50,
        "maxStrategies": 20,
        "maxBacktestDays": 365,
        "realtimeData": true,
        "customExecutors": true,
        "apiAccess": true
    }',
    '{
        "maxConcurrentSessions": 5,
        "maxMessagesPerDay": 1000,
        "maxTokensPerMessage": 8192,
        "rateLimitPerMinute": 60
    }',
    '{
        "provider": "anthropic",
        "model": "claude-3-5-sonnet-20241022",
        "temperature": 0.7
    }'
)
ON CONFLICT (user_id) DO NOTHING;
-- Example Telegram link for the dev user (idempotent via ON CONFLICT).
INSERT INTO user_channel_links (user_id, channel_type, channel_user_id)
VALUES ('dev-user-001', 'telegram', '123456789')
ON CONFLICT (channel_type, channel_user_id) DO NOTHING;

View File

@@ -0,0 +1,40 @@
# Gateway Secrets
# These should be mounted from k8s secrets
# NOTE(review): every value below is a placeholder or local-dev default
# (e.g. minioadmin) — never commit real credentials to version control.

# Authentication
# IMPORTANT: Generate a strong random secret for production
# Example: openssl rand -base64 32
auth:
  secret: "change-me-in-production-use-openssl-rand-base64-32"

# LLM Provider API Keys (configure at least one)
llm_providers:
  anthropic_api_key: sk-ant-xxxxx
  openai_api_key: sk-xxxxx
  google_api_key: xxxxx
  openrouter_api_key: sk-or-xxxxx

# Telegram (optional)
telegram:
  bot_token: ""

# Email service (optional)
email:
  service_key: ""

# Push notification service (optional)
push:
  service_key: ""

# Qdrant API key (optional, for hosted Qdrant)
qdrant:
  api_key: ""

# Iceberg S3 credentials (defaults match the local MinIO dev setup)
iceberg:
  s3_access_key: minioadmin
  s3_secret_key: minioadmin

# Embedding API key (if using external provider)
embedding:
  api_key: ""

View File

@@ -0,0 +1,173 @@
import type { BetterAuthInstance } from './better-auth-config.js';
import type { FastifyBaseLogger } from 'fastify';
import type { Pool } from 'pg';
export interface AuthServiceConfig {
  /** Better Auth instance whose `.api` methods do the actual auth work. */
  auth: BetterAuthInstance;
  /** Postgres pool used for the custom user_licenses table. */
  pool: Pool;
  logger: FastifyBaseLogger;
}
/**
 * Authentication service that integrates Better Auth with existing user system
 *
 * Thin wrapper around the Better Auth API object: failures are translated
 * into `null` or an `{ error }` field so callers never need try/catch.
 */
export class AuthService {
  private config: AuthServiceConfig;

  constructor(config: AuthServiceConfig) {
    this.config = config;
  }

  /**
   * Extract a readable message from an unknown thrown value.
   * Better Auth may throw non-Error values, so narrow before reading .message.
   */
  private describeError(error: unknown, fallback: string): string {
    if (error instanceof Error && error.message) {
      return error.message;
    }
    return fallback;
  }

  /**
   * Verify JWT token and return user ID
   * Replaces the placeholder implementation in UserService
   *
   * @param token - Bearer token presented by the client.
   * @returns The user's ID, or null for a missing/expired/invalid session.
   */
  async verifyToken(token: string): Promise<string | null> {
    try {
      // Better Auth's session verification reads the Authorization header.
      const session = await this.config.auth.api.getSession({
        headers: {
          authorization: `Bearer ${token}`,
        },
      });

      if (!session || !session.user) {
        return null;
      }

      return session.user.id;
    } catch (error) {
      // Invalid tokens are routine; log at debug level only.
      this.config.logger.debug({ error }, 'Token verification failed');
      return null;
    }
  }

  /**
   * Create user with email and password
   *
   * @returns userId on success; empty userId plus error message on failure.
   */
  async createUser(email: string, password: string, name?: string): Promise<{ userId: string; error?: string }> {
    try {
      const result = await this.config.auth.api.signUpEmail({
        body: {
          email,
          password,
          // Default display name: the mailbox part of the address; fall back
          // to the full address if the local part is empty (e.g. "@x.com").
          name: name || email.split('@')[0] || email,
        },
      });

      if (!result.user) {
        return {
          userId: '',
          error: 'Failed to create user',
        };
      }

      return {
        userId: result.user.id,
      };
    } catch (error) {
      this.config.logger.error({ error }, 'User creation failed');
      return {
        userId: '',
        error: this.describeError(error, 'User creation failed'),
      };
    }
  }

  /**
   * Sign in with email and password
   *
   * @returns Session token and user ID, or an error message on failure.
   */
  async signIn(email: string, password: string): Promise<{ token: string; userId: string; error?: string }> {
    try {
      const result = await this.config.auth.api.signInEmail({
        body: {
          email,
          password,
        },
      });

      if (!result.token || !result.user) {
        return {
          token: '',
          userId: '',
          error: 'Invalid credentials',
        };
      }

      return {
        token: result.token,
        userId: result.user.id,
      };
    } catch (error) {
      this.config.logger.error({ error }, 'Sign in failed');
      return {
        token: '',
        userId: '',
        error: this.describeError(error, 'Sign in failed'),
      };
    }
  }

  /**
   * Sign out and invalidate session
   */
  async signOut(token: string): Promise<{ success: boolean }> {
    try {
      await this.config.auth.api.signOut({
        headers: {
          authorization: `Bearer ${token}`,
        },
      });
      return { success: true };
    } catch (error) {
      this.config.logger.error({ error }, 'Sign out failed');
      return { success: false };
    }
  }

  /**
   * Get current session from token
   *
   * @returns The Better Auth session object, or null when the token is invalid.
   */
  async getSession(token: string) {
    try {
      const session = await this.config.auth.api.getSession({
        headers: {
          authorization: `Bearer ${token}`,
        },
      });
      return session;
    } catch (error) {
      this.config.logger.debug({ error }, 'Get session failed');
      return null;
    }
  }

  /**
   * Ensure user has a license (create default license if needed)
   *
   * Atomic and idempotent: user_id is the primary key of user_licenses, so
   * ON CONFLICT DO NOTHING avoids the check-then-insert race when two
   * sessions for the same new user arrive concurrently.
   */
  async ensureUserLicense(userId: string, email: string): Promise<void> {
    const client = await this.config.pool.connect();
    try {
      const result = await client.query(
        `INSERT INTO user_licenses (user_id, email, license_type, mcp_server_url)
         VALUES ($1, $2, 'free', 'pending')
         ON CONFLICT (user_id) DO NOTHING`,
        [userId, email]
      );

      // rowCount > 0 means we actually inserted (i.e. this is a new user).
      if ((result.rowCount ?? 0) > 0) {
        this.config.logger.info({ userId }, 'Created default free license for new user');
      }
    } finally {
      // Always return the connection to the pool, even on query failure.
      client.release();
    }
  }
}

View File

@@ -0,0 +1,106 @@
import { betterAuth } from 'better-auth';
import { Pool } from 'pg';
import { Kysely, PostgresDialect } from 'kysely';
import type { FastifyBaseLogger } from 'fastify';
export interface BetterAuthConfig {
  /** Postgres connection string (credentials are masked when logged). */
  databaseUrl: string;
  /** Optional existing pool to reuse; a new one is created when omitted. */
  pool?: Pool;
  /** Secret used by Better Auth for signing (e.g. `openssl rand -base64 32`). */
  secret: string;
  /** Public base URL used for callbacks and redirects. */
  baseUrl: string;
  /** Origins allowed for CORS. */
  trustedOrigins: string[];
  logger: FastifyBaseLogger;
}
/**
 * Create Better Auth instance backed by PostgreSQL (via Kysely).
 *
 * Fails fast: the database connection is tested with SELECT 1 before Better
 * Auth is constructed so misconfiguration surfaces as a clear error.
 *
 * @throws Error when the database is unreachable or Better Auth setup fails.
 */
export async function createBetterAuth(config: BetterAuthConfig) {
  try {
    config.logger.debug({
      // Mask the password portion of the connection string before logging.
      databaseUrl: config.databaseUrl.replace(/:[^:@]+@/, ':***@'),
      baseUrl: config.baseUrl,
    }, 'Creating Better Auth instance');

    // Use existing pool if provided, otherwise create new one
    const pool = config.pool || new Pool({
      connectionString: config.databaseUrl,
    });
    config.logger.debug('PostgreSQL pool created');

    // Test database connection first
    try {
      config.logger.debug('Testing database connection...');
      const testClient = await pool.connect();
      await testClient.query('SELECT 1');
      testClient.release();
      config.logger.debug('Database connection test successful');
    } catch (dbError: any) {
      config.logger.error({
        error: dbError,
        message: dbError.message,
        stack: dbError.stack,
      }, 'Database connection test failed');
      throw new Error(`Database connection failed: ${dbError.message}`);
    }

    // Create Kysely instance for Better Auth
    config.logger.debug('Creating Kysely database instance...');
    const db = new Kysely({
      dialect: new PostgresDialect({ pool }),
    });
    config.logger.debug('Kysely instance created');

    // Better Auth v1.5.3 postgres configuration
    const auth = betterAuth({
      database: {
        db,
        type: 'postgres',
      },

      // Secret for JWT signing
      secret: config.secret,

      // Base URL for callbacks and redirects
      baseURL: config.baseUrl,

      // Trusted origins for CORS
      trustedOrigins: config.trustedOrigins,

      // Email/password authentication
      emailAndPassword: {
        enabled: true,
        requireEmailVerification: false, // Set to true in production
        sendResetPassword: async ({ user, url }) => {
          // TODO: Implement email sending; until then the reset URL is only
          // surfaced in the logs.
          config.logger.info({ userId: user.id, resetUrl: url }, 'Password reset requested');
        },
      },

      // Session configuration
      session: {
        expiresIn: 60 * 60 * 24 * 7, // 7 days
        updateAge: 60 * 60 * 24, // Update session every 24 hours
        cookieCache: {
          enabled: true,
          maxAge: 5 * 60, // 5 minutes
        },
      },
    });

    config.logger.debug('Better Auth instance created');
    return auth;
  } catch (error: any) {
    config.logger.error({
      error,
      message: error.message,
      stack: error.stack,
      cause: error.cause,
    }, 'Error creating Better Auth instance');
    throw error;
  }
}

export type BetterAuthInstance = Awaited<ReturnType<typeof createBetterAuth>>;

View File

@@ -156,7 +156,7 @@ export class TelegramHandler {
/**
* Cleanup old sessions (call periodically)
*/
async cleanupSessions(maxAgeMs = 30 * 60 * 1000): Promise<void> {
async cleanupSessions(_maxAgeMs = 30 * 60 * 1000): Promise<void> {
  // TODO: Track session last activity and cleanup
  // For now, sessions persist until server restart.
  // The parameter is accepted (so the future API is stable) but intentionally
  // unused, hence the underscore prefix.
}

View File

@@ -6,18 +6,24 @@ import type { InboundMessage } from '../types/messages.js';
import { randomUUID } from 'crypto';
import type { ProviderConfig } from '../llm/provider.js';
import type { SessionRegistry, EventSubscriber, Session } from '../events/index.js';
export interface WebSocketHandlerConfig {
authenticator: Authenticator;
providerConfig: ProviderConfig;
sessionRegistry: SessionRegistry;
eventSubscriber: EventSubscriber;
}
/**
* WebSocket channel handler
*
* Handles WebSocket connections for chat and integrates with the event system
* for container-to-client notifications.
*/
export class WebSocketHandler {
private config: WebSocketHandlerConfig;
private sessions = new Map<string, AgentHarness>();
private harnesses = new Map<string, AgentHarness>();
constructor(config: WebSocketHandlerConfig) {
this.config = config;
@@ -94,7 +100,30 @@ export class WebSocketHandler {
try {
await harness.initialize();
this.sessions.set(authContext.sessionId, harness);
this.harnesses.set(authContext.sessionId, harness);
// Register session for event system
// Container endpoint is derived from the MCP server URL (same container, different port)
const containerEventEndpoint = this.getContainerEventEndpoint(authContext.license.mcpServerUrl);
const session: Session = {
userId: authContext.userId,
sessionId: authContext.sessionId,
socket,
channelType: 'websocket',
containerEndpoint: containerEventEndpoint,
connectedAt: new Date(),
};
this.config.sessionRegistry.register(session);
// Subscribe to informational events from user's container
await this.config.eventSubscriber.onSessionConnect(session);
logger.info(
{ userId: authContext.userId, containerEndpoint: containerEventEndpoint },
'Session registered for events'
);
// Send connected message
socket.send(
@@ -145,11 +174,19 @@ export class WebSocketHandler {
// Handle disconnection
socket.on('close', async () => {
logger.info({ sessionId: authContext.sessionId }, 'WebSocket disconnected');
// Unregister from event system
const removedSession = this.config.sessionRegistry.unregister(authContext.sessionId);
if (removedSession) {
await this.config.eventSubscriber.onSessionDisconnect(removedSession);
}
// Cleanup harness
await harness.cleanup();
this.sessions.delete(authContext.sessionId);
this.harnesses.delete(authContext.sessionId);
});
socket.on('error', (error) => {
socket.on('error', (error: any) => {
logger.error({ error, sessionId: authContext.sessionId }, 'WebSocket error');
});
} catch (error) {
@@ -158,4 +195,21 @@ export class WebSocketHandler {
await harness.cleanup();
}
}
/**
 * Derive the container's XPUB event endpoint from its MCP server URL.
 *
 * The agent container serves MCP over HTTP on port 3000 and events over
 * ZeroMQ on port 5570, so only the scheme and port change:
 *   http://agent-user-abc123.dexorder-agents.svc.cluster.local:3000
 *   -> tcp://agent-user-abc123.dexorder-agents.svc.cluster.local:5570
 */
private getContainerEventEndpoint(mcpServerUrl: string): string {
  let host: string | undefined;
  try {
    host = new URL(mcpServerUrl).hostname;
  } catch {
    host = undefined;
  }
  if (host !== undefined) {
    return `tcp://${host}:5570`;
  }
  // Unparseable URL: best-effort plain string substitution.
  return mcpServerUrl.replace('http://', 'tcp://').replace(':3000', ':5570');
}
}

View File

@@ -0,0 +1,209 @@
import { IcebergRestCatalog } from 'iceberg-js';
import type { FastifyBaseLogger } from 'fastify';
/**
* Iceberg client configuration
*/
export interface IcebergConfig {
  /** Iceberg REST catalog endpoint, e.g. http://iceberg-catalog:8181 */
  catalogUri: string;
  /** Namespace (database) containing the gateway tables, e.g. "gateway". */
  namespace: string;
  // S3/MinIO settings are optional; when s3Endpoint is unset, none of the
  // S3 options are applied to the catalog client.
  s3Endpoint?: string;
  s3AccessKey?: string;
  s3SecretKey?: string;
}
/**
* Message record for Iceberg storage
*/
export interface IcebergMessage {
  id: string;
  user_id: string;
  session_id: string;
  /** Conversation role; matches the chat-message convention. */
  role: 'user' | 'assistant' | 'system';
  content: string;
  metadata: string; // JSON string
  timestamp: number; // microseconds
}
/**
* Checkpoint record for Iceberg storage
*/
export interface IcebergCheckpoint {
  user_id: string;
  session_id: string;
  checkpoint_id: string;
  /** Serialized checkpoint state. */
  checkpoint_data: string; // JSON string
  metadata: string; // JSON string
  timestamp: number; // microseconds
}
/**
 * Iceberg REST client wrapper for durable storage
 *
 * Uses Iceberg REST Catalog API to:
 * - Query conversation history from <namespace>.conversations
 * - Query checkpoints from <namespace>.checkpoints
 * - Note: Writes are handled by Flink; this is read-only
 *
 * For writes, we'll send to a Kafka topic that Flink consumes
 * (or implement direct REST catalog write if needed).
 */
export class IcebergClient {
  // Catalog handle retained for the metadata/list queries below. Previously
  // the instance was constructed and immediately discarded, which made the
  // query TODOs impossible to implement.
  private catalog: IcebergRestCatalog;
  private namespace: string;
  private logger: FastifyBaseLogger;

  constructor(config: IcebergConfig, logger: FastifyBaseLogger) {
    this.logger = logger;
    this.namespace = config.namespace;

    // Assembled as `any` so the optional s3 block can be attached.
    const clientConfig: any = {
      uri: config.catalogUri,
    };

    if (config.s3Endpoint) {
      // Path-style access is required by MinIO and most self-hosted S3 stores.
      clientConfig.s3 = {
        endpoint: config.s3Endpoint,
        'access-key-id': config.s3AccessKey,
        'secret-access-key': config.s3SecretKey,
        'path-style-access': 'true',
      };
    }

    this.catalog = new IcebergRestCatalog(clientConfig);

    this.logger.info({
      catalogUri: config.catalogUri,
      namespace: this.namespace,
    }, 'Iceberg client initialized');
  }

  /**
   * Query messages from gateway.conversations table
   *
   * Note: This is a simplified interface. The actual Iceberg REST API
   * returns table metadata, and you'd need to query the underlying
   * Parquet files via S3 or use a query engine like DuckDB/Trino.
   *
   * For now, we document the expected schema and leave the actual
   * implementation as TODO since Flink handles writes.
   *
   * @returns Empty array until the query path is implemented.
   */
  async queryMessages(
    userId: string,
    sessionId: string,
    _options?: {
      startTime?: number;
      endTime?: number;
      limit?: number;
    }
  ): Promise<IcebergMessage[]> {
    this.logger.debug({
      userId,
      sessionId,
      table: `${this.namespace}.conversations`,
    }, 'Querying messages from Iceberg');

    // TODO: Implement actual Iceberg query
    // Options:
    //  1. Use this.catalog to get table metadata and Parquet file locations
    //  2. Query Parquet files directly via S3 + parquet-wasm
    //  3. Use external query engine (DuckDB, Trino, Presto)
    //  4. Use Flink SQL REST endpoint for queries
    this.logger.warn('Iceberg query not yet implemented - returning empty array');
    return [];
  }

  /**
   * Query checkpoint from gateway.checkpoints table
   *
   * @returns null until the query path is implemented.
   */
  async queryCheckpoint(
    userId: string,
    sessionId: string,
    checkpointId?: string
  ): Promise<IcebergCheckpoint | null> {
    this.logger.debug({
      userId,
      sessionId,
      checkpointId,
      table: `${this.namespace}.checkpoints`,
    }, 'Querying checkpoint from Iceberg');

    // TODO: Implement actual Iceberg query (see queryMessages for options).
    this.logger.warn('Iceberg query not yet implemented - returning null');
    return null;
  }

  /**
   * Get table metadata
   *
   * @returns null until the REST metadata query is implemented.
   */
  async getTableMetadata(tableName: string): Promise<any> {
    try {
      const tableId = `${this.namespace}.${tableName}`;
      // Note: iceberg-js provides catalog operations. For actual data
      // queries you'd need to: (1) get table metadata, (2) find data file
      // locations, (3) query Parquet files from S3.
      this.logger.info({ table: tableId }, 'Getting table metadata');

      // TODO: Implement table metadata query via this.catalog
      return null;
    } catch (error) {
      this.logger.error({ error, tableName }, 'Failed to get table metadata');
      throw error;
    }
  }

  /**
   * List tables in namespace
   *
   * @returns Empty array until the catalog list call is wired up.
   */
  async listTables(): Promise<string[]> {
    try {
      this.logger.info({ namespace: this.namespace }, 'Listing tables');

      // TODO: Use the stored catalog to list tables, e.g.:
      // const tables = await this.catalog.listTables(this.namespace);
      // return tables.map(t => t.name);
      return [];
    } catch (error) {
      this.logger.error({ error }, 'Failed to list tables');
      throw error;
    }
  }

  /**
   * Check if table exists
   *
   * Swallows listTables failures and reports false so callers can treat
   * "unknown" as "missing".
   */
  async tableExists(tableName: string): Promise<boolean> {
    try {
      const tables = await this.listTables();
      return tables.includes(tableName);
    } catch (error) {
      this.logger.error({ error, tableName }, 'Failed to check table existence');
      return false;
    }
  }
}
/**
* Note on Iceberg Writes:
*
* For appending messages and checkpoints to Iceberg, we have two options:
*
* 1. **Via Kafka + Flink** (Recommended):
* - Gateway writes to Kafka topics (gateway_messages, gateway_checkpoints)
* - Flink consumes and writes to Iceberg with proper partitioning
* - Benefits: Proven architecture, handles backpressure, deduplication
*
* 2. **Direct REST Catalog Write**:
* - Use Iceberg REST API to commit new data files
* - More complex: need to create Parquet files, upload to S3, commit transaction
* - Library like parquet-wasm could help
*
* For now, recommend Option 1 (Kafka + Flink) for consistency with existing architecture.
*/

View File

@@ -0,0 +1,319 @@
import { QdrantClient as QdrantRestClient } from '@qdrant/js-client-rest';
import type { FastifyBaseLogger } from 'fastify';
/**
* Qdrant client configuration
*/
export interface QdrantConfig {
  /** Qdrant server URL, e.g. http://localhost:6333 */
  url: string;
  /** Optional API key (for hosted Qdrant); omit for local dev. */
  apiKey?: string;
  /** Collection to use; defaults to "gateway_memory" when omitted. */
  collectionName?: string;
}
/**
 * Qdrant client wrapper for RAG vector storage
 *
 * Features:
 * - Global namespace (user_id = "0") for platform knowledge
 * - User-specific namespaces for personal memories
 * - Payload-indexed by user_id for GDPR compliance
 * - Cosine similarity search
 */
export class QdrantClient {
  private client: QdrantRestClient;
  private collectionName: string;
  // Must match the dimensionality of the embedding model in use.
  private vectorDimension: number;
  private logger: FastifyBaseLogger;

  constructor(config: QdrantConfig, logger: FastifyBaseLogger, vectorDimension: number = 1536) {
    this.logger = logger;
    this.collectionName = config.collectionName || 'gateway_memory';
    this.vectorDimension = vectorDimension;

    // Initialize Qdrant REST client
    this.client = new QdrantRestClient({
      url: config.url,
      apiKey: config.apiKey,
    });

    this.logger.info({
      url: config.url,
      collection: this.collectionName,
      vectorDimension,
    }, 'Qdrant client initialized');
  }

  /**
   * Initialize collection with proper schema and indexes.
   * Idempotent: creates the collection and payload indexes only when missing.
   */
  async initialize(): Promise<void> {
    this.logger.info({ collection: this.collectionName }, 'Initializing Qdrant collection');

    try {
      // Check if collection exists
      const collections = await this.client.getCollections();
      const exists = collections.collections.some(c => c.name === this.collectionName);

      if (!exists) {
        this.logger.info({ collection: this.collectionName }, 'Creating new collection');

        // Create collection with vector configuration
        await this.client.createCollection(this.collectionName, {
          vectors: {
            size: this.vectorDimension,
            distance: 'Cosine',
          },
        });

        // Payload indexes make the user_id/session_id/timestamp filters cheap.
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'user_id',
          field_schema: 'keyword',
        });
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'session_id',
          field_schema: 'keyword',
        });
        await this.client.createPayloadIndex(this.collectionName, {
          field_name: 'timestamp',
          field_schema: 'integer',
        });

        this.logger.info({ collection: this.collectionName }, 'Collection created successfully');
      } else {
        this.logger.info({ collection: this.collectionName }, 'Collection already exists');
      }
    } catch (error) {
      this.logger.error({ error, collection: this.collectionName }, 'Failed to initialize collection');
      throw error;
    }
  }

  /**
   * Store a vector point with payload.
   * wait:true blocks until the write is applied server-side.
   */
  async upsertPoint(
    id: string,
    vector: number[],
    payload: Record<string, any>
  ): Promise<void> {
    try {
      await this.client.upsert(this.collectionName, {
        wait: true,
        points: [{
          id,
          vector,
          payload,
        }],
      });
    } catch (error) {
      this.logger.error({ error, id }, 'Failed to upsert point');
      throw error;
    }
  }

  /**
   * Search for similar vectors.
   * Queries both global (user_id="0") and user-specific vectors.
   *
   * @param options.limit - Max results, default 5.
   * @param options.scoreThreshold - Minimum similarity score, default 0.7.
   */
  async search(
    userId: string,
    queryVector: number[],
    options?: {
      limit?: number;
      scoreThreshold?: number;
      sessionId?: string;
      timeRange?: { start: number; end: number };
    }
  ): Promise<Array<{
    id: string;
    score: number;
    payload: Record<string, any>;
  }>> {
    // ?? (not ||) so an explicit value of 0 is honored instead of silently
    // being replaced by the default.
    const limit = options?.limit ?? 5;
    const scoreThreshold = options?.scoreThreshold ?? 0.7;

    try {
      // Build filter: (user_id = userId OR user_id = "0") AND other conditions
      const mustConditions: any[] = [];
      const shouldConditions: any[] = [
        { key: 'user_id', match: { value: userId } },
        { key: 'user_id', match: { value: '0' } }, // Global namespace
      ];

      // Add session filter if provided
      if (options?.sessionId) {
        mustConditions.push({
          key: 'session_id',
          match: { value: options.sessionId },
        });
      }

      // Add time range filter if provided
      if (options?.timeRange) {
        mustConditions.push({
          key: 'timestamp',
          range: {
            gte: options.timeRange.start,
            lte: options.timeRange.end,
          },
        });
      }

      // Perform search; an empty must-list is sent as undefined (dropped on
      // serialization) rather than as [].
      const results = await this.client.search(this.collectionName, {
        vector: queryVector,
        filter: {
          must: mustConditions.length > 0 ? mustConditions : undefined,
          should: shouldConditions,
        },
        limit,
        score_threshold: scoreThreshold,
        with_payload: true,
      });

      return results.map(r => ({
        id: r.id as string,
        score: r.score,
        payload: r.payload ?? {},
      }));
    } catch (error) {
      this.logger.error({ error, userId }, 'Search failed');
      throw error;
    }
  }

  /**
   * Get points by filter (without vector search).
   * Paginate by passing back the returned nextOffset.
   */
  async scroll(
    userId: string,
    options?: {
      limit?: number;
      sessionId?: string;
      offset?: string;
    }
  ): Promise<{
    points: Array<{ id: string; payload: Record<string, any> }>;
    nextOffset?: string;
  }> {
    try {
      const filter: any = {
        must: [
          { key: 'user_id', match: { value: userId } },
        ],
      };

      if (options?.sessionId) {
        filter.must.push({
          key: 'session_id',
          match: { value: options.sessionId },
        });
      }

      const result = await this.client.scroll(this.collectionName, {
        filter,
        limit: options?.limit ?? 10,
        offset: options?.offset,
        with_payload: true,
        with_vector: false,
      });

      return {
        points: result.points.map(p => ({
          id: p.id as string,
          payload: p.payload ?? {},
        })),
        nextOffset: result.next_page_offset as string | undefined,
      };
    } catch (error) {
      this.logger.error({ error, userId }, 'Scroll failed');
      throw error;
    }
  }

  /**
   * Delete all points for a user (GDPR compliance)
   */
  async deleteUserData(userId: string): Promise<void> {
    this.logger.info({ userId }, 'Deleting user vectors for GDPR compliance');

    try {
      await this.client.delete(this.collectionName, {
        wait: true,
        filter: {
          must: [
            { key: 'user_id', match: { value: userId } },
          ],
        },
      });
      this.logger.info({ userId }, 'User vectors deleted');
    } catch (error) {
      this.logger.error({ error, userId }, 'Failed to delete user data');
      throw error;
    }
  }

  /**
   * Delete points for a specific session
   */
  async deleteSession(userId: string, sessionId: string): Promise<void> {
    this.logger.info({ userId, sessionId }, 'Deleting session vectors');

    try {
      await this.client.delete(this.collectionName, {
        wait: true,
        filter: {
          must: [
            { key: 'user_id', match: { value: userId } },
            { key: 'session_id', match: { value: sessionId } },
          ],
        },
      });
      this.logger.info({ userId, sessionId }, 'Session vectors deleted');
    } catch (error) {
      this.logger.error({ error, userId, sessionId }, 'Failed to delete session');
      throw error;
    }
  }

  /**
   * Get collection info and statistics
   */
  async getCollectionInfo(): Promise<{
    vectorsCount: number;
    indexedVectorsCount: number;
    pointsCount: number;
  }> {
    try {
      const info = await this.client.getCollection(this.collectionName);
      return {
        // Cast: vectors_count is not present in all client type versions.
        vectorsCount: (info as any).vectors_count || 0,
        indexedVectorsCount: info.indexed_vectors_count || 0,
        pointsCount: info.points_count || 0,
      };
    } catch (error) {
      this.logger.error({ error }, 'Failed to get collection info');
      throw error;
    }
  }

  /**
   * Store global platform knowledge (user_id = "0").
   * Any user_id in the payload is overwritten with the global namespace id.
   */
  async storeGlobalKnowledge(
    id: string,
    vector: number[],
    payload: Omit<Record<string, any>, 'user_id'>
  ): Promise<void> {
    return this.upsertPoint(id, vector, {
      ...payload,
      user_id: '0', // Global namespace
    });
  }
}

View File

@@ -1,9 +1,11 @@
import { Pool, PoolClient } from 'pg';
import { Pool } from 'pg';
import type { UserLicense } from '../types/user.js';
import { UserLicenseSchema } from '../types/user.js';
import type { AuthService } from '../auth/auth-service.js';
export class UserService {
private pool: Pool;
private authService?: AuthService;
constructor(connectionString: string) {
this.pool = new Pool({
@@ -14,6 +16,21 @@ export class UserService {
});
}
/**
 * Get the database pool (for AuthService)
 *
 * Lets AuthService share this service's Postgres connections instead of
 * opening a second pool against the same database.
 */
getPool(): Pool {
  return this.pool;
}
/**
 * Set auth service for JWT verification
 * Called after AuthService is initialized.
 * Setter injection is used because AuthService itself is constructed with
 * this service's pool (see getPool), so it cannot exist first.
 */
setAuthService(authService: AuthService): void {
  this.authService = authService;
}
/**
* Get user license by user ID
*/
@@ -83,19 +100,24 @@ export class UserService {
/**
* Verify JWT token from web client
* TODO: Implement JWT verification with JWKS
* Uses Better Auth for proper JWT verification
*/
async verifyWebToken(token: string): Promise<string | null> {
// Placeholder - implement JWT verification
// For now, decode without verification (INSECURE - FOR DEV ONLY)
try {
const payload = JSON.parse(
Buffer.from(token.split('.')[1], 'base64').toString()
);
return payload.sub || null;
} catch {
return null;
if (!this.authService) {
// Fallback for development - decode without verification
// This allows backward compatibility during migration
try {
const payload = JSON.parse(
Buffer.from(token.split('.')[1], 'base64').toString()
);
return payload.sub || null;
} catch {
return null;
}
}
// Use Better Auth for proper verification
return await this.authService.verifyToken(token);
}
/**

View File

@@ -0,0 +1,507 @@
import type { FastifyBaseLogger } from 'fastify';
import {
EventType,
parseEventPayload,
getEventTypeName,
type UserEvent,
type OrderEventPayload,
type AlertEventPayload,
type PositionEventPayload,
type StrategyEventPayload,
} from './types.js';
/**
* User channel configuration loaded from database.
*/
export interface UserChannelConfig {
  /** Telegram chat to notify (set once the user links the bot). */
  telegramChatId?: string;
  /** Destination address for email notifications. */
  email?: string;
  /** Device token for mobile push delivery. */
  pushToken?: string;
  /** Discord webhook URL (future channel). */
  discordWebhook?: string;
  /** Slack webhook URL (future channel). */
  slackWebhook?: string;
}
export interface DeliveryServiceConfig {
  /** Bot token used for the Telegram sendMessage API. */
  telegramBotToken?: string;
  /** Credential for the email provider (SendGrid, SES, etc.). */
  emailServiceKey?: string;
  /** From-address used for outgoing notification emails. */
  emailFromAddress?: string;
  /** Credential for the push provider (Firebase, APNs). */
  pushServiceKey?: string;
  logger: FastifyBaseLogger;
}
/**
 * DeliveryService handles actual delivery to external channels.
 *
 * Owns gateway-wide credentials for:
 * - Telegram bot
 * - Email service (SendGrid, SES, etc.)
 * - Push notifications (Firebase, APNs)
 * - Discord/Slack webhooks
 *
 * User-specific channel configs (chat IDs, emails, tokens) are loaded
 * from the database on demand and cached in memory.
 */
export class DeliveryService {
  private telegramBotToken?: string;
  private emailServiceKey?: string;
  // Fix: this field was accepted on DeliveryServiceConfig and referenced by
  // the sendEmail stub (`this.emailFromAddress`) but was never declared or
  // assigned, so the stub example could not compile once enabled.
  private emailFromAddress?: string;
  private pushServiceKey?: string;
  private logger: FastifyBaseLogger;
  // Cache of user channel configs
  // In production, this should have TTL and be backed by Redis
  private userConfigs = new Map<string, UserChannelConfig>();
  constructor(config: DeliveryServiceConfig) {
    this.telegramBotToken = config.telegramBotToken;
    this.emailServiceKey = config.emailServiceKey;
    this.emailFromAddress = config.emailFromAddress;
    this.pushServiceKey = config.pushServiceKey;
    this.logger = config.logger.child({ component: 'DeliveryService' });
  }
  /**
   * Load user's channel configuration from database.
   * Results are cached in-memory per userId.
   * TODO: Implement actual database lookup.
   */
  async loadUserConfig(userId: string): Promise<UserChannelConfig> {
    // Check cache first
    const cached = this.userConfigs.get(userId);
    if (cached) return cached;
    // TODO: Load from database
    // For now, return empty config
    const config: UserChannelConfig = {};
    this.userConfigs.set(userId, config);
    return config;
  }
  /**
   * Update cached user config (called when user updates their settings).
   * Shallow-merges the partial config over any cached entry.
   */
  updateUserConfig(userId: string, config: Partial<UserChannelConfig>): void {
    const existing = this.userConfigs.get(userId) || {};
    this.userConfigs.set(userId, { ...existing, ...config });
  }
  /**
   * Clear cached user config (forces a reload on next delivery).
   */
  clearUserConfig(userId: string): void {
    this.userConfigs.delete(userId);
  }
  // ===========================================================================
  // Telegram
  // ===========================================================================
  /**
   * Send event notification via Telegram.
   * @throws Error when the bot token or the user's chat ID is missing,
   *   or when the Telegram API responds with a non-OK status.
   */
  async sendTelegram(userId: string, event: UserEvent): Promise<void> {
    if (!this.telegramBotToken) {
      throw new Error('Telegram bot token not configured');
    }
    const config = await this.loadUserConfig(userId);
    if (!config.telegramChatId) {
      throw new Error('User has no Telegram chat ID configured');
    }
    const message = this.formatTelegramMessage(event);
    const response = await fetch(
      `https://api.telegram.org/bot${this.telegramBotToken}/sendMessage`,
      {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          chat_id: config.telegramChatId,
          text: message,
          parse_mode: 'Markdown',
        }),
      }
    );
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Telegram API error: ${error}`);
    }
    this.logger.info(
      { userId, eventId: event.eventId, chatId: config.telegramChatId },
      'Sent Telegram notification'
    );
  }
  /**
   * Format event as Telegram message with Markdown.
   *
   * NOTE(review): payload fields (symbol, error messages, etc.) are
   * interpolated into Markdown without escaping; hostile or unusual text
   * could break formatting — confirm payloads are trusted upstream.
   */
  private formatTelegramMessage(event: UserEvent): string {
    switch (event.eventType) {
      case EventType.ORDER_PLACED:
      case EventType.ORDER_FILLED:
      case EventType.ORDER_CANCELLED:
      case EventType.ORDER_REJECTED:
      case EventType.ORDER_EXPIRED: {
        const payload = parseEventPayload<OrderEventPayload>(event);
        if (!payload) return this.formatGenericMessage(event);
        const emoji = this.getOrderEmoji(event.eventType);
        const title = this.getOrderTitle(event.eventType);
        return (
          `${emoji} *${title}*\n\n` +
          `Symbol: \`${payload.symbol}\`\n` +
          `Side: ${payload.side.toUpperCase()}\n` +
          `Quantity: ${payload.quantity}\n` +
          (payload.fillPrice ? `Fill Price: ${payload.fillPrice}\n` : '') +
          (payload.price ? `Limit Price: ${payload.price}\n` : '') +
          `Exchange: ${payload.exchange}\n` +
          (payload.strategyId ? `Strategy: ${payload.strategyId}\n` : '') +
          (payload.errorMessage ? `Error: ${payload.errorMessage}\n` : '')
        );
      }
      case EventType.ALERT_TRIGGERED: {
        const payload = parseEventPayload<AlertEventPayload>(event);
        if (!payload) return this.formatGenericMessage(event);
        return (
          `🔔 *Alert Triggered*\n\n` +
          `Symbol: \`${payload.symbol}\`\n` +
          `Condition: ${payload.condition}\n` +
          `Price: ${payload.triggeredPrice}`
        );
      }
      case EventType.POSITION_OPENED:
      case EventType.POSITION_CLOSED:
      case EventType.POSITION_UPDATED:
      case EventType.POSITION_LIQUIDATED: {
        const payload = parseEventPayload<PositionEventPayload>(event);
        if (!payload) return this.formatGenericMessage(event);
        const emoji = this.getPositionEmoji(event.eventType);
        const title = this.getPositionTitle(event.eventType);
        return (
          `${emoji} *${title}*\n\n` +
          `Symbol: \`${payload.symbol}\`\n` +
          `Side: ${payload.side.toUpperCase()}\n` +
          `Size: ${payload.size}\n` +
          `Entry: ${payload.entryPrice}\n` +
          `Current: ${payload.currentPrice}\n` +
          `Unrealized PnL: ${payload.unrealizedPnl}\n` +
          (payload.realizedPnl ? `Realized PnL: ${payload.realizedPnl}\n` : '') +
          (payload.liquidationPrice ? `Liquidation: ${payload.liquidationPrice}\n` : '')
        );
      }
      case EventType.STRATEGY_ERROR: {
        const payload = parseEventPayload<StrategyEventPayload>(event);
        if (!payload) return this.formatGenericMessage(event);
        return (
          `🚨 *Strategy Error*\n\n` +
          `Strategy: ${payload.strategyName}\n` +
          `Message: ${payload.message}\n` +
          (payload.details ? `Details: ${payload.details}` : '')
        );
      }
      default:
        return this.formatGenericMessage(event);
    }
  }
  /** Fallback formatting: event name plus the raw payload as a code block. */
  private formatGenericMessage(event: UserEvent): string {
    const payload = parseEventPayload(event);
    return (
      `📌 *${getEventTypeName(event.eventType)}*\n\n` +
      `\`\`\`\n${JSON.stringify(payload, null, 2)}\n\`\`\``
    );
  }
  /** Emoji for order lifecycle events. */
  private getOrderEmoji(eventType: EventType): string {
    switch (eventType) {
      case EventType.ORDER_FILLED:
        return '✅';
      case EventType.ORDER_PLACED:
        return '📝';
      case EventType.ORDER_CANCELLED:
        return '❌';
      case EventType.ORDER_REJECTED:
        return '🚫';
      case EventType.ORDER_EXPIRED:
        return '⏰';
      default:
        return '📋';
    }
  }
  /** Title text for order lifecycle events. */
  private getOrderTitle(eventType: EventType): string {
    switch (eventType) {
      case EventType.ORDER_FILLED:
        return 'Order Filled';
      case EventType.ORDER_PLACED:
        return 'Order Placed';
      case EventType.ORDER_CANCELLED:
        return 'Order Cancelled';
      case EventType.ORDER_REJECTED:
        return 'Order Rejected';
      case EventType.ORDER_EXPIRED:
        return 'Order Expired';
      default:
        return 'Order Update';
    }
  }
  /** Emoji for position lifecycle events. */
  private getPositionEmoji(eventType: EventType): string {
    switch (eventType) {
      case EventType.POSITION_OPENED:
        return '📈';
      case EventType.POSITION_CLOSED:
        return '📉';
      case EventType.POSITION_UPDATED:
        return '📊';
      case EventType.POSITION_LIQUIDATED:
        return '💥';
      default:
        return '📊';
    }
  }
  /** Title text for position lifecycle events. */
  private getPositionTitle(eventType: EventType): string {
    switch (eventType) {
      case EventType.POSITION_OPENED:
        return 'Position Opened';
      case EventType.POSITION_CLOSED:
        return 'Position Closed';
      case EventType.POSITION_UPDATED:
        return 'Position Updated';
      case EventType.POSITION_LIQUIDATED:
        return 'Position Liquidated';
      default:
        return 'Position Update';
    }
  }
  // ===========================================================================
  // Email
  // ===========================================================================
  /**
   * Send event notification via email.
   * Currently a stub: validates configuration and logs, but does not send.
   * TODO: Implement with SendGrid, SES, or similar.
   * @throws Error when the service key or the user's email is missing.
   */
  async sendEmail(userId: string, event: UserEvent): Promise<void> {
    if (!this.emailServiceKey) {
      throw new Error('Email service not configured');
    }
    const config = await this.loadUserConfig(userId);
    if (!config.email) {
      throw new Error('User has no email configured');
    }
    const subject = this.formatEmailSubject(event);
    // const body = this.formatEmailBody(event);
    // TODO: Implement actual email sending
    // Example with SendGrid:
    // await sendgrid.send({
    //   to: config.email,
    //   from: this.emailFromAddress,
    //   subject,
    //   html: body,
    // });
    this.logger.info(
      { userId, eventId: event.eventId, email: config.email, subject },
      'Sent email notification (stub)'
    );
  }
  /** Build a subject line for the event; falls back to the event type name. */
  private formatEmailSubject(event: UserEvent): string {
    switch (event.eventType) {
      case EventType.ORDER_FILLED: {
        const payload = parseEventPayload<OrderEventPayload>(event);
        return `Order Filled: ${payload?.symbol || 'Unknown'}`;
      }
      case EventType.ALERT_TRIGGERED: {
        const payload = parseEventPayload<AlertEventPayload>(event);
        return `Alert: ${payload?.symbol || 'Unknown'} - ${payload?.condition || ''}`;
      }
      case EventType.POSITION_LIQUIDATED: {
        const payload = parseEventPayload<PositionEventPayload>(event);
        return `⚠️ Position Liquidated: ${payload?.symbol || 'Unknown'}`;
      }
      default:
        return `Dexorder: ${getEventTypeName(event.eventType)}`;
    }
  }
  // ===========================================================================
  // Push Notifications
  // ===========================================================================
  /**
   * Send push notification via Firebase/APNs.
   * Currently a stub: validates configuration and logs, but does not send.
   * TODO: Implement with Firebase Admin SDK.
   * @throws Error when the service key or the user's push token is missing.
   */
  async sendPush(userId: string, event: UserEvent): Promise<void> {
    if (!this.pushServiceKey) {
      throw new Error('Push service not configured');
    }
    const config = await this.loadUserConfig(userId);
    if (!config.pushToken) {
      throw new Error('User has no push token configured');
    }
    const title = this.formatPushTitle(event);
    // const body = this.formatPushBody(event);
    // TODO: Implement actual push notification
    // Example with Firebase:
    // await admin.messaging().send({
    //   token: config.pushToken,
    //   notification: { title, body },
    //   data: { eventId: event.eventId, eventType: String(event.eventType) },
    // });
    this.logger.info(
      { userId, eventId: event.eventId, title },
      'Sent push notification (stub)'
    );
  }
  /** Short title for push notifications. */
  private formatPushTitle(event: UserEvent): string {
    switch (event.eventType) {
      case EventType.ORDER_FILLED:
        return 'Order Filled';
      case EventType.ALERT_TRIGGERED:
        return 'Alert Triggered';
      case EventType.POSITION_LIQUIDATED:
        return '⚠️ Position Liquidated';
      default:
        return 'Dexorder';
    }
  }
  // ===========================================================================
  // Discord
  // ===========================================================================
  /**
   * Send event notification via Discord webhook.
   * @throws Error when the user has no webhook configured or the webhook
   *   responds with a non-OK status.
   */
  async sendDiscord(userId: string, event: UserEvent): Promise<void> {
    const config = await this.loadUserConfig(userId);
    if (!config.discordWebhook) {
      throw new Error('User has no Discord webhook configured');
    }
    const embed = this.formatDiscordEmbed(event);
    const response = await fetch(config.discordWebhook, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ embeds: [embed] }),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Discord webhook error: ${error}`);
    }
    this.logger.info({ userId, eventId: event.eventId }, 'Sent Discord notification');
  }
  /** Build a Discord embed; description is truncated to Discord's limits. */
  private formatDiscordEmbed(event: UserEvent): object {
    const payload = parseEventPayload(event);
    return {
      title: getEventTypeName(event.eventType),
      description: JSON.stringify(payload, null, 2).slice(0, 2000),
      color: this.getDiscordColor(event.eventType),
      timestamp: new Date(event.timestamp).toISOString(),
      footer: { text: `Event ID: ${event.eventId}` },
    };
  }
  /** Embed accent color keyed by event severity. */
  private getDiscordColor(eventType: EventType): number {
    switch (eventType) {
      case EventType.ORDER_FILLED:
        return 0x00ff00; // Green
      case EventType.ORDER_REJECTED:
      case EventType.POSITION_LIQUIDATED:
      case EventType.STRATEGY_ERROR:
        return 0xff0000; // Red
      case EventType.ALERT_TRIGGERED:
        return 0xffff00; // Yellow
      default:
        return 0x0099ff; // Blue
    }
  }
  // ===========================================================================
  // Slack
  // ===========================================================================
  /**
   * Send event notification via Slack webhook.
   * @throws Error when the user has no webhook configured or the webhook
   *   responds with a non-OK status.
   */
  async sendSlack(userId: string, event: UserEvent): Promise<void> {
    const config = await this.loadUserConfig(userId);
    if (!config.slackWebhook) {
      throw new Error('User has no Slack webhook configured');
    }
    const blocks = this.formatSlackBlocks(event);
    const response = await fetch(config.slackWebhook, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ blocks }),
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Slack webhook error: ${error}`);
    }
    this.logger.info({ userId, eventId: event.eventId }, 'Sent Slack notification');
  }
  /** Build Slack Block Kit blocks: header, payload code block, context. */
  private formatSlackBlocks(event: UserEvent): object[] {
    const payload = parseEventPayload(event);
    return [
      {
        type: 'header',
        text: {
          type: 'plain_text',
          text: getEventTypeName(event.eventType),
        },
      },
      {
        type: 'section',
        text: {
          type: 'mrkdwn',
          text: '```' + JSON.stringify(payload, null, 2).slice(0, 2000) + '```',
        },
      },
      {
        type: 'context',
        elements: [
          {
            type: 'mrkdwn',
            text: `Event ID: ${event.eventId}`,
          },
        ],
      },
    ];
  }
}

View File

@@ -0,0 +1,338 @@
import { Router } from 'zeromq';
import type { FastifyBaseLogger } from 'fastify';
import type { SessionRegistry } from './session-registry.js';
import type { DeliveryService } from './delivery-service.js';
import {
deserializeUserEvent,
serializeEventAck,
parseEventPayload,
getEventTypeName,
getChannelTypeName,
AckStatus,
ChannelType,
Priority,
type UserEvent,
type EventAck,
} from './types.js';
/**
 * Construction options for EventRouter.
 */
export interface EventRouterConfig {
  /** Registry of active sessions, consulted for session-gated channels. */
  sessions: SessionRegistry;
  /** Service that performs the actual external-channel deliveries. */
  delivery: DeliveryService;
  /** Parent logger; a child logger tagged with the component is created. */
  logger: FastifyBaseLogger;
  /** ZMQ ROUTER bind endpoint (default tcp://*:5571). */
  bindEndpoint?: string;
  /** Deduplication window in milliseconds (default 5 minutes). */
  dedupTtlMs?: number;
}
/**
 * EventRouter handles critical events from user containers.
 *
 * Uses ZMQ ROUTER socket that all containers connect to via DEALER.
 * Provides guaranteed delivery with acknowledgments.
 *
 * Flow:
 * 1. Container sends UserEvent via DEALER
 * 2. Router receives with identity frame
 * 3. Router delivers event through channel preferences
 * 4. Router sends EventAck back to container
 *
 * Features:
 * - Deduplication (5 minute window by default), only for events that
 *   were actually delivered — failed events may be retried by containers
 * - Channel preference ordering (try first channel, then second, etc.)
 * - Acknowledgment with delivery status
 */
export class EventRouter {
  private socket: Router;
  private sessions: SessionRegistry;
  private delivery: DeliveryService;
  private logger: FastifyBaseLogger;
  private bindEndpoint: string;
  private dedupTtlMs: number;
  // Deduplication: track recently *delivered* event IDs
  // Map: eventId -> timestamp when delivered
  private processedEvents = new Map<string, number>();
  private running = false;
  private messageLoopPromise: Promise<void> | null = null;
  private cleanupInterval: NodeJS.Timeout | null = null;
  constructor(config: EventRouterConfig) {
    this.socket = new Router();
    this.sessions = config.sessions;
    this.delivery = config.delivery;
    this.logger = config.logger.child({ component: 'EventRouter' });
    this.bindEndpoint = config.bindEndpoint || 'tcp://*:5571';
    this.dedupTtlMs = config.dedupTtlMs || 5 * 60 * 1000; // 5 minutes
  }
  /**
   * Start the event router: bind the socket, launch the receive loop,
   * and start the periodic dedup-cache cleanup.
   */
  async start(): Promise<void> {
    await this.socket.bind(this.bindEndpoint);
    this.logger.info({ endpoint: this.bindEndpoint }, 'Event router bound');
    this.running = true;
    this.messageLoopPromise = this.messageLoop();
    this.startCleanupTimer();
    this.logger.info('Event router started');
  }
  /**
   * Stop the event router. Closes the socket and waits for the receive
   * loop to wind down; loop errors during shutdown are ignored.
   */
  async stop(): Promise<void> {
    this.running = false;
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
      this.cleanupInterval = null;
    }
    this.socket.close();
    if (this.messageLoopPromise) {
      try {
        await this.messageLoopPromise;
      } catch {
        // Ignore errors during shutdown
      }
    }
    this.logger.info('Event router stopped');
  }
  /**
   * Main message processing loop.
   * Iterates [identity, payload] frame pairs from the ROUTER socket.
   */
  private async messageLoop(): Promise<void> {
    this.logger.debug('Starting message loop');
    try {
      for await (const [identity, payload] of this.socket) {
        if (!this.running) break;
        try {
          await this.processMessage(identity, payload);
        } catch (error) {
          this.logger.error({ error }, 'Error processing critical event');
          // Don't ack on error - let container retry
        }
      }
    } catch (error) {
      if (this.running) {
        this.logger.error({ error }, 'Message loop error');
      }
    }
    this.logger.debug('Message loop ended');
  }
  /**
   * Process a single message from the ROUTER socket:
   * deserialize, dedup-check, deliver, record, ack.
   */
  private async processMessage(identity: Buffer, payload: Buffer): Promise<void> {
    // Deserialize the event
    const event = deserializeUserEvent(payload);
    this.logger.info(
      {
        userId: event.userId,
        eventId: event.eventId,
        eventType: getEventTypeName(event.eventType),
        priority: Priority[event.delivery.priority],
      },
      'Received critical event'
    );
    // Deduplication check: only successfully delivered events are recorded,
    // so a hit here means the event really was delivered before.
    if (this.processedEvents.has(event.eventId)) {
      this.logger.debug({ eventId: event.eventId }, 'Duplicate event, sending cached ack');
      await this.sendAck(identity, {
        eventId: event.eventId,
        status: AckStatus.DELIVERED,
        errorMessage: '',
      });
      return;
    }
    // Deliver through channel preferences
    const result = await this.deliverEvent(event);
    // Fix: only record the event for deduplication when delivery succeeded.
    // Previously failed events were recorded too, so a container retry hit
    // the dedup path above and received a false DELIVERED ack instead of a
    // real re-delivery attempt.
    if (result.status === AckStatus.DELIVERED) {
      this.processedEvents.set(event.eventId, Date.now());
    }
    // Send ack back to container
    await this.sendAck(identity, result);
    this.logger.info(
      {
        eventId: event.eventId,
        status: AckStatus[result.status],
        deliveredVia: result.deliveredVia ? getChannelTypeName(result.deliveredVia) : undefined,
      },
      'Event processed'
    );
  }
  /**
   * Deliver an event through channel preferences.
   * Tries each channel in order until one succeeds; a throwing channel
   * is logged and the next preference is attempted.
   */
  private async deliverEvent(event: UserEvent): Promise<EventAck> {
    for (const pref of event.delivery.channels) {
      // Skip if channel requires active session but none exists
      if (pref.onlyIfActive && !this.sessions.has(event.userId)) {
        this.logger.debug(
          {
            eventId: event.eventId,
            channel: getChannelTypeName(pref.channel),
          },
          'Skipping channel (requires active session)'
        );
        continue;
      }
      try {
        const delivered = await this.deliverToChannel(event, pref.channel);
        if (delivered) {
          return {
            eventId: event.eventId,
            status: AckStatus.DELIVERED,
            errorMessage: '',
            deliveredVia: pref.channel,
          };
        }
      } catch (error) {
        this.logger.warn(
          {
            error,
            eventId: event.eventId,
            channel: getChannelTypeName(pref.channel),
          },
          'Channel delivery failed, trying next'
        );
        // Continue to next channel preference
      }
    }
    // All channels failed
    this.logger.error({ eventId: event.eventId }, 'All delivery channels failed');
    return {
      eventId: event.eventId,
      status: AckStatus.ACK_ERROR,
      errorMessage: 'All delivery channels failed',
    };
  }
  /**
   * Deliver event to a specific channel.
   * Returns true if delivery succeeded, false if the channel was not
   * applicable (e.g. no active session); throws on delivery errors.
   */
  private async deliverToChannel(event: UserEvent, channel: ChannelType): Promise<boolean> {
    switch (channel) {
      case ChannelType.ACTIVE_SESSION: {
        const session = this.sessions.get(event.userId);
        if (!session) return false;
        const message = this.formatWebSocketMessage(event);
        session.socket.send(message);
        return true;
      }
      case ChannelType.WEB: {
        // WEB is same as ACTIVE_SESSION for WebSocket connections
        const session = this.sessions.get(event.userId);
        if (!session || session.channelType !== 'websocket') return false;
        const message = this.formatWebSocketMessage(event);
        session.socket.send(message);
        return true;
      }
      case ChannelType.TELEGRAM:
        await this.delivery.sendTelegram(event.userId, event);
        return true;
      case ChannelType.EMAIL:
        await this.delivery.sendEmail(event.userId, event);
        return true;
      case ChannelType.PUSH:
        await this.delivery.sendPush(event.userId, event);
        return true;
      case ChannelType.DISCORD:
        await this.delivery.sendDiscord(event.userId, event);
        return true;
      case ChannelType.SLACK:
        await this.delivery.sendSlack(event.userId, event);
        return true;
      default:
        this.logger.warn({ channel }, 'Unknown channel type');
        return false;
    }
  }
  /**
   * Send an EventAck back to a container, addressed by its identity frame.
   */
  private async sendAck(identity: Buffer, ack: EventAck): Promise<void> {
    const payload = serializeEventAck(ack);
    await this.socket.send([identity, payload]);
  }
  /**
   * Format a UserEvent as a WebSocket JSON message.
   */
  private formatWebSocketMessage(event: UserEvent): string {
    const payload = parseEventPayload(event);
    return JSON.stringify({
      type: 'event',
      eventType: getEventTypeName(event.eventType),
      eventId: event.eventId,
      timestamp: event.timestamp,
      payload: payload,
      priority: Priority[event.delivery.priority],
    });
  }
  /**
   * Start the deduplication cleanup timer.
   */
  private startCleanupTimer(): void {
    this.cleanupInterval = setInterval(() => {
      this.cleanupProcessedEvents();
    }, 60000); // Cleanup every minute
  }
  /**
   * Remove entries older than the dedup TTL from the processed events map.
   */
  private cleanupProcessedEvents(): void {
    const now = Date.now();
    let cleaned = 0;
    for (const [eventId, timestamp] of this.processedEvents) {
      if (now - timestamp > this.dedupTtlMs) {
        this.processedEvents.delete(eventId);
        cleaned++;
      }
    }
    if (cleaned > 0) {
      this.logger.debug({ cleaned }, 'Cleaned up processed events');
    }
  }
  /**
   * Get count of tracked processed events (for monitoring).
   */
  getProcessedEventCount(): number {
    return this.processedEvents.size;
  }
}

View File

@@ -0,0 +1,218 @@
import { Subscriber } from 'zeromq';
import type { FastifyBaseLogger } from 'fastify';
import type { SessionRegistry, Session } from './session-registry.js';
import {
deserializeUserEvent,
parseEventPayload,
getEventTypeName,
type UserEvent,
} from './types.js';
/**
 * EventSubscriber handles informational events from user containers.
 *
 * Uses ZMQ SUB socket to connect to container XPUB sockets.
 * When a user session connects, we subscribe to their events.
 * When they disconnect, we unsubscribe.
 *
 * This is for fire-and-forget events that only matter if the user
 * is actively connected (e.g., chart updates, strategy logs).
 *
 * A single SUB socket is shared: it is connected to many container
 * endpoints and carries one topic subscription per connected user.
 */
export class EventSubscriber {
  private socket: Subscriber;
  private sessions: SessionRegistry;
  private logger: FastifyBaseLogger;
  // Track which container endpoints we're connected to
  // Map: userId -> containerEndpoint
  private containerConnections = new Map<string, string>();
  // Track active subscriptions
  // Set of topic strings we're subscribed to
  private activeSubscriptions = new Set<string>();
  private running = false;
  private messageLoopPromise: Promise<void> | null = null;
  /**
   * @param sessions Registry used to look up the active session a
   *   received event should be forwarded to.
   * @param logger Parent logger; a component-tagged child is created.
   */
  constructor(sessions: SessionRegistry, logger: FastifyBaseLogger) {
    this.socket = new Subscriber();
    this.sessions = sessions;
    this.logger = logger.child({ component: 'EventSubscriber' });
  }
  /**
   * Start the event subscriber.
   * Launches the receive loop; connections/subscriptions are added
   * lazily as sessions connect.
   */
  async start(): Promise<void> {
    this.running = true;
    this.messageLoopPromise = this.messageLoop();
    this.logger.info('Event subscriber started');
  }
  /**
   * Stop the event subscriber.
   * Closes the socket and waits for the receive loop to finish;
   * errors raised during shutdown are ignored.
   */
  async stop(): Promise<void> {
    this.running = false;
    this.socket.close();
    if (this.messageLoopPromise) {
      try {
        await this.messageLoopPromise;
      } catch {
        // Ignore errors during shutdown
      }
    }
    this.logger.info('Event subscriber stopped');
  }
  /**
   * Called when a user session connects.
   * Connects to the user's container XPUB and subscribes to their events.
   *
   * NOTE(review): ZMQ subscriptions match by prefix, so topic "USER:1"
   * would also match "USER:10…" — confirm user IDs cannot be prefixes
   * of one another (or append a delimiter to the topic).
   */
  async onSessionConnect(session: Session): Promise<void> {
    const topic = `USER:${session.userId}`;
    const endpoint = session.containerEndpoint;
    // Connect to container if not already connected
    if (!this.containerConnections.has(session.userId)) {
      try {
        this.socket.connect(endpoint);
        this.containerConnections.set(session.userId, endpoint);
        this.logger.info(
          { userId: session.userId, endpoint },
          'Connected to container XPUB'
        );
      } catch (error) {
        this.logger.error(
          { error, userId: session.userId, endpoint },
          'Failed to connect to container XPUB'
        );
        return;
      }
    }
    // Subscribe to user's topic
    if (!this.activeSubscriptions.has(topic)) {
      this.socket.subscribe(topic);
      this.activeSubscriptions.add(topic);
      this.logger.info({ userId: session.userId, topic }, 'Subscribed to user events');
    }
  }
  /**
   * Called when a user session disconnects.
   * Unsubscribes from their events. The container connection itself is
   * deliberately left open (see comment below), so containerConnections
   * is never pruned here.
   */
  async onSessionDisconnect(session: Session): Promise<void> {
    const topic = `USER:${session.userId}`;
    // Unsubscribe from user's topic
    if (this.activeSubscriptions.has(topic)) {
      this.socket.unsubscribe(topic);
      this.activeSubscriptions.delete(topic);
      this.logger.info({ userId: session.userId, topic }, 'Unsubscribed from user events');
    }
    // Optionally disconnect from container after a delay
    // (in case user reconnects quickly)
    // For now, we keep the connection open
  }
  /**
   * Main message processing loop.
   * Iterates [topic, payload] frame pairs from the SUB socket.
   */
  private async messageLoop(): Promise<void> {
    this.logger.debug('Starting message loop');
    try {
      for await (const [topicBuf, payloadBuf] of this.socket) {
        if (!this.running) break;
        try {
          await this.processMessage(topicBuf, payloadBuf);
        } catch (error) {
          this.logger.error({ error }, 'Error processing informational event');
        }
      }
    } catch (error) {
      if (this.running) {
        this.logger.error({ error }, 'Message loop error');
      }
    }
    this.logger.debug('Message loop ended');
  }
  /**
   * Process a single message from the SUB socket: extract the user ID
   * from the topic, then forward the event to the user's active
   * session, dropping it if no session is connected.
   */
  private async processMessage(topicBuf: Buffer, payloadBuf: Buffer): Promise<void> {
    const topic = topicBuf.toString();
    // Strips the first occurrence of the "USER:" prefix to recover the userId.
    const userId = topic.replace('USER:', '');
    // Deserialize the event
    const event = deserializeUserEvent(payloadBuf);
    this.logger.debug(
      {
        userId,
        eventId: event.eventId,
        eventType: getEventTypeName(event.eventType),
      },
      'Received informational event'
    );
    // Forward to active session
    const session = this.sessions.get(userId);
    if (!session) {
      this.logger.debug({ userId }, 'No active session for event, dropping');
      return;
    }
    // Format and send to WebSocket
    const wsMessage = this.formatWebSocketMessage(event);
    try {
      session.socket.send(wsMessage);
      this.logger.debug(
        { userId, eventId: event.eventId },
        'Forwarded event to WebSocket'
      );
    } catch (error) {
      this.logger.warn(
        { error, userId, eventId: event.eventId },
        'Failed to send event to WebSocket'
      );
    }
  }
  /**
   * Format a UserEvent as a WebSocket JSON message.
   */
  private formatWebSocketMessage(event: UserEvent): string {
    const payload = parseEventPayload(event);
    return JSON.stringify({
      type: 'event',
      eventType: getEventTypeName(event.eventType),
      eventId: event.eventId,
      timestamp: event.timestamp,
      payload: payload,
    });
  }
  /**
   * Get current subscription count.
   */
  getSubscriptionCount(): number {
    return this.activeSubscriptions.size;
  }
  /**
   * Get current connection count.
   */
  getConnectionCount(): number {
    return this.containerConnections.size;
  }
}

View File

@@ -0,0 +1,15 @@
/**
* User Container Event System
*
* Handles events from user containers via dual ZMQ patterns:
* - EventSubscriber (SUB): Informational events to active sessions
* - EventRouter (ROUTER): Critical events with guaranteed delivery
*
* See doc/protocol.md and doc/user_container_events.md for details.
*/
export * from './types.js';
export * from './session-registry.js';
export * from './event-subscriber.js';
export * from './event-router.js';
export * from './delivery-service.js';

View File

@@ -0,0 +1,134 @@
import type { WebSocket } from '@fastify/websocket';
/**
 * Session information for an active user connection.
 */
export interface Session {
  /** Owning user. */
  userId: string;
  /** Unique ID for this particular connection. */
  sessionId: string;
  /** Live WebSocket used to push messages to the client. */
  socket: WebSocket;
  /** Transport the session arrived on. */
  channelType: 'websocket' | 'telegram';
  containerEndpoint: string; // Container's XPUB endpoint for informational events
  /** When the session was registered. */
  connectedAt: Date;
}
/**
 * Registry of active user sessions.
 * Used by the event system to route events to connected users.
 * Keeps at most one live session per user: registering a new session
 * for a user replaces and forgets the previous one.
 */
export class SessionRegistry {
  // userId -> most recent Session for that user
  private readonly activeByUser = new Map<string, Session>();
  // sessionId -> userId, for reverse lookups
  private readonly userOfSession = new Map<string, string>();
  /**
   * Register a new session for a user.
   * If user already has a session, the old one is replaced.
   */
  register(session: Session): void {
    const previous = this.activeByUser.get(session.userId);
    if (previous !== undefined) {
      this.userOfSession.delete(previous.sessionId);
    }
    this.activeByUser.set(session.userId, session);
    this.userOfSession.set(session.sessionId, session.userId);
  }
  /**
   * Unregister a session by its ID.
   * Returns the removed session, or undefined if that session is not
   * the user's current one.
   */
  unregister(sessionId: string): Session | undefined {
    const userId = this.userOfSession.get(sessionId);
    if (userId === undefined) return undefined;
    const current = this.activeByUser.get(userId);
    if (current === undefined || current.sessionId !== sessionId) {
      return undefined;
    }
    this.activeByUser.delete(userId);
    this.userOfSession.delete(sessionId);
    return current;
  }
  /** Get session by user ID. */
  get(userId: string): Session | undefined {
    return this.activeByUser.get(userId);
  }
  /** Get session by session ID. */
  getBySessionId(sessionId: string): Session | undefined {
    const userId = this.userOfSession.get(sessionId);
    return userId === undefined ? undefined : this.activeByUser.get(userId);
  }
  /** Check if user has an active session. */
  has(userId: string): boolean {
    return this.activeByUser.has(userId);
  }
  /** Get all active sessions. */
  all(): Session[] {
    return [...this.activeByUser.values()];
  }
  /** Get count of active sessions. */
  size(): number {
    return this.activeByUser.size;
  }
  /** Get all user IDs with active sessions. */
  userIds(): string[] {
    return [...this.activeByUser.keys()];
  }
  /**
   * Send a message to a user's active session.
   * Returns true if message was sent, false if no active session
   * (or the socket send threw).
   */
  sendToUser(userId: string, message: string | Buffer): boolean {
    const session = this.activeByUser.get(userId);
    if (session === undefined) return false;
    try {
      session.socket.send(message);
      return true;
    } catch {
      return false;
    }
  }
  /**
   * Broadcast a message to all active sessions.
   * Returns number of sessions that received the message.
   */
  broadcast(message: string | Buffer): number {
    let delivered = 0;
    for (const session of this.activeByUser.values()) {
      try {
        session.socket.send(message);
        delivered += 1;
      } catch {
        // Session may have disconnected mid-broadcast; skip it.
      }
    }
    return delivered;
  }
}

289
gateway/src/events/types.ts Normal file
View File

@@ -0,0 +1,289 @@
/**
* User Event Types for Container → Gateway communication
*
* These types mirror the protobuf definitions in protobuf/user_events.proto
* Message Type IDs:
* - UserEvent: 0x20
* - EventAck: 0x21
*/
// Message type IDs (must match protocol.md)
// Prepended as the first byte of every wire message (see serialize* below).
export const MSG_TYPE_USER_EVENT = 0x20;
export const MSG_TYPE_EVENT_ACK = 0x21;
// =============================================================================
// Enums
// =============================================================================
// NOTE: numeric values mirror protobuf/user_events.proto and are part of the
// wire format — do not renumber existing members. Gaps between groups leave
// room for new members in each category.
export enum EventType {
  // Trading events
  ORDER_PLACED = 0,
  ORDER_FILLED = 1,
  ORDER_CANCELLED = 2,
  ORDER_REJECTED = 3,
  ORDER_EXPIRED = 4,
  // Alert events
  ALERT_TRIGGERED = 10,
  ALERT_CREATED = 11,
  ALERT_DELETED = 12,
  // Position events
  POSITION_OPENED = 20,
  POSITION_CLOSED = 21,
  POSITION_UPDATED = 22,
  POSITION_LIQUIDATED = 23,
  // Workspace/chart events
  WORKSPACE_CHANGED = 30,
  CHART_ANNOTATION_ADDED = 31,
  CHART_ANNOTATION_REMOVED = 32,
  INDICATOR_UPDATED = 33,
  // Strategy events
  STRATEGY_STARTED = 40,
  STRATEGY_STOPPED = 41,
  STRATEGY_LOG = 42,
  STRATEGY_ERROR = 43,
  BACKTEST_COMPLETED = 44,
  // System events
  CONTAINER_STARTING = 50,
  CONTAINER_READY = 51,
  CONTAINER_SHUTTING_DOWN = 52,
  EVENT_ERROR = 53,
}
// Urgency of an event; informational events are fire-and-forget (SUB path),
// critical events go through the acknowledged ROUTER path.
export enum Priority {
  INFORMATIONAL = 0,
  NORMAL = 1,
  CRITICAL = 2,
}
// Delivery channels a user can receive notifications on; order within a
// DeliverySpec determines the fallback sequence.
export enum ChannelType {
  ACTIVE_SESSION = 0,
  WEB = 1,
  TELEGRAM = 2,
  EMAIL = 3,
  PUSH = 4,
  DISCORD = 5,
  SLACK = 6,
}
// Result of a critical-event delivery attempt, echoed back in an EventAck.
export enum AckStatus {
  DELIVERED = 0,
  QUEUED = 1,
  ACK_ERROR = 2,
}
// =============================================================================
// Message Types
// =============================================================================
/** One entry in a user's ordered channel fallback list. */
export interface ChannelPreference {
  /** Channel to attempt delivery on. */
  channel: ChannelType;
  /** If true, only attempt this channel when the user has an active session. */
  onlyIfActive: boolean;
}
/** How an event should be delivered. */
export interface DeliverySpec {
  /** Urgency; determines which transport path carries the event. */
  priority: Priority;
  /** Ordered list of channels to try — the first successful one wins. */
  channels: ChannelPreference[];
}
/** An event emitted by a user container, addressed to its owning user. */
export interface UserEvent {
  /** Owner of the event; used for routing and channel-config lookup. */
  userId: string;
  /** Unique event ID; used for deduplication and acknowledgment. */
  eventId: string;
  timestamp: number; // Unix milliseconds
  eventType: EventType;
  /** JSON-encoded payload bytes (decode with parseEventPayload). */
  payload: Buffer;
  /** How and where this event should be delivered. */
  delivery: DeliverySpec;
}
/** Acknowledgment returned to a container for a critical event. */
export interface EventAck {
  /** Echoes UserEvent.eventId. */
  eventId: string;
  status: AckStatus;
  /** Empty string when there is no error. */
  errorMessage: string;
  /** Channel that succeeded, present when status is DELIVERED. */
  deliveredVia?: ChannelType;
}
// =============================================================================
// Serialization
// =============================================================================
/**
 * Serialize a UserEvent to wire format.
 * Wire layout: [1 byte message type (MSG_TYPE_USER_EVENT)][UTF-8 JSON body].
 * JSON keys use snake_case to match the protobuf field names.
 *
 * Note: In production, replace with proper protobuf serialization.
 */
export function serializeUserEvent(event: UserEvent): Buffer {
  const channels = event.delivery.channels.map((pref) => ({
    channel: pref.channel,
    only_if_active: pref.onlyIfActive,
  }));
  const body = JSON.stringify({
    user_id: event.userId,
    event_id: event.eventId,
    timestamp: event.timestamp,
    event_type: event.eventType,
    payload: event.payload.toString('base64'),
    delivery: {
      priority: event.delivery.priority,
      channels,
    },
  });
  return Buffer.concat([Buffer.from([MSG_TYPE_USER_EVENT]), Buffer.from(body)]);
}
/**
 * Deserialize a UserEvent from wire format.
 * @throws Error when the leading byte is not MSG_TYPE_USER_EVENT.
 */
export function deserializeUserEvent(data: Buffer): UserEvent {
  const msgType = data[0];
  if (msgType !== MSG_TYPE_USER_EVENT) {
    throw new Error(`Invalid message type: expected ${MSG_TYPE_USER_EVENT}, got ${msgType}`);
  }
  const decoded = JSON.parse(data.subarray(1).toString());
  const channels = decoded.delivery.channels.map(
    (c: { channel: number; only_if_active: boolean }) => ({
      channel: c.channel as ChannelType,
      onlyIfActive: c.only_if_active,
    })
  );
  return {
    userId: decoded.user_id,
    eventId: decoded.event_id,
    timestamp: decoded.timestamp,
    eventType: decoded.event_type as EventType,
    payload: Buffer.from(decoded.payload, 'base64'),
    delivery: {
      priority: decoded.delivery.priority as Priority,
      channels,
    },
  };
}
/**
 * Serialize an EventAck to wire format:
 * [1 byte message type (MSG_TYPE_EVENT_ACK)][UTF-8 JSON body].
 */
export function serializeEventAck(ack: EventAck): Buffer {
  const body = JSON.stringify({
    event_id: ack.eventId,
    status: ack.status,
    error_message: ack.errorMessage,
    delivered_via: ack.deliveredVia,
  });
  return Buffer.concat([Buffer.from([MSG_TYPE_EVENT_ACK]), Buffer.from(body)]);
}
/**
 * Deserialize an EventAck from wire format.
 * @throws Error when the leading byte is not MSG_TYPE_EVENT_ACK.
 */
export function deserializeEventAck(data: Buffer): EventAck {
  const msgType = data[0];
  if (msgType !== MSG_TYPE_EVENT_ACK) {
    throw new Error(`Invalid message type: expected ${MSG_TYPE_EVENT_ACK}, got ${msgType}`);
  }
  const decoded = JSON.parse(data.subarray(1).toString());
  return {
    eventId: decoded.event_id,
    status: decoded.status as AckStatus,
    errorMessage: decoded.error_message || '',
    deliveredVia: decoded.delivered_via as ChannelType | undefined,
  };
}
// =============================================================================
// Payload Parsing Helpers
// =============================================================================
/**
 * JSON payload for order lifecycle events (decode with parseEventPayload).
 * Numeric amounts are strings — presumably to preserve decimal precision
 * across the wire; confirm against the event producer.
 */
export interface OrderEventPayload {
  orderId: string;
  symbol: string;
  side: string; // e.g. buy/sell — values not constrained here; confirm with producer
  orderType: string;
  quantity: string;
  price?: string; // absent for market orders, presumably
  fillPrice?: string;
  fillQuantity?: string;
  status: string;
  exchange: string;
  timestamp: number; // units (millis vs micros) not established here — confirm with producer
  strategyId?: string; // set when the order originated from a strategy
  errorMessage?: string; // populated on rejected/failed orders, presumably
}
/**
 * JSON payload for price-alert trigger events.
 */
export interface AlertEventPayload {
  alertId: string;
  symbol: string;
  condition: string; // the alert condition that fired, as configured
  triggeredPrice: string;
  timestamp: number; // units not established here — confirm with producer
}
/**
 * JSON payload for position update events.
 */
export interface PositionEventPayload {
  positionId: string;
  symbol: string;
  side: string;
  size: string;
  entryPrice: string;
  currentPrice: string;
  unrealizedPnl: string;
  realizedPnl?: string;
  leverage?: string; // only for leveraged positions, presumably
  liquidationPrice?: string;
  exchange: string;
  timestamp: number;
}
/**
 * JSON payload for workspace change events. Which optional fields are set
 * depends on changeType (annotation vs indicator vs chart changes).
 */
export interface WorkspaceEventPayload {
  workspaceId: string;
  changeType: string;
  symbol?: string;
  timeframe?: string;
  annotationId?: string;
  annotationType?: string;
  annotationData?: string; // serialized annotation content, presumably JSON
  indicatorName?: string;
  indicatorParams?: string; // serialized parameters, presumably JSON
  timestamp: number;
}
/**
 * JSON payload for strategy log/notification events.
 */
export interface StrategyEventPayload {
  strategyId: string;
  strategyName: string;
  logLevel: string;
  message: string;
  details?: string; // optional extended context for the log line
  timestamp: number;
}
/**
 * Parse a UserEvent's raw payload buffer as UTF-8 JSON.
 *
 * @returns The decoded value typed as T, or null when the payload is not
 *          valid JSON. No structural validation against T is performed —
 *          callers must not trust the shape beyond what they check.
 */
export function parseEventPayload<T>(event: UserEvent): T | null {
  const text = event.payload.toString();
  try {
    const parsed = JSON.parse(text);
    return parsed as T;
  } catch {
    return null;
  }
}
/**
 * Human-readable name for an EventType value via the enum's reverse
 * mapping; falls back to "UNKNOWN(<value>)" for unrecognized values.
 */
export function getEventTypeName(eventType: EventType): string {
  const name = EventType[eventType];
  return name ? name : `UNKNOWN(${eventType})`;
}
/**
 * Human-readable name for a ChannelType value via the enum's reverse
 * mapping; falls back to "UNKNOWN(<value>)" for unrecognized values.
 */
export function getChannelTypeName(channelType: ChannelType): string {
  const name = ChannelType[channelType];
  return name ? name : `UNKNOWN(${channelType})`;
}

View File

@@ -0,0 +1,351 @@
# Agent Harness
Comprehensive agent orchestration system for Dexorder AI platform, built on LangChain.js and LangGraph.js.
## Architecture Overview
```
gateway/src/harness/
├── memory/ # Storage layer (Redis + Iceberg + Qdrant)
├── skills/ # Individual capabilities (markdown + TypeScript)
├── subagents/ # Specialized agents with multi-file memory
├── workflows/ # LangGraph state machines
├── tools/ # Platform tools (non-MCP)
├── config/ # Configuration files
└── index.ts # Main exports
```
## Core Components
### 1. Memory Layer (`memory/`)
Tiered storage architecture as per [architecture discussion](../../chat/harness-rag.txt):
- **Redis**: Hot state (active sessions, checkpoints)
- **Iceberg**: Cold storage (durable conversations, analytics)
- **Qdrant**: Vector search (RAG, semantic memory)
**Key Files:**
- `checkpoint-saver.ts`: LangGraph checkpoint persistence
- `conversation-store.ts`: Message history management
- `rag-retriever.ts`: Vector similarity search
- `embedding-service.ts`: Text→vector conversion
- `session-context.ts`: User context with channel metadata
### 2. Skills (`skills/`)
Self-contained capabilities with markdown definitions:
- `*.skill.md`: Human-readable documentation
- `*.ts`: Implementation extending `BaseSkill`
- Input validation and error handling
- Can use LLM, MCP tools, or platform tools
**Example:**
```typescript
import { MarketAnalysisSkill } from './skills';
const skill = new MarketAnalysisSkill(logger, model);
const result = await skill.execute({
context: userContext,
parameters: { ticker: 'BTC/USDT', period: '4h' }
});
```
See [skills/README.md](skills/README.md) for authoring guide.
### 3. Subagents (`subagents/`)
Specialized agents with multi-file memory:
```
subagents/
code-reviewer/
config.yaml # Model, memory files, capabilities
system-prompt.md # System instructions
memory/ # Multi-file knowledge base
review-guidelines.md
common-patterns.md
best-practices.md
index.ts # Implementation
```
**Features:**
- Dedicated system prompts
- Split memory into logical files (better organization)
- Model overrides
- Capability tagging
**Example:**
```typescript
const codeReviewer = await createCodeReviewerSubagent(model, logger, basePath);
const review = await codeReviewer.execute({ userContext }, strategyCode);
```
### 4. Workflows (`workflows/`)
LangGraph state machines with:
- Validation loops (retry with fixes)
- Human-in-the-loop (approval gates)
- Multi-step orchestration
- Error recovery
**Example Workflows:**
- `strategy-validation/`: Code review → backtest → risk → approval
- `trading-request/`: Analysis → risk → approval → execute
See individual workflow READMEs for details.
### 5. Configuration (`config/`)
YAML-based configuration:
- `models.yaml`: LLM providers, routing, rate limits
- `subagent-routing.yaml`: When to use which subagent
## User Context
Enhanced session context with channel awareness for multi-channel support:
```typescript
interface UserContext {
userId: string;
sessionId: string;
license: UserLicense;
activeChannel: {
type: 'websocket' | 'telegram' | 'slack' | 'discord';
channelUserId: string;
capabilities: {
supportsMarkdown: boolean;
supportsImages: boolean;
supportsButtons: boolean;
maxMessageLength: number;
};
};
conversationHistory: BaseMessage[];
relevantMemories: MemoryChunk[];
workspaceState: WorkspaceContext;
}
```
This allows workflows to:
- Route responses to correct channel
- Format output for channel capabilities
- Handle channel-specific interactions (buttons, voice, etc.)
## Storage Architecture
Based on [harness-rag.txt discussion](../../chat/harness-rag.txt):
### Hot Path (Redis)
- Active checkpoints (TTL: 1 hour)
- Recent messages (last 50)
- Session metadata
- Fast reads for active conversations
### Cold Path (Iceberg)
- Full conversation history (partitioned by user_id, session_id)
- Checkpoint snapshots
- Time-travel queries
- GDPR-compliant deletion with compaction
### Vector Search (Qdrant)
- Conversation embeddings
- Long-term memory
- RAG retrieval
- Payload-indexed by user_id for fast GDPR deletion
- **Global knowledge base** (user_id="0") loaded from markdown files
### GDPR Compliance
```typescript
// Delete user data across all stores
await conversationStore.deleteUserData(userId);
await ragRetriever.deleteUserData(userId);
await checkpointSaver.delete(userId);
await containerManager.deleteContainer(userId);
// Iceberg physical delete
await icebergTable.expire_snapshots();
await icebergTable.rewrite_data_files();
```
## Standard Patterns
### Validation Loop (Retry with Fixes)
```typescript
graph.addConditionalEdges('validate', (state) => {
if (state.errors.length > 0 && state.retryCount < 3) {
return 'fix_errors'; // Loop back
}
return state.errors.length === 0 ? 'approve' : 'reject';
});
```
### Human-in-the-Loop (Approval Gates)
```typescript
const approvalNode = async (state) => {
// Send to user's channel
await sendToChannel(state.userContext.activeChannel, {
type: 'approval_request',
data: { /* details */ }
});
// LangGraph pauses here via Interrupt
// Resume with user input: graph.invoke(state, { ...resumeConfig })
return { approvalRequested: true };
};
```
## Getting Started
### 1. Install Dependencies
Already in `gateway/package.json`:
```json
{
"@langchain/core": "^0.3.24",
"@langchain/langgraph": "^0.2.26",
"@langchain/anthropic": "^0.3.8",
"ioredis": "^5.4.2"
}
```
### 2. Initialize Memory Layer
```typescript
import Redis from 'ioredis';
import {
TieredCheckpointSaver,
ConversationStore,
EmbeddingService,
RAGRetriever
} from './harness/memory';
const redis = new Redis(process.env.REDIS_URL);
const checkpointSaver = new TieredCheckpointSaver(redis, logger);
const conversationStore = new ConversationStore(redis, logger);
const embeddings = new EmbeddingService({ provider: 'openai', apiKey }, logger);
const ragRetriever = new RAGRetriever({ url: QDRANT_URL }, logger);
await ragRetriever.initialize();
```
### 3. Create Subagents
```typescript
import { createCodeReviewerSubagent } from './harness/subagents';
import { ModelRouter } from './llm/router';
const model = await modelRouter.route(query, license);
const codeReviewer = await createCodeReviewerSubagent(
model,
logger,
'gateway/src/harness/subagents/code-reviewer'
);
```
### 4. Build Workflows
```typescript
import { createStrategyValidationWorkflow } from './harness/workflows';
const workflow = await createStrategyValidationWorkflow(
model,
codeReviewer,
mcpBacktestFn,
logger,
'gateway/src/harness/workflows/strategy-validation/config.yaml'
);
const result = await workflow.execute({
userContext,
strategyCode: '...',
ticker: 'BTC/USDT',
timeframe: '4h'
});
```
### 5. Use Skills
```typescript
import { MarketAnalysisSkill } from './harness/skills';
const skill = new MarketAnalysisSkill(logger, model);
const analysis = await skill.execute({
context: userContext,
parameters: { ticker: 'BTC/USDT', period: '1h' }
});
```
## Global Knowledge System
The harness includes a document loader that automatically loads markdown files from `gateway/knowledge/` into Qdrant as global knowledge (user_id="0").
### Directory Structure
```
gateway/knowledge/
├── platform/ # Platform capabilities and architecture
├── trading/ # Trading concepts and fundamentals
├── indicators/ # Indicator development guides
└── strategies/ # Strategy patterns and examples
```
### How It Works
1. **Startup**: Documents are loaded automatically when gateway starts
2. **Chunking**: Intelligent splitting by markdown headers (~1000 tokens/chunk)
3. **Embedding**: Chunks are embedded using configured embedding service
4. **Storage**: Stored in Qdrant with user_id="0" (global namespace)
5. **Updates**: Content hashing detects changes for incremental updates
### RAG Query Flow
When a user sends a message:
1. Query is embedded using same embedding service
2. Qdrant searches vectors with filter: `user_id = current_user OR user_id = "0"`
3. Results include both user-specific and global knowledge
4. Relevant chunks are added to LLM context
5. LLM generates response with platform knowledge
### Managing Knowledge
**Add new documents**:
```bash
# Create markdown file in appropriate directory
echo "# New Topic" > gateway/knowledge/platform/new-topic.md
# Reload knowledge (development)
curl -X POST http://localhost:3000/admin/reload-knowledge
```
**Check stats**:
```bash
curl http://localhost:3000/admin/knowledge-stats
```
**In production**: Just deploy updated markdown files - they'll be loaded on startup.
See [gateway/knowledge/README.md](../../knowledge/README.md) for detailed documentation.
## Next Steps
1. **Implement Iceberg Integration**: Complete TODOs in checkpoint-saver.ts and conversation-store.ts
2. **Add More Subagents**: Risk analyzer, market analyst, etc.
3. **Implement Interrupts**: Full human-in-the-loop with LangGraph interrupts
4. **Add Platform Tools**: Market data queries, chart rendering, etc.
5. **Expand Knowledge Base**: Add more platform documentation to knowledge/
## References
- Architecture discussion: [chat/harness-rag.txt](../../chat/harness-rag.txt)
- LangGraph docs: https://langchain-ai.github.io/langgraphjs/
- Qdrant docs: https://qdrant.tech/documentation/
- Apache Iceberg: https://iceberg.apache.org/docs/latest/

View File

@@ -1,4 +1,4 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
@@ -286,15 +286,7 @@ Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
return prompt;
}
/**
 * Get platform tools (non-user-specific tools).
 *
 * These are tools that can be offered to the agent without connecting to a
 * user's personal MCP container. Currently none are registered — this
 * returns an empty list until platform-level tools are implemented.
 */
private getPlatformTools(): Array<{ name: string; description?: string }> {
  // Platform tools that don't need user's MCP
  return [
    // TODO: Add platform tools like market data queries, chart rendering, etc.
  ];
}
/**
* Cleanup resources

View File

@@ -0,0 +1,110 @@
# Default LLM Model Configuration
# Default model for general agent tasks
default:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.7
maxTokens: 4096
# Model overrides for specific use cases
models:
# Fast model for simple tasks (routing, classification)
fast:
provider: anthropic
model: claude-3-haiku-20240307
temperature: 0.3
maxTokens: 1024
# Reasoning model for complex analysis
reasoning:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.5
maxTokens: 8192
# Precise model for code generation/review
code:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.2
maxTokens: 8192
# Creative model for strategy brainstorming
creative:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.9
maxTokens: 4096
# Embedding model configuration
embeddings:
provider: openai
model: text-embedding-3-small
dimensions: 1536
# Model routing rules (complexity-based)
routing:
# Simple queries → fast model
simple:
keywords:
- "what is"
- "define"
- "list"
- "show me"
maxTokens: 100
model: fast
# Code-related → code model
code:
keywords:
- "code"
- "function"
- "implement"
- "debug"
- "review"
model: code
# Analysis tasks → reasoning model
analysis:
keywords:
- "analyze"
- "compare"
- "evaluate"
- "assess"
model: reasoning
# Everything else → default
default:
model: default
# Cost optimization settings
costControl:
# Cache system prompts (Anthropic prompt caching)
cacheSystemPrompts: true
# Token limits per license type
tokenLimits:
free:
maxTokensPerMessage: 2048
maxTokensPerDay: 50000
pro:
maxTokensPerMessage: 8192
maxTokensPerDay: 500000
enterprise:
maxTokensPerMessage: 16384
maxTokensPerDay: -1 # unlimited
# Rate limiting
rateLimits:
# Requests per minute by license
requestsPerMinute:
free: 10
pro: 60
enterprise: 120
# Concurrent requests
concurrentRequests:
free: 1
pro: 3
enterprise: 10

View File

@@ -0,0 +1,98 @@
# Subagent Routing Configuration
# When to use which subagent based on task type
subagents:
# Code Reviewer Subagent
code-reviewer:
enabled: true
path: src/harness/subagents/code-reviewer
triggers:
keywords:
- "review code"
- "check code"
- "code review"
- "analyze code"
- "audit code"
patterns:
- "review.*code"
- "check.*strategy"
- "analyze.*function"
priority: high
timeout: 60000 # 1 minute
# Risk Analyzer Subagent (TODO: implement)
risk-analyzer:
enabled: false
path: src/harness/subagents/risk-analyzer
triggers:
keywords:
- "risk"
- "exposure"
- "drawdown"
- "volatility"
patterns:
- "assess.*risk"
- "calculate.*risk"
- "risk.*analysis"
priority: high
timeout: 30000
# Market Analyst Subagent (TODO: implement)
market-analyst:
enabled: false
path: src/harness/subagents/market-analyst
triggers:
keywords:
- "market"
- "trend"
- "technical analysis"
- "price action"
patterns:
- "analyze.*market"
- "market.*conditions"
priority: medium
timeout: 45000
# Routing strategy
routing:
# Check triggers in priority order
strategy: priority
# Fallback to main agent if no subagent matches
fallback: main_agent
# Allow chaining (one subagent can invoke another)
allowChaining: true
maxChainDepth: 3
# Subagent memory settings
memory:
# Reload memory files on every request (dev mode)
hotReload: false
# Cache memory files in production
cacheMemory: true
cacheTTL: 3600000 # 1 hour
# Parallel execution
parallel:
# Allow multiple subagents to run in parallel
enabled: true
# Max concurrent subagents
maxConcurrent: 3
# Combine results strategy
combineStrategy: merge # merge | first | best
# Monitoring
monitoring:
# Log subagent performance
logPerformance: true
# Track usage by subagent
trackUsage: true
# Alert on slow subagents
alertThreshold: 30000 # 30 seconds

View File

@@ -0,0 +1,17 @@
// Main harness exports — single entry point for the agent harness package.
// Memory: tiered storage layer (checkpoint saver, conversation store, RAG)
export * from './memory/index.js';
// Skills: self-contained capabilities
export * from './skills/index.js';
// Subagents: specialized agents with dedicated prompts/memory
export * from './subagents/index.js';
// Workflows: LangGraph state machines
export * from './workflows/index.js';
// Re-export agent harness (for backward compatibility)
export { AgentHarness, type AgentHarnessConfig } from './agent-harness.js';
export { MCPClientConnector } from './mcp-client.js';

View File

@@ -1,5 +1,5 @@
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
import type { FastifyBaseLogger } from 'fastify';
export interface MCPClientConfig {
@@ -44,10 +44,9 @@ export class MCPClientConnector {
},
{
capabilities: {
tools: {},
resources: {},
sampling: {},
},
}
} as any
);
// TODO: Replace with HTTP transport when user containers are ready

View File

@@ -0,0 +1,236 @@
import { BaseCheckpointSaver } from '@langchain/langgraph';
import type { Checkpoint, CheckpointMetadata, CheckpointTuple } from '@langchain/langgraph';
import type { RunnableConfig } from '@langchain/core/runnables';
import type Redis from 'ioredis';
import type { FastifyBaseLogger } from 'fastify';
/**
 * Tiered checkpoint saver: Redis (hot) + Iceberg (cold)
 *
 * Hot path: Active checkpoints stored in Redis with TTL
 * Cold path: Durable storage in Iceberg for long-term retention
 *
 * Based on architecture discussion: Redis for active sessions,
 * Iceberg for durable storage with time-travel capabilities.
 *
 * Key layout:
 *   ckpt:<threadId>:<checkpointId>  — one entry per checkpoint
 *   ckpt:<threadId>:latest          — alias for the most recent checkpoint
 */
export class TieredCheckpointSaver extends BaseCheckpointSaver<number> {
  private readonly HOT_TTL_SECONDS = 3600; // 1 hour
  private readonly KEY_PREFIX = 'ckpt:';

  constructor(
    private redis: Redis,
    private logger: FastifyBaseLogger,
    // Note: Iceberg writes are handled via Kafka + Flink for consistency
    // Reads can be implemented when needed using IcebergClient
    // private iceberg?: IcebergClient
  ) {
    super();
  }

  /**
   * Get checkpoint from Redis (hot) or Iceberg (cold).
   *
   * Looks up config.configurable.checkpoint_id when present, otherwise the
   * thread's ":latest" alias. Returns undefined on a miss — the Iceberg
   * cold-path read is not yet implemented.
   *
   * @throws Error when config.configurable.thread_id is missing.
   */
  async getTuple(config: RunnableConfig): Promise<CheckpointTuple | undefined> {
    const threadId = config.configurable?.thread_id as string;
    if (!threadId) {
      throw new Error('thread_id required in config.configurable');
    }
    const checkpointId = config.configurable?.checkpoint_id as string | undefined;
    this.logger.debug({ threadId, checkpointId }, 'Getting checkpoint');
    // Hot path: Try Redis first
    const key = this.getRedisKey(threadId, checkpointId);
    const cached = await this.redis.get(key);
    if (cached) {
      this.logger.debug({ threadId, checkpointId }, 'Checkpoint found in Redis (hot)');
      return this.deserialize(cached, threadId);
    }
    // Cold path: Load from Iceberg (if needed)
    // Note: Implement when Iceberg query is required
    // Can use IcebergClient to query gateway.checkpoints table
    // or set up a Kafka topic for checkpoint persistence
    this.logger.debug({ threadId, checkpointId }, 'Checkpoint not in Redis, Iceberg cold storage not yet implemented');
    return undefined;
  }

  /**
   * Save checkpoint to Redis (hot) and async flush to Iceberg (cold).
   *
   * The per-id entry and the ":latest" alias are written in one MULTI
   * transaction (single round trip); the Iceberg flush is fire-and-forget.
   */
  async put(
    config: RunnableConfig,
    checkpoint: Checkpoint,
    metadata: CheckpointMetadata
  ): Promise<RunnableConfig> {
    const threadId = config.configurable?.thread_id as string;
    if (!threadId) {
      throw new Error('thread_id required in config.configurable');
    }
    this.logger.debug({ threadId, checkpointId: checkpoint.id }, 'Saving checkpoint');
    const serialized = this.serialize(checkpoint, metadata);
    // Hot: Redis with TTL — per-id entry plus latest pointer
    const key = this.getRedisKey(threadId, checkpoint.id);
    const latestKey = this.getRedisKey(threadId);
    await this.redis
      .multi()
      .set(key, serialized, 'EX', this.HOT_TTL_SECONDS)
      .set(latestKey, serialized, 'EX', this.HOT_TTL_SECONDS)
      .exec();
    // Cold: Async flush to Iceberg (fire and forget)
    this.flushToIceberg(threadId, checkpoint, metadata).catch((error) => {
      this.logger.error({ error, threadId }, 'Failed to flush checkpoint to Iceberg');
    });
    return {
      configurable: {
        ...config.configurable,
        thread_id: threadId,
        checkpoint_id: checkpoint.id,
      },
    };
  }

  /**
   * List all checkpoints for a thread currently held in Redis.
   *
   * The ":latest" alias is skipped so the newest checkpoint is not yielded
   * twice. KEYS is acceptable at current scale; switch to SCAN if the
   * keyspace grows large.
   *
   * @throws Error when config.configurable.thread_id is missing.
   */
  async *list(
    config: RunnableConfig
  ): AsyncGenerator<CheckpointTuple> {
    const threadId = config.configurable?.thread_id as string;
    if (!threadId) {
      throw new Error('thread_id required in config.configurable');
    }
    // Try to get from Redis first
    const pattern = `${this.KEY_PREFIX}${threadId}:*`;
    const keys = await this.redis.keys(pattern);
    for (const key of keys) {
      // The ":latest" alias duplicates one of the per-id entries — skip it.
      if (key.endsWith(':latest')) {
        continue;
      }
      const data = await this.redis.get(key);
      if (data) {
        const tuple = this.deserialize(data, threadId);
        if (tuple) {
          yield tuple;
        }
      }
    }
    // TODO: Also scan Iceberg for historical checkpoints
  }

  /**
   * Delete all Redis state for a thread (for GDPR compliance).
   *
   * The pattern is anchored with ':' so a thread id that is a prefix of
   * another (e.g. "abc" vs "abc2") cannot delete the other thread's keys.
   * Matches both per-id entries and the ":latest" alias.
   */
  async deleteThread(threadId: string): Promise<void> {
    this.logger.info({ threadId }, 'Deleting thread');
    const pattern = `${this.KEY_PREFIX}${threadId}:*`;
    const keys = await this.redis.keys(pattern);
    if (keys.length > 0) {
      await this.redis.del(...keys);
    }
    // TODO: Also delete from Iceberg
    // await this.deleteFromIceberg(threadId);
  }

  /**
   * Put writes (required by BaseCheckpointSaver).
   *
   * Pending writes are only logged for now; a full implementation would
   * store them separately, keyed by task.
   */
  async putWrites(
    config: RunnableConfig,
    writes: [string, unknown][],
    taskId: string
  ): Promise<void> {
    const threadId = config.configurable?.thread_id;
    this.logger.debug({ threadId, taskId, writes }, 'Put writes called');
  }

  /**
   * Generate Redis key for a checkpoint; without a checkpointId this is the
   * thread's ":latest" alias.
   */
  private getRedisKey(threadId: string, checkpointId?: string): string {
    if (checkpointId) {
      return `${this.KEY_PREFIX}${threadId}:${checkpointId}`;
    }
    return `${this.KEY_PREFIX}${threadId}:latest`;
  }

  /**
   * Serialize checkpoint + metadata to a JSON string.
   */
  private serialize(checkpoint: Checkpoint, metadata: CheckpointMetadata): string {
    return JSON.stringify({
      checkpoint,
      metadata,
      savedAt: new Date().toISOString(),
    });
  }

  /**
   * Deserialize a checkpoint from its JSON string.
   *
   * @param threadId The owning thread id. Fix: this used to be populated
   *                 from parsed.checkpoint.id, so the returned config
   *                 carried a checkpoint id in thread_id and could not be
   *                 used to resume the thread.
   */
  private deserialize(data: string, threadId: string): CheckpointTuple | undefined {
    try {
      const parsed = JSON.parse(data);
      return {
        config: {
          configurable: {
            thread_id: threadId,
            checkpoint_id: parsed.checkpoint.id,
          },
        },
        checkpoint: parsed.checkpoint,
        metadata: parsed.metadata,
        parentConfig: undefined,
      };
    } catch (error) {
      this.logger.error({ error }, 'Failed to deserialize checkpoint');
      return undefined;
    }
  }

  /**
   * Async flush checkpoint to Iceberg for durable storage
   *
   * Note: For production, send to Kafka topic that Flink consumes:
   * - Topic: gateway_checkpoints
   * - Flink job writes to gateway.checkpoints Iceberg table
   * - Ensures consistent write pattern with rest of system
   */
  private async flushToIceberg(
    _threadId: string,
    checkpoint: Checkpoint,
    _metadata: CheckpointMetadata
  ): Promise<void> {
    // TODO: Send to Kafka topic for Flink processing
    // const kafkaMessage = {
    //   user_id: metadata.userId || '0',
    //   session_id: threadId,
    //   checkpoint_id: checkpoint.id,
    //   checkpoint_data: JSON.stringify(checkpoint),
    //   metadata: JSON.stringify(metadata),
    //   timestamp: Date.now() * 1000, // microseconds
    // };
    // await this.kafkaProducer.send({
    //   topic: 'gateway_checkpoints',
    //   messages: [{ value: JSON.stringify(kafkaMessage) }]
    // });
    this.logger.debug({ threadId: _threadId, checkpointId: checkpoint.id },
      'Checkpoint flush to Iceberg (via Kafka) not yet implemented');
  }
}

View File

@@ -0,0 +1,252 @@
import type Redis from 'ioredis';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
/**
 * Message record for storage
 *
 * One chat message as persisted in both the Redis hot path and the Iceberg
 * cold path (see ConversationStore).
 */
export interface StoredMessage {
  id: string; // "<userId>:<sessionId>:<epochMillis>" — NOT unique within one millisecond
  userId: string;
  sessionId: string;
  role: 'user' | 'assistant' | 'system';
  content: string;
  timestamp: number; // microseconds (Iceberg convention)
  metadata?: Record<string, unknown>; // optional producer-defined extras
}
/**
 * Conversation store: Redis (hot) + Iceberg (cold)
 *
 * Hot path: Recent messages in Redis for fast access
 * Cold path: Full history in Iceberg for durability and analytics
 *
 * Architecture:
 * - Redis stores last N messages per session with TTL
 * - Iceberg stores all messages partitioned by user_id, session_id
 * - Supports time-travel queries for debugging and analysis
 */
export class ConversationStore {
  private readonly HOT_MESSAGE_LIMIT = 50; // Keep last 50 messages in Redis
  private readonly HOT_TTL_SECONDS = 3600; // 1 hour

  constructor(
    private redis: Redis,
    private logger: FastifyBaseLogger
    // TODO: Add Iceberg catalog
    // private iceberg: IcebergCatalog
  ) {}

  /**
   * Save a message to both Redis and Iceberg.
   *
   * The three Redis operations (push newest-first, trim to the hot limit,
   * refresh TTL) are sent as one MULTI transaction instead of three round
   * trips. The Iceberg append is fire-and-forget.
   */
  async saveMessage(
    userId: string,
    sessionId: string,
    role: 'user' | 'assistant' | 'system',
    content: string,
    metadata?: Record<string, unknown>
  ): Promise<void> {
    const message: StoredMessage = {
      // NOTE: id collides for two messages in the same millisecond — fine for
      // tracing, do not treat as a unique key.
      id: `${userId}:${sessionId}:${Date.now()}`,
      userId,
      sessionId,
      role,
      content,
      timestamp: Date.now() * 1000, // Convert to microseconds
      metadata,
    };
    this.logger.debug({ userId, sessionId, role }, 'Saving message');
    // Hot: LPUSH (newest first) + LTRIM + EXPIRE in a single round trip
    const key = this.getRedisKey(userId, sessionId);
    await this.redis
      .multi()
      .lpush(key, JSON.stringify(message))
      .ltrim(key, 0, this.HOT_MESSAGE_LIMIT - 1)
      .expire(key, this.HOT_TTL_SECONDS)
      .exec();
    // Cold: Async append to Iceberg
    this.appendToIceberg(message).catch((error) => {
      this.logger.error({ error, userId, sessionId }, 'Failed to append message to Iceberg');
    });
  }

  /**
   * Get recent messages from Redis (hot path), oldest first.
   *
   * @param limit Maximum number of messages to return. Non-positive limits
   *              return [] (previously limit=0 produced lrange(0, -1) and
   *              returned the ENTIRE list).
   */
  async getRecentMessages(
    userId: string,
    sessionId: string,
    limit: number = 20
  ): Promise<StoredMessage[]> {
    if (limit <= 0) {
      return [];
    }
    const key = this.getRedisKey(userId, sessionId);
    const messages = await this.redis.lrange(key, 0, limit - 1);
    return messages
      .map((msg) => {
        try {
          return JSON.parse(msg) as StoredMessage;
        } catch (error) {
          // Corrupt entries are logged and dropped rather than failing the read.
          this.logger.error({ error, message: msg }, 'Failed to parse message');
          return null;
        }
      })
      .filter((msg): msg is StoredMessage => msg !== null)
      .reverse(); // Stored newest-first (LPUSH); return oldest first
  }

  /**
   * Get full conversation history from Iceberg (cold path).
   *
   * Iceberg reads are not yet implemented; currently falls back to the Redis
   * hot window (at most HOT_MESSAGE_LIMIT messages).
   */
  async getFullHistory(
    userId: string,
    sessionId: string,
    timeRange?: { start: number; end: number }
  ): Promise<StoredMessage[]> {
    this.logger.debug({ userId, sessionId, timeRange }, 'Loading full history from Iceberg');
    // TODO: Implement Iceberg query
    // const table = this.iceberg.loadTable('gateway.conversations');
    // const filters = [
    //   EqualTo('user_id', userId),
    //   EqualTo('session_id', sessionId),
    // ];
    //
    // if (timeRange) {
    //   filters.push(GreaterThanOrEqual('timestamp', timeRange.start));
    //   filters.push(LessThanOrEqual('timestamp', timeRange.end));
    // }
    //
    // const df = await table.scan({
    //   row_filter: And(...filters)
    // }).to_pandas();
    //
    // if (!df.empty) {
    //   return df.sort_values('timestamp').to_dict('records');
    // }
    // Fallback to Redis if Iceberg not available
    return await this.getRecentMessages(userId, sessionId, 1000);
  }

  /**
   * Convert stored messages to LangChain message format.
   *
   * @throws Error on an unknown role (should be unreachable given the
   *         StoredMessage union type, but data from storage is untyped).
   */
  toLangChainMessages(messages: StoredMessage[]): BaseMessage[] {
    return messages.map((msg) => {
      switch (msg.role) {
        case 'user':
          return new HumanMessage(msg.content);
        case 'assistant':
          return new AIMessage(msg.content);
        case 'system':
          return new SystemMessage(msg.content);
        default:
          throw new Error(`Unknown role: ${msg.role}`);
      }
    });
  }

  /**
   * Delete all messages for a session (Redis only, Iceberg handled separately)
   */
  async deleteSession(userId: string, sessionId: string): Promise<void> {
    this.logger.info({ userId, sessionId }, 'Deleting session from Redis');
    const key = this.getRedisKey(userId, sessionId);
    await this.redis.del(key);
  }

  /**
   * Delete all messages for a user (GDPR compliance).
   *
   * Only removes the Redis hot copies; the durable Iceberg copies require a
   * separate delete flow (see comments below).
   */
  async deleteUserData(userId: string): Promise<void> {
    this.logger.info({ userId }, 'Deleting all user messages for GDPR compliance');
    // Delete from Redis (KEYS is acceptable at current scale; prefer SCAN if
    // the keyspace grows large)
    const pattern = `conv:${userId}:*`;
    const keys = await this.redis.keys(pattern);
    if (keys.length > 0) {
      await this.redis.del(...keys);
    }
    // Delete from Iceberg
    // Note: For GDPR compliance, need to:
    // 1. Send delete command via Kafka OR
    // 2. Use Iceberg REST API to delete rows (if supported) OR
    // 3. Coordinate with Flink job to handle deletes
    //
    // Iceberg delete flow:
    // - Mark rows for deletion (equality delete files)
    // - Run compaction to physically remove
    // - Expire old snapshots
    this.logger.info({ userId }, 'User messages deleted from Redis - Iceberg GDPR delete not yet implemented');
  }

  /**
   * Get Redis key for conversation
   */
  private getRedisKey(userId: string, sessionId: string): string {
    return `conv:${userId}:${sessionId}`;
  }

  /**
   * Append message to Iceberg for durable storage
   *
   * Note: For production, send to Kafka topic that Flink consumes:
   * - Topic: gateway_conversations
   * - Flink job writes to gateway.conversations Iceberg table
   * - Ensures consistent write pattern with rest of system
   */
  private async appendToIceberg(message: StoredMessage): Promise<void> {
    // TODO: Send to Kafka topic for Flink processing
    // const kafkaMessage = {
    //   id: message.id,
    //   user_id: message.userId,
    //   session_id: message.sessionId,
    //   role: message.role,
    //   content: message.content,
    //   metadata: JSON.stringify(message.metadata || {}),
    //   timestamp: message.timestamp,
    // };
    // await this.kafkaProducer.send({
    //   topic: 'gateway_conversations',
    //   messages: [{ value: JSON.stringify(kafkaMessage) }]
    // });
    this.logger.debug(
      { messageId: message.id, userId: message.userId, sessionId: message.sessionId },
      'Message append to Iceberg (via Kafka) not yet implemented'
    );
  }

  /**
   * Get conversation statistics for the Redis hot window.
   *
   * firstMessage/lastMessage are omitted when the list is empty or every
   * entry failed to parse (previously Math.min/max over an empty array
   * produced Infinity dates).
   */
  async getStats(userId: string, sessionId: string): Promise<{
    messageCount: number;
    firstMessage?: Date;
    lastMessage?: Date;
  }> {
    const key = this.getRedisKey(userId, sessionId);
    const count = await this.redis.llen(key);
    if (count === 0) {
      return { messageCount: 0 };
    }
    const messages = await this.getRecentMessages(userId, sessionId, count);
    if (messages.length === 0) {
      // All entries were corrupt/unparseable; report the raw count only.
      return { messageCount: count };
    }
    const timestamps = messages.map((m) => m.timestamp / 1000); // micros → millis
    return {
      messageCount: count,
      firstMessage: new Date(Math.min(...timestamps)),
      lastMessage: new Date(Math.max(...timestamps)),
    };
  }
}

View File

@@ -0,0 +1,356 @@
import { readdir, readFile } from 'fs/promises';
import { join, relative } from 'path';
import { createHash } from 'crypto';
import type { FastifyBaseLogger } from 'fastify';
import { RAGRetriever } from './rag-retriever.js';
import { EmbeddingService } from './embedding-service.js';
/**
 * Document metadata stored with each chunk
 *
 * Field names are snake_case — presumably to match the Qdrant payload
 * schema; confirm against RAGRetriever.storeGlobalKnowledge.
 */
export interface DocumentMetadata {
  document_id: string; // source file path relative to the knowledge dir
  chunk_index: number; // 0-based position of this chunk within the document
  content_hash: string; // presumably the whole-document hash used for change detection — set in chunkDocument (not fully visible here)
  last_updated: number; // epoch millis captured at chunking time, presumably
  tags: string[]; // tags extracted from the document content
  heading?: string; // nearest markdown heading, when the chunk has one
  file_path: string; // NOTE(review): appears to overlap with document_id — confirm intended distinction
}
/**
 * Document chunk with content and metadata
 */
export interface DocumentChunk {
  content: string; // the chunk's markdown text
  metadata: DocumentMetadata;
}
/**
 * Document loader configuration
 */
export interface DocumentLoaderConfig {
  knowledgeDir: string; // root directory scanned for *.md files
  maxChunkSize?: number; // in tokens (approximate by chars); defaults to 4000 chars (~1000 tokens)
  chunkOverlap?: number; // overlap between chunks; defaults to 200
}
/**
* Global knowledge document loader
*
* Loads markdown documents from a directory structure and stores them
* as global knowledge (user_id="0") in Qdrant for RAG retrieval.
*
* Features:
* - Intelligent chunking by markdown headers
* - Content hashing for change detection
* - Metadata extraction (tags, headings)
* - Automatic embedding generation
* - Incremental updates (only changed docs)
*
* Directory structure:
* gateway/knowledge/
* platform/
* trading/
* indicators/
* strategies/
*/
export class DocumentLoader {
private config: DocumentLoaderConfig;
private logger: FastifyBaseLogger;
private embeddings: EmbeddingService;
private rag: RAGRetriever;
private loadedDocs: Map<string, string> = new Map(); // path -> hash
constructor(
config: DocumentLoaderConfig,
embeddings: EmbeddingService,
rag: RAGRetriever,
logger: FastifyBaseLogger
) {
this.config = {
maxChunkSize: 4000, // ~1000 tokens
chunkOverlap: 200,
...config,
};
this.embeddings = embeddings;
this.rag = rag;
this.logger = logger;
}
/**
 * Walk the knowledge directory and load every markdown document found.
 *
 * @returns Counters for documents newly loaded, re-embedded because their
 *          content changed, and skipped as unchanged.
 * @throws Re-throws any directory-walk or per-document failure after logging.
 */
async loadAll(): Promise<{ loaded: number; updated: number; skipped: number }> {
  this.logger.info({ dir: this.config.knowledgeDir }, 'Loading knowledge documents');
  const stats = { loaded: 0, updated: 0, skipped: 0 };
  try {
    const files = await this.findMarkdownFiles(this.config.knowledgeDir);
    for (const filePath of files) {
      switch (await this.loadDocument(filePath)) {
        case 'loaded':
          stats.loaded += 1;
          break;
        case 'updated':
          stats.updated += 1;
          break;
        default:
          stats.skipped += 1;
      }
    }
    this.logger.info(stats, 'Knowledge documents loaded');
    return stats;
  } catch (error) {
    this.logger.error({ error }, 'Failed to load knowledge documents');
    throw error;
  }
}
/**
 * Load a single document: read, hash-compare, chunk, embed, and store.
 *
 * Change detection is based on the in-memory loadedDocs map (path → hash),
 * so every document is (re)embedded on process restart.
 *
 * @returns 'skipped' when the content hash is unchanged, 'updated' when a
 *          previously loaded file changed, 'loaded' for a first load.
 * @throws Re-throws any read/embed/store failure after logging. Because the
 *         hash is recorded only after all chunks are stored, a mid-loop
 *         failure means the next attempt re-processes the whole document.
 */
async loadDocument(filePath: string): Promise<'loaded' | 'updated' | 'skipped'> {
  try {
    // Read file content
    const content = await readFile(filePath, 'utf-8');
    const contentHash = this.hashContent(content);
    // Check if document has changed (hash comparison against last load)
    const relativePath = relative(this.config.knowledgeDir, filePath);
    const existingHash = this.loadedDocs.get(relativePath);
    if (existingHash === contentHash) {
      this.logger.debug({ file: relativePath }, 'Document unchanged, skipping');
      return 'skipped';
    }
    const isUpdate = !!existingHash;
    // Parse and chunk document (relative path doubles as the document id)
    const chunks = this.chunkDocument(content, relativePath);
    this.logger.info(
      { file: relativePath, chunks: chunks.length, update: isUpdate },
      'Processing document'
    );
    // Generate embeddings and store chunks sequentially
    // (one embed call per chunk — presumably to avoid hammering the
    // embedding provider; confirm before parallelizing)
    for (const chunk of chunks) {
      const embedding = await this.embeddings.embed(chunk.content);
      // Create unique ID for this chunk
      const chunkId = `global:${chunk.metadata.document_id}:${chunk.metadata.chunk_index}`;
      // Store in Qdrant as global knowledge (user_id="0" namespace)
      await this.rag.storeGlobalKnowledge(
        chunkId,
        chunk.content,
        embedding,
        {
          ...chunk.metadata,
          type: 'knowledge_doc',
        }
      );
    }
    // Update loaded docs tracking only after every chunk stored successfully
    this.loadedDocs.set(relativePath, contentHash);
    return isUpdate ? 'updated' : 'loaded';
  } catch (error) {
    this.logger.error({ error, file: filePath }, 'Failed to load document');
    throw error;
  }
}
/**
* Reload a specific document (for updates)
*/
async reloadDocument(filePath: string): Promise<void> {
this.logger.info({ file: filePath }, 'Reloading document');
await this.loadDocument(filePath);
}
/**
* Chunk document by markdown headers with smart splitting
*/
private chunkDocument(content: string, documentId: string): DocumentChunk[] {
const chunks: DocumentChunk[] = [];
const tags = this.extractTags(content);
const lastModified = Date.now();
// Split by headers (## or ###)
const sections = this.splitByHeaders(content);
let chunkIndex = 0;
for (const section of sections) {
// If section is too large, split it further
const subChunks = this.splitLargeSection(section.content);
for (const subContent of subChunks) {
if (subContent.trim().length === 0) continue;
chunks.push({
content: subContent,
metadata: {
document_id: documentId,
chunk_index: chunkIndex++,
content_hash: this.hashContent(content),
last_updated: lastModified,
tags,
heading: section.heading,
file_path: documentId,
},
});
}
}
return chunks;
}
/**
* Split document by markdown headers
*/
private splitByHeaders(content: string): Array<{ heading?: string; content: string }> {
const lines = content.split('\n');
const sections: Array<{ heading?: string; content: string }> = [];
let currentSection: string[] = [];
let currentHeading: string | undefined;
for (const line of lines) {
// Check for markdown header (##, ###, ####)
const headerMatch = line.match(/^(#{2,4})\s+(.+)$/);
if (headerMatch) {
// Save previous section
if (currentSection.length > 0) {
sections.push({
heading: currentHeading,
content: currentSection.join('\n'),
});
}
// Start new section
currentHeading = headerMatch[2].trim();
currentSection = [line];
} else {
currentSection.push(line);
}
}
// Add final section
if (currentSection.length > 0) {
sections.push({
heading: currentHeading,
content: currentSection.join('\n'),
});
}
return sections;
}
/**
* Split large sections into smaller chunks
*/
private splitLargeSection(content: string): string[] {
const maxSize = this.config.maxChunkSize!;
const overlap = this.config.chunkOverlap!;
if (content.length <= maxSize) {
return [content];
}
const chunks: string[] = [];
let start = 0;
while (start < content.length) {
const end = Math.min(start + maxSize, content.length);
let chunkEnd = end;
// Try to break at sentence boundary
if (end < content.length) {
const sentenceEnd = content.lastIndexOf('.', end);
const paragraphEnd = content.lastIndexOf('\n\n', end);
if (paragraphEnd > start + maxSize / 2) {
chunkEnd = paragraphEnd;
} else if (sentenceEnd > start + maxSize / 2) {
chunkEnd = sentenceEnd + 1;
}
}
chunks.push(content.substring(start, chunkEnd));
start = chunkEnd - overlap;
}
return chunks;
}
/**
* Extract tags from document (frontmatter or first heading)
*/
private extractTags(content: string): string[] {
const tags: string[] = [];
// Try to extract from YAML frontmatter
const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
if (frontmatterMatch) {
const frontmatter = frontmatterMatch[1];
const tagsMatch = frontmatter.match(/tags:\s*\[([^\]]+)\]/);
if (tagsMatch) {
tags.push(...tagsMatch[1].split(',').map((t) => t.trim()));
}
}
// Extract from first heading
const headingMatch = content.match(/^#\s+(.+)$/m);
if (headingMatch) {
tags.push(headingMatch[1].toLowerCase().replace(/\s+/g, '-'));
}
return tags;
}
/**
* Hash content for change detection
*/
private hashContent(content: string): string {
return createHash('md5').update(content).digest('hex');
}
/**
* Recursively find all markdown files
*/
private async findMarkdownFiles(dir: string): Promise<string[]> {
const files: string[] = [];
try {
const entries = await readdir(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = join(dir, entry.name);
if (entry.isDirectory()) {
const subFiles = await this.findMarkdownFiles(fullPath);
files.push(...subFiles);
} else if (entry.isFile() && entry.name.endsWith('.md')) {
files.push(fullPath);
}
}
} catch (error) {
this.logger.warn({ error, dir }, 'Failed to read directory');
}
return files;
}
/**
* Get loaded document stats
*/
getStats(): { totalDocs: number; totalSize: number } {
return {
totalDocs: this.loadedDocs.size,
totalSize: Array.from(this.loadedDocs.values()).reduce((sum, hash) => sum + hash.length, 0),
};
}
}

View File

@@ -0,0 +1,270 @@
import type { FastifyBaseLogger } from 'fastify';
import { Ollama } from 'ollama';
/**
 * Embedding provider configuration.
 */
export interface EmbeddingConfig {
  /** Backend used to produce vectors; 'none' yields zero vectors (dev only). */
  provider: 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none';
  /** Model name; each provider has a default when omitted (e.g. 'all-minilm' for ollama). */
  model?: string;
  /** API key for hosted providers (OpenAI; Voyage/Cohere once implemented). */
  apiKey?: string;
  /** Vector dimensionality; defaults depend on the provider (e.g. 384 for ollama). */
  dimensions?: number;
  /** Ollama server URL; defaults to http://localhost:11434. */
  ollamaUrl?: string;
}
/**
 * Embedding service for generating vectors from text.
 *
 * Supports multiple providers:
 * - Ollama (all-minilm, nomic-embed-text, mxbai-embed-large) - RECOMMENDED
 * - OpenAI (text-embedding-3-small/large)
 * - Voyage AI (voyage-2) - stub, not yet implemented
 * - Cohere (embed-english-v3.0) - stub, not yet implemented
 * - Local models (via transformers.js or Python sidecar) - stub
 * - None (for development without embeddings; returns zero vectors)
 *
 * Used by RAGRetriever to generate embeddings for storage and search.
 *
 * Failure policy: embedding errors are logged and a zero vector of the
 * configured dimensionality is returned so callers never crash.
 *
 * For production, use Ollama with all-minilm (90MB model, runs on CPU,
 * ~100MB RAM). Ollama can run in-container or as a separate pod/sidecar.
 */
export class EmbeddingService {
  private readonly model: string;
  private readonly dimensions: number;
  private ollama?: Ollama;

  constructor(
    private config: EmbeddingConfig,
    private logger: FastifyBaseLogger
  ) {
    // Set model/dimension defaults based on provider
    switch (config.provider) {
      case 'ollama':
        this.model = config.model || 'all-minilm';
        this.dimensions = config.dimensions || 384;
        this.ollama = new Ollama({
          host: config.ollamaUrl || 'http://localhost:11434',
        });
        break;
      case 'openai':
        this.model = config.model || 'text-embedding-3-small';
        this.dimensions = config.dimensions || 1536;
        break;
      case 'anthropic': // Anthropic has no embedding API; routed to Voyage
      case 'voyage':
        this.model = config.model || 'voyage-2';
        this.dimensions = config.dimensions || 1024;
        break;
      case 'cohere':
        this.model = config.model || 'embed-english-v3.0';
        this.dimensions = config.dimensions || 1024;
        break;
      case 'local':
        this.model = config.model || 'all-MiniLM-L6-v2';
        this.dimensions = config.dimensions || 384;
        break;
      case 'none':
        // No embeddings configured - will return zero vectors
        this.model = 'none';
        this.dimensions = config.dimensions || 1536;
        this.logger.warn('Embedding service initialized with provider=none - RAG will not function properly');
        break;
      default:
        throw new Error(`Unknown embedding provider: ${config.provider}`);
    }
    if (config.provider !== 'none') {
      this.logger.info(
        { provider: config.provider, model: this.model, dimensions: this.dimensions },
        'Initialized embedding service'
      );
    }
  }

  /**
   * Generate an embedding for a single text.
   * Returns a zero vector on provider failure (never throws).
   */
  async embed(text: string): Promise<number[]> {
    if (this.config.provider === 'none') {
      // Return zero vector when no embeddings configured
      return new Array(this.dimensions).fill(0);
    }
    this.logger.debug({ textLength: text.length, provider: this.config.provider }, 'Generating embedding');
    try {
      switch (this.config.provider) {
        case 'ollama':
          return await this.embedOllama(text);
        case 'openai':
          return await this.embedOpenAI(text);
        case 'anthropic':
        case 'voyage':
          return await this.embedVoyage(text);
        case 'cohere':
          return await this.embedCohere(text);
        case 'local':
          return await this.embedLocal(text);
        default:
          throw new Error(`Unknown provider: ${this.config.provider}`);
      }
    } catch (error) {
      this.logger.error({ error, provider: this.config.provider }, 'Failed to generate embedding');
      // Return zero vector as fallback to prevent crashes
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embeddings for multiple texts (batch).
   */
  async embedBatch(texts: string[]): Promise<number[][]> {
    this.logger.debug({ count: texts.length, provider: this.config.provider }, 'Generating batch embeddings');
    // Ollama supports native batch operations
    if (this.config.provider === 'ollama' && this.ollama) {
      try {
        const response = await this.ollama.embed({
          model: this.model,
          input: texts,
        });
        return response.embeddings;
      } catch (error) {
        this.logger.error({ error }, 'Ollama batch embedding failed, falling back to sequential');
        // Fall through to per-text processing
      }
    }
    // Fallback: embed each text individually (concurrently via Promise.all;
    // embed() itself degrades to zero vectors on error).
    const embeddings = await Promise.all(texts.map((text) => this.embed(text)));
    return embeddings;
  }

  /**
   * Get embedding dimensions.
   */
  getDimensions(): number {
    return this.dimensions;
  }

  /**
   * Get model name.
   */
  getModel(): string {
    return this.model;
  }

  /**
   * Generate embedding using Ollama.
   */
  private async embedOllama(text: string): Promise<number[]> {
    if (!this.ollama) {
      this.logger.error('Ollama client not initialized');
      return new Array(this.dimensions).fill(0);
    }
    try {
      const response = await this.ollama.embed({
        model: this.model,
        input: text,
      });
      // Ollama returns single embedding for single input
      return response.embeddings[0];
    } catch (error) {
      this.logger.error({ error }, 'Ollama embedding failed, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embedding using the OpenAI API.
   */
  private async embedOpenAI(text: string): Promise<number[]> {
    if (!this.config.apiKey) {
      this.logger.warn('OpenAI API key not configured, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
    try {
      const response = await fetch('https://api.openai.com/v1/embeddings', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.config.apiKey}`,
        },
        body: JSON.stringify({
          model: this.model,
          input: text,
        }),
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
      }
      const data = await response.json() as { data: Array<{ embedding: number[] }> };
      return data.data[0].embedding;
    } catch (error) {
      this.logger.error({ error }, 'OpenAI embedding failed, returning zero vector');
      return new Array(this.dimensions).fill(0);
    }
  }

  /**
   * Generate embedding using Voyage AI API (Anthropic partnership).
   */
  private async embedVoyage(_text: string): Promise<number[]> {
    // TODO: Implement Voyage AI embedding when API key available
    // API endpoint: https://api.voyageai.com/v1/embeddings
    this.logger.warn('Voyage AI embedding not yet implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Generate embedding using Cohere API.
   */
  private async embedCohere(_text: string): Promise<number[]> {
    // TODO: Implement Cohere embedding when API key available
    // API endpoint: https://api.cohere.ai/v1/embed
    this.logger.warn('Cohere embedding not yet implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Generate embedding using a local model.
   */
  private async embedLocal(_text: string): Promise<number[]> {
    // TODO: Implement local embedding (via transformers.js or Python sidecar)
    // Options:
    // 1. transformers.js (pure JS/WebAssembly) - slower but self-contained
    // 2. Python sidecar service running sentence-transformers - faster
    // 3. ONNX runtime with pre-exported models - good balance
    this.logger.warn('Local embedding not implemented, returning zero vector');
    return new Array(this.dimensions).fill(0);
  }

  /**
   * Calculate cosine similarity between two embeddings.
   *
   * Returns 0 for zero-magnitude vectors — important because this service
   * returns zero vectors as its failure fallback; the previous version
   * divided by zero and produced NaN in that case.
   *
   * @throws when the vectors have different lengths
   */
  static cosineSimilarity(a: number[], b: number[]): number {
    if (a.length !== b.length) {
      throw new Error('Embeddings must have same dimensions');
    }
    let dotProduct = 0;
    let normA = 0;
    let normB = 0;
    for (let i = 0; i < a.length; i++) {
      dotProduct += a[i] * b[i];
      normA += a[i] * a[i];
      normB += b[i] * b[i];
    }
    if (normA === 0 || normB === 0) {
      return 0; // zero vector has no direction; avoid NaN from 0/0
    }
    return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
  }
}

View File

@@ -0,0 +1,20 @@
// Memory layer exports
// Checkpoint and conversation persistence.
export { TieredCheckpointSaver } from './checkpoint-saver.js';
export { ConversationStore } from './conversation-store.js';
// RAG pipeline: embedding generation, vector retrieval, knowledge ingestion.
export { EmbeddingService } from './embedding-service.js';
export { RAGRetriever } from './rag-retriever.js';
export { DocumentLoader } from './document-loader.js';
// Session context helpers and their associated types.
export {
  createUserContext,
  touchContext,
  isContextExpired,
  serializeContext,
  deserializeContext,
  getDefaultCapabilities,
  type UserContext,
  type ActiveChannel,
  type ChannelCapabilities,
  type WorkspaceContext,
  type MemoryChunk,
} from './session-context.js';

View File

@@ -0,0 +1,210 @@
import type { FastifyBaseLogger } from 'fastify';
import { QdrantClient } from '../../clients/qdrant-client.js';
/**
 * Vector point with metadata for Qdrant.
 */
export interface VectorPoint {
  /** Point ID (messages use "userId:sessionId:timestamp"). */
  id: string;
  /** Embedding vector; length must match the collection's dimension. */
  vector: number[];
  payload: {
    /** Owning user; the reserved value "0" marks global platform knowledge. */
    user_id: string;
    /** Session the point belongs to; "global" for platform knowledge. */
    session_id: string;
    /** Raw text that was embedded. */
    content: string;
    role: 'user' | 'assistant' | 'system';
    /** Unix epoch milliseconds (Date.now()) at storage time. */
    timestamp: number;
    /** Arbitrary extra metadata merged in by callers. */
    [key: string]: unknown;
  };
}
/**
 * Search result from Qdrant.
 */
export interface SearchResult {
  id: string;
  /** Similarity score; scroll-based history results use a fixed 1.0. */
  score: number;
  payload: VectorPoint['payload'];
}
/**
 * Qdrant client configuration.
 */
export interface QdrantConfig {
  /** Base URL of the Qdrant instance. */
  url: string;
  /** Optional API key (typically unset for local dev). */
  apiKey?: string;
  collectionName?: string;
}
/**
 * RAG retriever backed by Qdrant vector similarity search.
 *
 * Memory lives in two namespaces that every search joins:
 * - global platform knowledge, stored under the reserved user_id="0"
 * - per-user memories, stored under the user's own id
 *
 * Provides semantic search over conversation history, context retrieval for
 * agent prompts, and per-user/per-session deletion (GDPR compliance).
 *
 * Architecture: gateway-side vector store, user_id indexed.
 */
export class RAGRetriever {
  private readonly qdrant: QdrantClient;

  constructor(
    config: QdrantConfig,
    private logger: FastifyBaseLogger,
    vectorDimension: number = 1536
  ) {
    this.qdrant = new QdrantClient(config, logger, vectorDimension);
  }

  /** Create/verify the Qdrant collection schema. */
  async initialize(): Promise<void> {
    await this.qdrant.initialize();
  }

  /** Persist one conversation message as a vector point. */
  async storeMessage(
    userId: string,
    sessionId: string,
    role: 'user' | 'assistant' | 'system',
    content: string,
    embedding: number[],
    metadata?: Record<string, unknown>
  ): Promise<void> {
    const pointId = [userId, sessionId, Date.now()].join(':');
    this.logger.debug(
      { userId, sessionId, role, contentLength: content.length },
      'Storing message vector'
    );
    // Caller metadata is spread last so it may override the base fields.
    await this.qdrant.upsertPoint(pointId, embedding, {
      user_id: userId,
      session_id: sessionId,
      content,
      role,
      timestamp: Date.now(),
      ...metadata,
    });
  }

  /** Persist platform-wide knowledge under the reserved global namespace. */
  async storeGlobalKnowledge(
    id: string,
    content: string,
    embedding: number[],
    metadata?: Record<string, unknown>
  ): Promise<void> {
    this.logger.debug({ id, contentLength: content.length }, 'Storing global knowledge');
    const payload = {
      session_id: 'global',
      content,
      role: 'system' as const,
      timestamp: Date.now(),
      ...metadata,
    };
    await this.qdrant.storeGlobalKnowledge(id, embedding, payload);
  }

  /**
   * Vector-similarity search over BOTH the user's own memories and the
   * global namespace (the Qdrant client ORs user_id=userId with user_id="0").
   */
  async search(
    userId: string,
    queryEmbedding: number[],
    options?: {
      limit?: number;
      sessionId?: string;
      minScore?: number;
      timeRange?: { start: number; end: number };
    }
  ): Promise<SearchResult[]> {
    const opts = options ?? {};
    const limit = opts.limit || 5;
    const minScore = opts.minScore || 0.7;
    this.logger.debug(
      { userId, limit, sessionId: opts.sessionId },
      'Searching for relevant memories (global + user)'
    );
    const hits = await this.qdrant.search(userId, queryEmbedding, {
      limit,
      scoreThreshold: minScore,
      sessionId: opts.sessionId,
      timeRange: opts.timeRange,
    });
    const mapped: SearchResult[] = [];
    for (const hit of hits) {
      mapped.push({
        id: hit.id,
        score: hit.score,
        payload: hit.payload as VectorPoint['payload'],
      });
    }
    return mapped;
  }

  /** Fetch recent messages for a session via scroll (no similarity scoring). */
  async getRecentHistory(
    userId: string,
    sessionId: string,
    limit: number = 10
  ): Promise<SearchResult[]> {
    this.logger.debug({ userId, sessionId, limit }, 'Getting recent conversation history');
    const { points } = await this.qdrant.scroll(userId, { sessionId, limit });
    return points.map((point) => ({
      id: point.id,
      score: 1.0, // scroll results carry no similarity score
      payload: point.payload as VectorPoint['payload'],
    }));
  }

  /** Remove every vector belonging to a user (GDPR right-to-erasure). */
  async deleteUserData(userId: string): Promise<void> {
    this.logger.info({ userId }, 'Deleting all user vectors for GDPR compliance');
    await this.qdrant.deleteUserData(userId);
  }

  /** Remove every vector belonging to one session of a user. */
  async deleteSession(userId: string, sessionId: string): Promise<void> {
    this.logger.info({ userId, sessionId }, 'Deleting session vectors');
    await this.qdrant.deleteSession(userId, sessionId);
  }

  /** Report collection-level counters from Qdrant. */
  async getStats(): Promise<{
    vectorCount: number;
    indexedCount: number;
    collectionSize: number;
  }> {
    const { vectorsCount, indexedVectorsCount, pointsCount } =
      await this.qdrant.getCollectionInfo();
    return {
      vectorCount: vectorsCount,
      indexedCount: indexedVectorsCount,
      collectionSize: pointsCount,
    };
  }
}

View File

@@ -0,0 +1,226 @@
import type { UserLicense, ChannelType } from '../../types/user.js';
import type { BaseMessage } from '@langchain/core/messages';
/**
 * Channel capabilities (what the channel supports).
 */
export interface ChannelCapabilities {
  supportsMarkdown: boolean;
  supportsImages: boolean;
  supportsButtons: boolean;
  supportsVoice: boolean;
  supportsFiles: boolean;
  /** Hard per-message limit (e.g. 4096 for Telegram, 2000 for Discord). */
  maxMessageLength: number;
}
/**
 * Active channel information for multi-channel routing.
 */
export interface ActiveChannel {
  type: ChannelType;
  channelUserId: string; // Platform-specific ID (telegram_id, discord_id, etc)
  capabilities: ChannelCapabilities;
  metadata?: Record<string, unknown>;
}
/**
 * Workspace state (current user context).
 * All collections start empty for a fresh session (see createUserContext).
 */
export interface WorkspaceContext {
  activeIndicators: string[];
  activeStrategies: string[];
  watchlist: string[];
  recentQueries: string[];
  preferences: Record<string, unknown>;
}
/**
 * Memory chunk from RAG retrieval.
 */
export interface MemoryChunk {
  id: string;
  content: string;
  role: 'user' | 'assistant' | 'system';
  /** Unix epoch milliseconds. */
  timestamp: number;
  // presumably the vector-search similarity score — verify against retriever
  relevanceScore: number;
  metadata?: Record<string, unknown>;
}
/**
 * Enhanced user context for agent harness
 *
 * Contains all necessary context for an agent session:
 * - User identity and license
 * - Active channel info (for multi-channel support)
 * - Conversation state and history
 * - RAG-retrieved relevant memories
 * - Workspace state
 *
 * This object is passed to all agent nodes and tools.
 *
 * Note: conversationHistory is intentionally excluded from Redis
 * serialization (see serializeContext) and restored from checkpoints.
 */
export interface UserContext {
  // Identity
  userId: string;
  sessionId: string;
  license: UserLicense;
  // Channel context (for multi-channel routing)
  activeChannel: ActiveChannel;
  // Conversation state
  conversationHistory: BaseMessage[];
  currentMessage?: string;
  // RAG context
  relevantMemories: MemoryChunk[];
  // Workspace state
  workspaceState: WorkspaceContext;
  // Metadata (used for TTL checks — see isContextExpired)
  createdAt: Date;
  lastActivity: Date;
}
/**
* Get default channel capabilities based on type
*/
export function getDefaultCapabilities(channelType: ChannelType): ChannelCapabilities {
switch (channelType) {
case 'websocket':
return {
supportsMarkdown: true,
supportsImages: true,
supportsButtons: true,
supportsVoice: false,
supportsFiles: true,
maxMessageLength: 100000,
};
case 'telegram':
return {
supportsMarkdown: true,
supportsImages: true,
supportsButtons: true,
supportsVoice: true,
supportsFiles: true,
maxMessageLength: 4096,
};
case 'slack':
return {
supportsMarkdown: true,
supportsImages: true,
supportsButtons: true,
supportsVoice: false,
supportsFiles: true,
maxMessageLength: 40000,
};
case 'discord':
return {
supportsMarkdown: true,
supportsImages: true,
supportsButtons: true,
supportsVoice: true,
supportsFiles: true,
maxMessageLength: 2000,
};
default:
// Default fallback
return {
supportsMarkdown: false,
supportsImages: false,
supportsButtons: false,
supportsVoice: false,
supportsFiles: false,
maxMessageLength: 1000,
};
}
}
/**
* Create a new user context
*/
export function createUserContext(params: {
userId: string;
sessionId: string;
license: UserLicense;
channelType: ChannelType;
channelUserId: string;
channelCapabilities?: Partial<ChannelCapabilities>;
}): UserContext {
const defaultCapabilities = getDefaultCapabilities(params.channelType);
const capabilities: ChannelCapabilities = {
...defaultCapabilities,
...params.channelCapabilities,
};
return {
userId: params.userId,
sessionId: params.sessionId,
license: params.license,
activeChannel: {
type: params.channelType,
channelUserId: params.channelUserId,
capabilities,
},
conversationHistory: [],
relevantMemories: [],
workspaceState: {
activeIndicators: [],
activeStrategies: [],
watchlist: [],
recentQueries: [],
preferences: {},
},
createdAt: new Date(),
lastActivity: new Date(),
};
}
/**
 * Return a copy of the context with lastActivity refreshed to now.
 * The original context is not mutated.
 */
export function touchContext(context: UserContext): UserContext {
  const refreshed: UserContext = {
    ...context,
    lastActivity: new Date(),
  };
  return refreshed;
}
/**
 * True when the context has been idle longer than the given TTL.
 */
export function isContextExpired(context: UserContext, ttlSeconds: number): boolean {
  const idleSeconds = (Date.now() - context.lastActivity.getTime()) / 1000;
  return idleSeconds > ttlSeconds;
}
/**
 * Serialize a context for Redis storage.
 *
 * Dates are stored as ISO strings. conversationHistory is set to undefined
 * so JSON.stringify drops it entirely (too large for Redis; the checkpoint
 * store is the source of truth for history).
 */
export function serializeContext(context: UserContext): string {
  const snapshot = {
    ...context,
    createdAt: context.createdAt.toISOString(),
    lastActivity: context.lastActivity.toISOString(),
    conversationHistory: undefined,
  };
  return JSON.stringify(snapshot);
}
/**
 * Deserialize a context from Redis storage.
 *
 * Restores Date fields and leaves conversationHistory empty — it is
 * reloaded from the checkpoint store, not from Redis.
 */
export function deserializeContext(data: string): Partial<UserContext> {
  const raw = JSON.parse(data);
  const restored: Partial<UserContext> = {
    ...raw,
    createdAt: new Date(raw.createdAt),
    lastActivity: new Date(raw.lastActivity),
    conversationHistory: [],
  };
  return restored;
}

View File

@@ -0,0 +1,146 @@
# Skills
Skills are individual capabilities that the agent can use to accomplish tasks. Each skill is a self-contained unit with:
- A markdown definition file (`*.skill.md`)
- A TypeScript implementation extending `BaseSkill`
- Clear input/output contracts
- Parameter validation
- Error handling
## Skill Structure
```
skills/
├── base-skill.ts # Base class
├── {skill-name}.skill.md # Definition
├── {skill-name}.ts # Implementation
└── README.md # This file
```
## Creating a New Skill
### 1. Create the Definition File
Create `{skill-name}.skill.md`:
```markdown
# My Skill
**Version:** 1.0.0
**Author:** Your Name
**Tags:** category1, category2
## Description
What does this skill do?
## Inputs
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| param1 | string | Yes | What it does |
## Outputs
What does it return?
## Example Usage
Show code example
```
### 2. Create the Implementation
Create `{skill-name}.ts`:
```typescript
import { BaseSkill, SkillInput, SkillResult, SkillMetadata } from './base-skill.js';
export class MySkill extends BaseSkill {
getMetadata(): SkillMetadata {
return {
name: 'my-skill',
description: 'What it does',
version: '1.0.0',
};
}
getParametersSchema(): Record<string, unknown> {
return {
type: 'object',
required: ['param1'],
properties: {
param1: { type: 'string' },
},
};
}
validateInput(parameters: Record<string, unknown>): boolean {
return typeof parameters.param1 === 'string';
}
async execute(input: SkillInput): Promise<SkillResult> {
this.logStart(input);
try {
// Your implementation here
const result = this.success({ data: 'result' });
this.logEnd(result);
return result;
} catch (error) {
return this.error(error as Error);
}
}
}
```
### 3. Register the Skill
Add to `index.ts`:
```typescript
export { MySkill } from './my-skill.js';
```
## Using Skills in Workflows
Skills can be used in LangGraph workflows:
```typescript
import { MarketAnalysisSkill } from '../skills/market-analysis.js';
const analyzeNode = async (state) => {
const skill = new MarketAnalysisSkill(logger, model);
const result = await skill.execute({
context: state.userContext,
parameters: {
ticker: state.ticker,
period: '4h',
},
});
return {
analysis: result.data,
};
};
```
## Best Practices
1. **Single Responsibility**: Each skill should do one thing well
2. **Validation**: Always validate inputs thoroughly
3. **Error Handling**: Use try/catch and return meaningful errors
4. **Logging**: Use `logStart()` and `logEnd()` helpers
5. **Documentation**: Keep the `.skill.md` file up to date
6. **Testing**: Write unit tests for skill logic
7. **Idempotency**: Skills should be safe to retry
## Available Skills
- **market-analysis**: Analyze market conditions and trends
- *(Add more as you build them)*
## Skill Categories
- **Market Data**: Query and analyze market information
- **Trading**: Execute trades, manage positions
- **Analysis**: Technical and fundamental analysis
- **Risk**: Risk assessment and management
- **Utilities**: Helper functions and utilities

View File

@@ -0,0 +1,128 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js';
/**
 * Skill metadata.
 */
export interface SkillMetadata {
  /** Skill identifier (kebab-case by convention, e.g. "market-analysis"). */
  name: string;
  description: string;
  /** Semantic version of the skill implementation. */
  version: string;
  author?: string;
  /** Free-form category tags (e.g. "market-data", "trading"). */
  tags?: string[];
}
/**
 * Skill input parameters.
 */
export interface SkillInput {
  /** Full user/session context passed to every skill invocation. */
  context: UserContext;
  /** Call arguments; shape is described by getParametersSchema(). */
  parameters: Record<string, unknown>;
}
/**
 * Skill execution result.
 */
export interface SkillResult {
  success: boolean;
  /** Payload when success is true. */
  data?: unknown;
  /** Human-readable message when success is false. */
  error?: string;
  metadata?: Record<string, unknown>;
}
/**
 * Base class for agent skills.
 *
 * A skill is one capability the agent can invoke. Each skill ships as a
 * markdown definition (*.skill.md) describing purpose/inputs/outputs plus a
 * TypeScript implementation extending this class.
 *
 * Implementations may reason with an LLM (the optional `model`), call the
 * user's MCP server tools, or use platform tools (market data, charts, ...).
 */
export abstract class BaseSkill {
  constructor(
    protected logger: FastifyBaseLogger,
    protected model?: BaseChatModel
  ) {}

  /** Describe this skill (name, version, tags, ...). */
  abstract getMetadata(): SkillMetadata;

  /** Check that the given parameters satisfy this skill's contract. */
  abstract validateInput(parameters: Record<string, unknown>): boolean;

  /** Run the skill. */
  abstract execute(input: SkillInput): Promise<SkillResult>;

  /** JSON Schema describing the accepted parameters. */
  abstract getParametersSchema(): Record<string, unknown>;

  /** Emit a structured log line when execution begins. */
  protected logStart(input: SkillInput): void {
    const { name } = this.getMetadata();
    this.logger.info(
      {
        skill: name,
        userId: input.context.userId,
        sessionId: input.context.sessionId,
        parameters: input.parameters,
      },
      'Starting skill execution'
    );
  }

  /** Emit a structured log line when execution finishes (success or not). */
  protected logEnd(result: SkillResult): void {
    const { name } = this.getMetadata();
    this.logger.info(
      {
        skill: name,
        success: result.success,
        error: result.error,
      },
      'Skill execution completed'
    );
  }

  /** Build a successful SkillResult. */
  protected success(data: unknown, metadata?: Record<string, unknown>): SkillResult {
    return { success: true, data, metadata };
  }

  /** Build a failed SkillResult from a message or an Error. */
  protected error(error: string | Error, metadata?: Record<string, unknown>): SkillResult {
    const message = typeof error === 'string' ? error : error.message;
    return { success: false, error: message, metadata };
  }
}

View File

@@ -0,0 +1,10 @@
// Skills exports
// Base contract every skill implements, plus its supporting types.
export {
  BaseSkill,
  type SkillMetadata,
  type SkillInput,
  type SkillResult,
} from './base-skill.js';
// Concrete skills — register new skills here (see README.md, "Register the Skill").
export { MarketAnalysisSkill } from './market-analysis.js';

View File

@@ -0,0 +1,78 @@
# Market Analysis Skill
**Version:** 1.0.0
**Author:** Dexorder AI Platform
**Tags:** market-data, analysis, trading
## Description
Analyzes market conditions for a given ticker and timeframe. Provides insights on:
- Price trends and patterns
- Volume analysis
- Support and resistance levels
- Market sentiment indicators
## Inputs
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `ticker` | string | Yes | Market identifier (e.g., "BINANCE:BTC/USDT") |
| `period` | string | Yes | Analysis period ("1h", "4h", "1d", "1w") |
| `startTime` | number | No | Start timestamp (microseconds), defaults to 7 days ago |
| `endTime` | number | No | End timestamp (microseconds), defaults to now |
| `indicators` | string[] | No | Additional indicators to include (e.g., ["RSI", "MACD"]) |
## Outputs
```typescript
{
success: true,
data: {
ticker: string,
period: string,
timeRange: { start: number, end: number },
trend: "bullish" | "bearish" | "neutral",
priceChange: number,
volumeProfile: {
average: number,
recent: number,
trend: "increasing" | "decreasing" | "stable"
},
supportLevels: number[],
resistanceLevels: number[],
indicators: Record<string, unknown>,
analysis: string // LLM-generated natural language analysis
}
}
```
## Example Usage
```typescript
const skill = new MarketAnalysisSkill(logger, model);
const result = await skill.execute({
context: userContext,
parameters: {
ticker: "BINANCE:BTC/USDT",
period: "4h",
indicators: ["RSI", "MACD"]
}
});
console.log(result.data.analysis);
// "Bitcoin is showing bullish momentum with RSI at 65 and MACD crossing above signal line..."
```
## Implementation Notes
- Queries OHLC data from Iceberg warehouse
- Uses LLM for natural language analysis
- Caches results for 5 minutes to reduce computation
- Falls back to reduced analysis if Iceberg unavailable
## Dependencies
- Iceberg client (market data)
- LLM model (analysis generation)
- User's MCP server (optional custom indicators)

View File

@@ -0,0 +1,198 @@
import { BaseSkill, type SkillInput, type SkillResult, type SkillMetadata } from './base-skill.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
/**
* Market analysis skill implementation
*
* See market-analysis.skill.md for full documentation
*/
export class MarketAnalysisSkill extends BaseSkill {
  /**
   * @param logger Structured logger used by the BaseSkill helpers.
   * @param model  Optional chat model; when omitted, execute() skips the
   *               narrative-analysis step and returns an empty string there.
   */
  constructor(logger: FastifyBaseLogger, model?: BaseChatModel) {
    super(logger, model);
  }
  /** Static identity of this skill; `name` is used by the BaseSkill log helpers. */
  getMetadata(): SkillMetadata {
    return {
      name: 'market-analysis',
      description: 'Analyze market conditions for a given ticker and timeframe',
      version: '1.0.0',
      author: 'Dexorder AI Platform',
      tags: ['market-data', 'analysis', 'trading'],
    };
  }
  /**
   * JSON Schema for execute() parameters.
   * `ticker` and `period` are required, matching validateInput().
   */
  getParametersSchema(): Record<string, unknown> {
    return {
      type: 'object',
      required: ['ticker', 'period'],
      properties: {
        ticker: {
          type: 'string',
          description: 'Market identifier (e.g., "BINANCE:BTC/USDT")',
        },
        period: {
          type: 'string',
          enum: ['1h', '4h', '1d', '1w'],
          description: 'Analysis period',
        },
        startTime: {
          type: 'number',
          description: 'Start timestamp in microseconds',
        },
        endTime: {
          type: 'number',
          description: 'End timestamp in microseconds',
        },
        indicators: {
          type: 'array',
          items: { type: 'string' },
          description: 'Additional indicators to include',
        },
      },
    };
  }
validateInput(parameters: Record<string, unknown>): boolean {
if (!parameters.ticker || typeof parameters.ticker !== 'string') {
return false;
}
if (!parameters.period || typeof parameters.period !== 'string') {
return false;
}
return true;
}
async execute(input: SkillInput): Promise<SkillResult> {
this.logStart(input);
if (!this.validateInput(input.parameters)) {
return this.error('Invalid parameters: ticker and period are required');
}
try {
const ticker = input.parameters.ticker as string;
const period = input.parameters.period as string;
const indicators = (input.parameters.indicators as string[]) || [];
// 1. Fetch OHLC data from Iceberg
// TODO: Implement Iceberg query
// const ohlcData = await this.fetchOHLCData(ticker, period, startTime, endTime);
const ohlcData = this.getMockOHLCData(); // Placeholder
// 2. Calculate technical indicators
const analysis = this.calculateAnalysis(ohlcData, indicators);
// 3. Generate natural language analysis using LLM
let narrativeAnalysis = '';
if (this.model) {
narrativeAnalysis = await this.generateNarrativeAnalysis(
ticker,
period,
analysis
);
}
const result = this.success({
ticker,
period,
timeRange: {
start: ohlcData.startTime,
end: ohlcData.endTime,
},
trend: analysis.trend,
priceChange: analysis.priceChange,
volumeProfile: analysis.volumeProfile,
supportLevels: analysis.supportLevels,
resistanceLevels: analysis.resistanceLevels,
indicators: analysis.indicators,
analysis: narrativeAnalysis,
});
this.logEnd(result);
return result;
} catch (error) {
const result = this.error(error as Error);
this.logEnd(result);
return result;
}
}
/**
* Calculate technical analysis from OHLC data
*/
private calculateAnalysis(
ohlcData: any,
_requestedIndicators: string[]
): any {
// TODO: Implement proper technical analysis
// This is a simplified placeholder
const priceChange = ((ohlcData.close - ohlcData.open) / ohlcData.open) * 100;
const trend = priceChange > 1 ? 'bullish' : priceChange < -1 ? 'bearish' : 'neutral';
return {
trend,
priceChange,
volumeProfile: {
average: ohlcData.avgVolume,
recent: ohlcData.currentVolume,
trend: ohlcData.currentVolume > ohlcData.avgVolume ? 'increasing' : 'decreasing',
},
supportLevels: [ohlcData.low * 0.98, ohlcData.low * 0.95],
resistanceLevels: [ohlcData.high * 1.02, ohlcData.high * 1.05],
indicators: {},
};
}
/**
* Generate natural language analysis using LLM
*/
private async generateNarrativeAnalysis(
ticker: string,
period: string,
analysis: any
): Promise<string> {
if (!this.model) {
return 'LLM not available for narrative analysis';
}
const systemPrompt = `You are a professional market analyst.
Provide concise, actionable market analysis based on technical data.
Focus on key insights and avoid jargon.`;
const userPrompt = `Analyze the following market data for ${ticker} (${period}):
Trend: ${analysis.trend}
Price Change: ${analysis.priceChange.toFixed(2)}%
Volume: ${analysis.volumeProfile.trend}
Support Levels: ${analysis.supportLevels.join(', ')}
Resistance Levels: ${analysis.resistanceLevels.join(', ')}
Provide a 2-3 sentence analysis suitable for a trading decision.`;
const response = await this.model.invoke([
new SystemMessage(systemPrompt),
new HumanMessage(userPrompt),
]);
return response.content as string;
}
/**
* Mock OHLC data (placeholder until Iceberg integration)
*/
private getMockOHLCData(): any {
return {
startTime: Date.now() - 7 * 24 * 60 * 60 * 1000,
endTime: Date.now(),
open: 50000,
high: 52000,
low: 49000,
close: 51500,
avgVolume: 1000000,
currentVolume: 1200000,
};
}
}

View File

@@ -0,0 +1,273 @@
# Subagents
Specialized agents with dedicated knowledge bases and system prompts.
## What are Subagents?
Subagents are focused AI agents designed for specific tasks. Unlike general-purpose agents, each subagent has:
- **Specialized knowledge**: Multi-file memory directory with domain-specific info
- **Custom system prompt**: Tailored instructions for the task
- **Model override**: Can use different models than the main agent
- **Capability tags**: Declare what they can do
## Directory Structure
```
subagents/
├── base-subagent.ts # Base class
├── {subagent-name}/
│ ├── config.yaml # Configuration
│ ├── system-prompt.md # System instructions
│ ├── memory/ # Knowledge base (multi-file)
│ │ ├── file1.md
│ │ ├── file2.md
│ │ └── file3.md
│ └── index.ts # Implementation
└── README.md # This file
```
## Creating a New Subagent
### 1. Create Directory Structure
```bash
mkdir -p subagents/my-subagent/memory
```
### 2. Create config.yaml
```yaml
name: my-subagent
description: What it does
# Model override (optional)
model: claude-3-5-sonnet-20241022
temperature: 0.3
maxTokens: 4096
# Memory files to load
memoryFiles:
- guidelines.md
- examples.md
- best-practices.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities
capabilities:
- capability1
- capability2
```
### 3. Write system-prompt.md
```markdown
# My Subagent System Prompt
You are an expert in [domain].
## Your Role
[What the subagent does]
## Approach
1. [Step 1]
2. [Step 2]
## Output Format
[How to structure responses]
```
### 4. Create Memory Files
Split knowledge into logical files:
```markdown
<!-- memory/guidelines.md -->
# Guidelines
## What to Check
- Thing 1
- Thing 2
## What to Avoid
- Anti-pattern 1
- Anti-pattern 2
```
### 5. Implement Subagent
```typescript
// index.ts
import { BaseSubagent, SubagentConfig, SubagentContext } from '../base-subagent.js';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { FastifyBaseLogger } from 'fastify';
export class MySubagent extends BaseSubagent {
constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger) {
super(config, model, logger);
}
async execute(context: SubagentContext, input: string): Promise<string> {
this.logger.info({ subagent: this.getName() }, 'Executing subagent');
const messages = this.buildMessages(context, input);
const response = await this.model.invoke(messages);
return response.content as string;
}
}
// Factory function
export async function createMySubagent(
model: BaseChatModel,
logger: FastifyBaseLogger,
basePath: string
): Promise<MySubagent> {
const { readFile } = await import('fs/promises');
const { join } = await import('path');
const yaml = await import('js-yaml');
const configPath = join(basePath, 'config.yaml');
const configContent = await readFile(configPath, 'utf-8');
const config = yaml.load(configContent) as SubagentConfig;
const subagent = new MySubagent(config, model, logger);
await subagent.initialize(basePath);
return subagent;
}
```
### 6. Export from index.ts
```typescript
// subagents/index.ts
export { MySubagent, createMySubagent } from './my-subagent/index.js';
```
## Using Subagents
### Direct Usage
```typescript
import { createMySubagent } from './harness/subagents';
const subagent = await createMySubagent(model, logger, basePath);
const result = await subagent.execute({ userContext }, 'input text');
```
### In Workflows
```typescript
const analyzeNode = async (state) => {
const result = await mySubagent.execute(
{ userContext: state.userContext },
state.input
);
return { analysis: result };
};
```
### With Routing
Add to `config/subagent-routing.yaml`:
```yaml
subagents:
my-subagent:
enabled: true
path: src/harness/subagents/my-subagent
triggers:
keywords:
- "keyword1"
- "keyword2"
patterns:
- "pattern.*regex"
priority: medium
timeout: 30000
```
## Multi-File Memory Benefits
### Why Split Memory?
1. **Organization**: Easier to maintain separate concerns
2. **Versioning**: Update specific files without touching others
3. **Collaboration**: Multiple people can work on different files
4. **Context Management**: LLM sees structured knowledge
### Example Split
For a code reviewer:
- `review-guidelines.md`: What to check
- `common-patterns.md`: Good/bad examples
- `best-practices.md`: Industry standards
All files are loaded and concatenated at initialization.
## Best Practices
### Memory Files
- **Be Specific**: Include concrete examples, not just theory
- **Use Markdown**: Tables, lists, code blocks for clarity
- **Keep Focused**: Each file should have a clear purpose
- **Update Regularly**: Improve based on real usage
### System Prompts
- **Define Role Clearly**: "You are a [specific role]"
- **Specify Output Format**: Show examples of expected output
- **Set Constraints**: What to do, what not to do
- **Give Context**: Why this subagent exists
### Configuration
- **Model Selection**: Use faster models for simple tasks
- **Temperature**: Lower (0.2-0.3) for precise work, higher (0.7-0.9) for creative
- **Capabilities**: Tag accurately for routing
## Available Subagents
### code-reviewer
Reviews trading strategy code for bugs, performance, and best practices.
**Capabilities:**
- `static_analysis`
- `performance_review`
- `security_audit`
- `code_quality`
**Memory:**
- Review guidelines
- Common patterns
- Best practices
### risk-analyzer (TODO)
Analyzes trading risk and exposure.
### market-analyst (TODO)
Provides market analysis and insights.
## Troubleshooting
### Memory Files Not Loading
- Check file paths in config.yaml
- Ensure files exist in memory/ directory
- Check file permissions
### Subagent Not Being Routed
- Verify triggers in subagent-routing.yaml
- Check priority (higher priority matches first)
- Ensure enabled: true
### Model Errors
- Verify API keys in environment
- Check model override is valid
- Ensure token limits not exceeded

View File

@@ -0,0 +1,179 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js';
import { readFile } from 'fs/promises';
import { join } from 'path';
/**
 * Subagent configuration (loaded from config.yaml)
 */
export interface SubagentConfig {
  name: string; // Unique subagent identifier (matches its directory name)
  model?: string; // Override default model
  temperature?: number; // Optional sampling temperature override
  maxTokens?: number; // Optional max output tokens override
  memoryFiles: string[]; // Memory files to load from memory/ directory
  capabilities: string[]; // Capability tags used for routing/discovery
  systemPromptFile?: string; // Path to system-prompt.md
}
/**
 * Subagent execution context
 */
export interface SubagentContext {
  userContext: UserContext; // User/session on whose behalf the subagent runs
  conversationHistory?: BaseMessage[]; // Optional prior messages prepended before the current input
}
/**
 * Base subagent class
 *
 * Subagents are specialized agents with:
 * - Dedicated system prompts
 * - Multi-file memory (guidelines, patterns, best practices)
 * - Optional model override
 * - Specific capabilities
 *
 * Structure:
 *   subagents/
 *     code-reviewer/
 *       config.yaml
 *       system-prompt.md
 *       memory/
 *         review-guidelines.md
 *         common-patterns.md
 *       index.ts
 */
export abstract class BaseSubagent {
  protected logger: FastifyBaseLogger;
  protected model: BaseChatModel;
  protected config: SubagentConfig;
  // System prompt text loaded from systemPromptFile (undefined if missing/unreadable)
  protected systemPrompt?: string;
  // Loaded memory file contents, each prefixed with a "# <filename>" header
  protected memoryContext: string[] = [];

  constructor(
    config: SubagentConfig,
    model: BaseChatModel,
    logger: FastifyBaseLogger
  ) {
    this.config = config;
    this.model = model;
    this.logger = logger;
  }

  /**
   * Initialize subagent: load system prompt and memory files.
   *
   * Improvement: memory files are independent, so they are read in parallel
   * with Promise.all instead of sequentially. The order of memoryContext
   * still follows config.memoryFiles, and unreadable or empty files are
   * skipped (loadFile logs a warning and returns undefined).
   */
  async initialize(basePath: string): Promise<void> {
    this.logger.info({ subagent: this.config.name }, 'Initializing subagent');

    // Load system prompt
    if (this.config.systemPromptFile) {
      const promptPath = join(basePath, this.config.systemPromptFile);
      this.systemPrompt = await this.loadFile(promptPath);
    }

    // Load memory files in parallel; Promise.all preserves input order.
    const loaded = await Promise.all(
      this.config.memoryFiles.map(async (memoryFile) => {
        const content = await this.loadFile(join(basePath, 'memory', memoryFile));
        return content ? `# ${memoryFile}\n\n${content}` : undefined;
      })
    );
    for (const entry of loaded) {
      if (entry) {
        this.memoryContext.push(entry);
      }
    }

    this.logger.info(
      {
        subagent: this.config.name,
        memoryFiles: this.config.memoryFiles.length,
        systemPromptLoaded: !!this.systemPrompt,
      },
      'Subagent initialized'
    );
  }

  /**
   * Execute subagent with given input
   */
  abstract execute(
    context: SubagentContext,
    input: string
  ): Promise<string>;

  /**
   * Stream execution (optional; default implementation runs execute() to
   * completion and yields the whole result as a single chunk)
   */
  async *stream(
    context: SubagentContext,
    input: string
  ): AsyncGenerator<string> {
    const result = await this.execute(context, input);
    yield result;
  }

  /**
   * Build messages with system prompt and memory context.
   *
   * Order: system message (prompt + knowledge base), then any provided
   * conversation history, then the current input as a human message.
   */
  protected buildMessages(
    context: SubagentContext,
    currentInput: string
  ): BaseMessage[] {
    const messages: BaseMessage[] = [];

    // System prompt with memory context; fall back to a minimal identity
    // prompt when no system-prompt file was configured/loaded.
    let systemContent = this.systemPrompt || `You are ${this.config.name}.`;
    if (this.memoryContext.length > 0) {
      systemContent += '\n\n# Knowledge Base\n\n';
      systemContent += this.memoryContext.join('\n\n---\n\n');
    }
    messages.push(new SystemMessage(systemContent));

    // Add conversation history if provided
    if (context.conversationHistory && context.conversationHistory.length > 0) {
      messages.push(...context.conversationHistory);
    }

    // Add current input
    messages.push(new HumanMessage(currentInput));
    return messages;
  }

  /**
   * Load file content; returns undefined (with a warning) on any read error
   * so a missing memory file degrades gracefully instead of aborting init.
   */
  private async loadFile(path: string): Promise<string | undefined> {
    try {
      const content = await readFile(path, 'utf-8');
      return content;
    } catch (error) {
      this.logger.warn({ error, path }, 'Failed to load file');
      return undefined;
    }
  }

  /**
   * Get subagent name
   */
  getName(): string {
    return this.config.name;
  }

  /**
   * Get subagent capabilities
   */
  getCapabilities(): string[] {
    return this.config.capabilities;
  }

  /**
   * Check if subagent has a specific capability
   */
  hasCapability(capability: string): boolean {
    return this.config.capabilities.includes(capability);
  }
}

View File

@@ -0,0 +1,26 @@
# Code Reviewer Subagent Configuration
name: code-reviewer
description: Reviews trading strategy code for bugs, performance issues, and best practices
# Model configuration (optional override)
model: claude-3-5-sonnet-20241022
temperature: 0.3
maxTokens: 4096
# Memory files to load from memory/ directory
memoryFiles:
- review-guidelines.md
- common-patterns.md
- best-practices.md
# System prompt file
systemPromptFile: system-prompt.md
# Capabilities this subagent provides
capabilities:
- static_analysis
- performance_review
- security_audit
- code_quality
- best_practices

View File

@@ -0,0 +1,91 @@
import { BaseSubagent, type SubagentConfig, type SubagentContext } from '../base-subagent.js';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
/**
 * Code Reviewer Subagent
 *
 * Specialized agent for reviewing trading strategy code.
 * Reviews for:
 * - Logic errors and bugs
 * - Performance issues
 * - Security vulnerabilities
 * - Trading best practices
 * - Code quality
 *
 * Loads knowledge from multi-file memory:
 * - review-guidelines.md: What to check for
 * - common-patterns.md: Good and bad examples
 * - best-practices.md: Industry standards
 */
export class CodeReviewerSubagent extends BaseSubagent {
  constructor(config: SubagentConfig, model: BaseChatModel, logger: FastifyBaseLogger) {
    super(config, model, logger);
  }

  /**
   * Build the review prompt messages.
   *
   * Fix: the prompt template was duplicated verbatim in execute() and
   * stream(); extracting it here guarantees the two paths cannot drift.
   */
  private buildReviewMessages(context: SubagentContext, code: string) {
    return this.buildMessages(
      context,
      `Review the following trading strategy code:\n\n\`\`\`typescript\n${code}\n\`\`\``
    );
  }

  /**
   * Review code and provide structured feedback as a single string
   */
  async execute(context: SubagentContext, code: string): Promise<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        codeLength: code.length,
      },
      'Reviewing code'
    );
    const messages = this.buildReviewMessages(context, code);
    const response = await this.model.invoke(messages);
    return response.content as string;
  }

  /**
   * Stream code review chunk-by-chunk
   */
  async *stream(context: SubagentContext, code: string): AsyncGenerator<string> {
    this.logger.info(
      {
        subagent: this.getName(),
        userId: context.userContext.userId,
        codeLength: code.length,
      },
      'Streaming code review'
    );
    const messages = this.buildReviewMessages(context, code);
    const stream = await this.model.stream(messages);
    for await (const chunk of stream) {
      yield chunk.content as string;
    }
  }
}
/**
 * Factory: read config.yaml from basePath, construct a CodeReviewerSubagent,
 * and run its initialization (system prompt + memory files) before returning.
 */
export async function createCodeReviewerSubagent(
  model: BaseChatModel,
  logger: FastifyBaseLogger,
  basePath: string
): Promise<CodeReviewerSubagent> {
  const { readFile } = await import('fs/promises');
  const { join } = await import('path');
  const yaml = await import('js-yaml');

  // Parse the YAML configuration for this subagent.
  const rawConfig = await readFile(join(basePath, 'config.yaml'), 'utf-8');
  const parsedConfig = yaml.load(rawConfig) as SubagentConfig;

  // Build the subagent and load its prompt/memory before handing it back.
  const reviewer = new CodeReviewerSubagent(parsedConfig, model, logger);
  await reviewer.initialize(basePath);
  return reviewer;
}

View File

@@ -0,0 +1,227 @@
# Trading Strategy Best Practices
## Code Organization
### Separation of Concerns
```typescript
// Good: Clear separation
class Strategy {
async analyze(data: MarketData): Promise<Signal> { }
}
class RiskManager {
validateSignal(signal: Signal): boolean { }
}
class ExecutionEngine {
async execute(signal: Signal): Promise<Order> { }
}
// Bad: Everything in one function
async function trade() {
// Analysis, risk, execution all mixed
}
```
### Configuration Management
```typescript
// Good: External configuration
interface StrategyConfig {
stopLossPercent: number;
takeProfitPercent: number;
maxPositionSize: number;
riskPerTrade: number;
}
const config = loadConfig('strategy.yaml');
// Bad: Hardcoded values scattered throughout
const stopLoss = price * 0.95; // What if you want to change this?
```
## Testing Considerations
### Testable Code
```typescript
// Good: Pure functions, easy to test
function calculateRSI(prices: number[], period: number = 14): number {
// Pure calculation, no side effects
return rsi;
}
// Bad: Hard to test
async function strategy() {
const data = await fetchLiveData(); // Can't control in tests
const signal = analyze(data);
await executeTrade(signal); // Side effects
}
```
### Mock-Friendly Design
```typescript
// Good: Dependency injection
class Strategy {
constructor(
private dataProvider: DataProvider,
private executor: OrderExecutor
) {}
async run() {
const data = await this.dataProvider.getData();
// ...
}
}
// In tests: inject mocks
const strategy = new Strategy(mockDataProvider, mockExecutor);
```
## Performance Optimization
### Avoid Recalculation
```typescript
// Good: Cache indicator results
class IndicatorCache {
private cache = new Map<string, { value: number, timestamp: number }>();
get(key: string, ttl: number, calculator: () => number): number {
const cached = this.cache.get(key);
if (cached && Date.now() - cached.timestamp < ttl) {
return cached.value;
}
const value = calculator();
this.cache.set(key, { value, timestamp: Date.now() });
return value;
}
}
// Bad: Recalculate every time
for (const ticker of tickers) {
const rsi = calculateRSI(await getData(ticker)); // Slow
}
```
### Batch Operations
```typescript
// Good: Batch API calls
const results = await Promise.all(
tickers.map(ticker => dataProvider.getOHLC(ticker))
);
// Bad: Sequential API calls
const results = [];
for (const ticker of tickers) {
results.push(await dataProvider.getOHLC(ticker)); // Slow
}
```
## Error Handling
### Graceful Degradation
```typescript
// Good: Fallback behavior
async function getMarketData(ticker: string): Promise<OHLC[]> {
try {
return await primarySource.fetch(ticker);
} catch (error) {
logger.warn('Primary source failed, trying backup');
try {
return await backupSource.fetch(ticker);
} catch (backupError) {
logger.error('All sources failed');
return getCachedData(ticker); // Last resort
}
}
}
// Bad: Let it crash
async function getMarketData(ticker: string) {
return await api.fetch(ticker); // Uncaught errors
}
```
### Detailed Logging
```typescript
// Good: Structured logging with context
logger.info({
action: 'order_placed',
ticker: 'BTC/USDT',
side: 'buy',
size: 0.1,
price: 50000,
orderId: 'abc123',
strategy: 'mean-reversion'
});
// Bad: String concatenation
console.log('Placed order'); // No context
```
## Documentation
### Self-Documenting Code
```typescript
// Good: Clear naming and JSDoc
/**
* Calculate position size using Kelly Criterion
* @param winRate Probability of winning (0-1)
* @param avgWin Average win amount
* @param avgLoss Average loss amount
* @param capital Total available capital
* @returns Optimal position size in base currency
*/
function calculateKellyPosition(
winRate: number,
avgWin: number,
avgLoss: number,
capital: number
): number {
const kellyPercent = (winRate * avgWin - (1 - winRate) * avgLoss) / avgWin;
return Math.max(0, Math.min(kellyPercent * capital, capital * 0.25)); // Cap at 25%
}
// Bad: Cryptic names
function calc(w: number, a: number, b: number, c: number) {
return (w * a - (1 - w) * b) / a * c;
}
```
## Security
### Input Validation
```typescript
// Good: Validate all external inputs
function validateTicker(ticker: string): boolean {
return /^[A-Z]+:[A-Z]+\/[A-Z]+$/.test(ticker);
}
function validatePeriod(period: string): boolean {
return ['1m', '5m', '15m', '1h', '4h', '1d', '1w'].includes(period);
}
// Bad: Trust user input
function getOHLC(ticker: string, period: string) {
return db.query(`SELECT * FROM ohlc WHERE ticker='${ticker}'`); // SQL injection!
}
```
### Rate Limiting
```typescript
// Good: Prevent API abuse
class RateLimiter {
private calls: number[] = [];
async throttle(maxCallsPerMinute: number): Promise<void> {
const now = Date.now();
this.calls = this.calls.filter(t => now - t < 60000);
if (this.calls.length >= maxCallsPerMinute) {
const wait = 60000 - (now - this.calls[0]);
await sleep(wait);
}
this.calls.push(now);
}
}
```

View File

@@ -0,0 +1,124 @@
# Common Trading Strategy Patterns
## Pattern: Trend Following
```typescript
// Good: Clear trend detection with multiple confirmations
function detectTrend(prices: number[], period: number = 20): 'bull' | 'bear' | 'neutral' {
const sma = calculateSMA(prices, period);
const currentPrice = prices[prices.length - 1];
const priceVsSMA = (currentPrice - sma) / sma;
// Use threshold to avoid noise
if (priceVsSMA > 0.02) return 'bull';
if (priceVsSMA < -0.02) return 'bear';
return 'neutral';
}
// Bad: Single indicator, no confirmation
function detectTrend(prices: number[]): string {
return prices[prices.length - 1] > prices[prices.length - 2] ? 'bull' : 'bear';
}
```
## Pattern: Mean Reversion
```typescript
// Good: Proper boundary checks and position sizing
async function checkMeanReversion(ticker: string): Promise<TradeSignal | null> {
const data = await getOHLC(ticker, 100);
const mean = calculateMean(data.close);
const stdDev = calculateStdDev(data.close);
const current = data.close[data.close.length - 1];
const zScore = (current - mean) / stdDev;
// Only trade at extreme deviations
if (zScore < -2) {
return {
side: 'buy',
size: calculatePositionSize(Math.abs(zScore)), // Scale with confidence
stopLoss: current * 0.95,
};
}
return null;
}
// Bad: No risk management, arbitrary thresholds
function checkMeanReversion(price: number, avg: number): boolean {
return price < avg; // Too simplistic
}
```
## Pattern: Breakout Detection
```typescript
// Good: Volume confirmation and false breakout protection
function detectBreakout(ohlc: OHLC[], resistance: number): boolean {
const current = ohlc[ohlc.length - 1];
const previous = ohlc[ohlc.length - 2];
// Price breaks resistance
const priceBreak = current.close > resistance && previous.close <= resistance;
// Volume confirmation (at least 1.5x average)
const avgVolume = ohlc.slice(-20, -1).reduce((sum, c) => sum + c.volume, 0) / 19;
const volumeConfirm = current.volume > avgVolume * 1.5;
// Wait for candle close to avoid false breaks
const candleClosed = true; // Check if candle is complete
return priceBreak && volumeConfirm && candleClosed;
}
// Bad: No confirmation, premature signal
function detectBreakout(price: number, resistance: number): boolean {
return price > resistance; // False positives
}
```
## Pattern: Risk Management
```typescript
// Good: Comprehensive risk checks
class PositionManager {
private readonly MAX_POSITION_PERCENT = 0.05; // 5% of portfolio
private readonly MAX_DAILY_LOSS = 0.02; // 2% daily drawdown limit
async openPosition(signal: TradeSignal, accountBalance: number): Promise<boolean> {
// Check daily loss limit
if (this.getDailyPnL() / accountBalance < -this.MAX_DAILY_LOSS) {
logger.warn('Daily loss limit reached');
return false;
}
// Position size check
const maxSize = accountBalance * this.MAX_POSITION_PERCENT;
const actualSize = Math.min(signal.size, maxSize);
// Risk/reward check
const risk = Math.abs(signal.price - signal.stopLoss);
const reward = Math.abs(signal.takeProfit - signal.price);
if (reward / risk < 2) {
logger.info('Risk/reward ratio too low');
return false;
}
return await this.executeOrder(signal, actualSize);
}
}
// Bad: No risk checks
async function openPosition(signal: any) {
return await exchange.buy(signal.ticker, signal.size); // Dangerous
}
```
## Anti-Patterns to Avoid
1. **Magic Numbers**: Use named constants
2. **Global State**: Pass state explicitly
3. **Synchronous Blocking**: Use async for I/O
4. **No Error Handling**: Always wrap in try/catch
5. **Ignoring Slippage**: Factor in execution costs

View File

@@ -0,0 +1,67 @@
# Code Review Guidelines
## Trading Strategy Specific Checks
### Position Sizing
- ✅ Check for dynamic position sizing based on account balance
- ✅ Verify max position size limits
- ❌ Flag hardcoded position sizes
- ❌ Flag missing position size validation
### Order Handling
- ✅ Verify order type is appropriate (market vs limit)
- ✅ Check for order timeout handling
- ❌ Flag missing order confirmation checks
- ❌ Flag potential duplicate orders
### Risk Management
- ✅ Verify stop-loss is always set
- ✅ Check take-profit levels are realistic
- ❌ Flag missing drawdown protection
- ❌ Flag strategies without maximum daily loss limits
### Data Handling
- ✅ Check for proper OHLC data validation
- ✅ Verify timestamp handling (timezone, microseconds)
- ❌ Flag missing null/undefined checks
- ❌ Flag potential look-ahead bias
### Performance
- ✅ Verify indicators are calculated efficiently
- ✅ Check for unnecessary re-calculations
- ❌ Flag O(n²) or worse algorithms in hot paths
- ❌ Flag large memory allocations in loops
## Severity Levels
### Critical (🔴)
- Will cause financial loss or system crash
- Security vulnerabilities
- Data integrity issues
- Must be fixed before deployment
### High (🟠)
- Significant bugs or edge cases
- Performance issues that affect execution
- Risk management gaps
- Should be fixed before deployment
### Medium (🟡)
- Code quality issues
- Minor performance improvements
- Best practice violations
- Fix when convenient
### Low (🟢)
- Style preferences
- Documentation improvements
- Nice-to-have refactorings
- Optional improvements
## Common Pitfalls
1. **Look-Ahead Bias**: Using future data in backtests
2. **Overfitting**: Too many parameters, not enough data
3. **Slippage Ignorance**: Not accounting for execution costs
4. **Survivorship Bias**: Testing only on assets that survived
5. **Data Snooping**: Testing multiple strategies, reporting only the best

View File

@@ -0,0 +1,51 @@
# Code Reviewer System Prompt
You are an expert code reviewer specializing in trading strategies and financial algorithms.
## Your Role
Review trading strategy code with a focus on:
- **Correctness**: Logic errors, edge cases, off-by-one errors
- **Performance**: Inefficient loops, unnecessary calculations
- **Security**: Input validation, overflow risks, race conditions
- **Trading Best Practices**: Position sizing, risk management, order handling
- **Code Quality**: Readability, maintainability, documentation
## Review Approach
1. **Read the entire code** before providing feedback
2. **Identify critical issues first** (bugs, security, data loss)
3. **Suggest improvements** with specific code examples
4. **Explain the "why"** behind each recommendation
5. **Be constructive** - focus on helping, not criticizing
## Output Format
Structure your review as:
```
## Summary
Brief overview of code quality (1-2 sentences)
## Critical Issues
- Issue 1: Description with line number
- Issue 2: Description with line number
## Improvements
- Suggestion 1: Description with example
- Suggestion 2: Description with example
## Best Practices
- Practice 1: Why it matters
- Practice 2: Why it matters
## Overall Assessment
Pass / Needs Revision / Reject
```
## Important Notes
- Be specific with line numbers and code references
- Provide actionable feedback
- Consider the trading context (not just general coding)
- Flag any risk management issues immediately

View File

@@ -0,0 +1,12 @@
// Barrel module: re-exports the public subagent API surface.
export { BaseSubagent, type SubagentConfig, type SubagentContext } from './base-subagent.js';
export { CodeReviewerSubagent, createCodeReviewerSubagent } from './code-reviewer/index.js';

View File

View File

@@ -0,0 +1,461 @@
# Workflows
LangGraph-based workflows for multi-step agent orchestration.
## What are Workflows?
Workflows are state machines that orchestrate complex multi-step tasks with:
- **State Management**: Typed state with annotations
- **Conditional Routing**: Different paths based on state
- **Validation Loops**: Retry with fixes
- **Human-in-the-Loop**: Approval gates and interrupts
- **Error Recovery**: Graceful handling of failures
Built on [LangGraph.js](https://langchain-ai.github.io/langgraphjs/).
## Directory Structure
```
workflows/
├── base-workflow.ts # Base class and utilities
├── {workflow-name}/
│ ├── config.yaml # Workflow configuration
│ ├── state.ts # State schema (Annotations)
│ ├── nodes.ts # Node implementations
│ └── graph.ts # StateGraph definition
└── README.md # This file
```
## Workflow Components
### State (state.ts)
Defines what data flows through the workflow:
```typescript
import { Annotation } from '@langchain/langgraph';
import { BaseWorkflowState } from '../base-workflow.js';
export const MyWorkflowState = Annotation.Root({
...BaseWorkflowState.spec, // Inherit base fields
// Your custom fields
input: Annotation<string>(),
result: Annotation<string | null>({ default: () => null }),
errorCount: Annotation<number>({ default: () => 0 }),
});
export type MyWorkflowStateType = typeof MyWorkflowState.State;
```
### Nodes (nodes.ts)
Functions that transform state:
```typescript
export function createMyNode(deps: Dependencies) {
return async (state: MyWorkflowStateType): Promise<Partial<MyWorkflowStateType>> => {
// Do work
const result = await doSomething(state.input);
// Return partial state update
return { result };
};
}
```
### Graph (graph.ts)
Connects nodes with edges:
```typescript
import { StateGraph } from '@langchain/langgraph';
import { BaseWorkflow } from '../base-workflow.js';
export class MyWorkflow extends BaseWorkflow<MyWorkflowStateType> {
buildGraph(): StateGraph<MyWorkflowStateType> {
const graph = new StateGraph(MyWorkflowState);
// Add nodes
graph
.addNode('step1', createStep1Node())
.addNode('step2', createStep2Node());
// Add edges
graph
.addEdge('__start__', 'step1')
.addEdge('step1', 'step2')
.addEdge('step2', '__end__');
return graph;
}
}
```
### Config (config.yaml)
Workflow settings:
```yaml
name: my-workflow
description: What it does
timeout: 300000 # 5 minutes
maxRetries: 3
requiresApproval: true
approvalNodes:
- human_approval
# Custom settings
myCustomSetting: value
```
## Common Patterns
### 1. Validation Loop (Retry with Fixes)
```typescript
graph
.addNode('validate', validateNode)
.addNode('fix', fixNode)
.addConditionalEdges('validate', (state) => {
if (state.isValid) return 'next_step';
if (state.retryCount >= 3) return '__end__'; // Give up
return 'fix'; // Try to fix
})
.addEdge('fix', 'validate'); // Loop back
```
### 2. Human-in-the-Loop (Approval)
```typescript
const approvalNode = async (state) => {
// Send approval request to user's channel
await sendToChannel(state.userContext.activeChannel, {
type: 'approval_request',
data: {
action: 'execute_trade',
details: state.tradeDetails,
}
});
// Mark as waiting for approval
return { approvalRequested: true, userApproved: false };
};
graph.addConditionalEdges('approval', (state) => {
return state.userApproved ? 'execute' : '__end__';
});
// To resume after user input:
// const updated = await workflow.execute({ ...state, userApproved: true });
```
### 3. Parallel Execution
```typescript
graph
  .addNode('parallel_start', startNode)
  .addNode('task_a', taskANode)
  .addNode('task_b', taskBNode)
  .addNode('merge', mergeNode);
// Fan out: multiple outgoing edges from the same node run in parallel
// within one superstep (no special Branch API is needed)
graph
  .addEdge('parallel_start', 'task_a')
  .addEdge('parallel_start', 'task_b');
// Merge results
graph
  .addEdge('task_a', 'merge')
  .addEdge('task_b', 'merge');
```
### 4. Error Recovery
```typescript
const resilientNode = async (state) => {
try {
const result = await riskyOperation();
return { result, error: null };
} catch (error) {
logger.error({ error }, 'Operation failed');
return {
error: error.message,
fallbackUsed: true,
result: await fallbackOperation()
};
}
};
```
### 5. Conditional Routing
```typescript
graph.addConditionalEdges('decision', (state) => {
if (state.score > 0.8) return 'high_confidence';
if (state.score > 0.5) return 'medium_confidence';
return 'low_confidence';
});
graph
.addNode('high_confidence', autoApproveNode)
.addNode('medium_confidence', humanReviewNode)
.addNode('low_confidence', rejectNode);
```
## Available Workflows
### strategy-validation
Validates trading strategies with multiple steps and a validation loop.
**Flow:**
1. Code Review (using CodeReviewerSubagent)
2. If issues → Fix Code → loop back
3. Backtest (via MCP)
4. If failed → Fix Code → loop back
5. Risk Assessment
6. Human Approval
7. Final Recommendation
**Features:**
- Max 3 retry attempts
- Multi-file memory from subagent
- Risk-based auto-approval
- Comprehensive state tracking
### trading-request
Human-in-the-loop workflow for trade execution.
**Flow:**
1. Analyze market conditions
2. Calculate risk and position size
3. Request human approval (PAUSE)
4. If approved → Execute trade
5. Generate summary
**Features:**
- Interrupt at approval node
- Channel-aware approval UI
- Risk validation
- Execution confirmation
## Creating a New Workflow
### 1. Create Directory
```bash
mkdir -p workflows/my-workflow
```
### 2. Define State
```typescript
// state.ts
import { Annotation } from '@langchain/langgraph';
import { BaseWorkflowState } from '../base-workflow.js';
export const MyWorkflowState = Annotation.Root({
...BaseWorkflowState.spec,
// Add your fields
input: Annotation<string>(),
step1Result: Annotation<string | null>({ default: () => null }),
step2Result: Annotation<string | null>({ default: () => null }),
});
export type MyWorkflowStateType = typeof MyWorkflowState.State;
```
### 3. Create Nodes
```typescript
// nodes.ts
import { MyWorkflowStateType } from './state.js';
export function createStep1Node(deps: any) {
return async (state: MyWorkflowStateType) => {
const result = await doStep1(state.input);
return { step1Result: result };
};
}
export function createStep2Node(deps: any) {
return async (state: MyWorkflowStateType) => {
const result = await doStep2(state.step1Result);
return { step2Result: result, output: result };
};
}
```
### 4. Build Graph
```typescript
// graph.ts
import { StateGraph } from '@langchain/langgraph';
import { BaseWorkflow, WorkflowConfig } from '../base-workflow.js';
import { MyWorkflowState, MyWorkflowStateType } from './state.js';
import { createStep1Node, createStep2Node } from './nodes.js';
export class MyWorkflow extends BaseWorkflow<MyWorkflowStateType> {
constructor(config: WorkflowConfig, private deps: any, logger: Logger) {
super(config, logger);
}
buildGraph(): StateGraph<MyWorkflowStateType> {
const graph = new StateGraph(MyWorkflowState);
const step1 = createStep1Node(this.deps);
const step2 = createStep2Node(this.deps);
graph
.addNode('step1', step1)
.addNode('step2', step2)
.addEdge('__start__', 'step1')
.addEdge('step1', 'step2')
.addEdge('step2', '__end__');
return graph;
}
}
```
### 5. Create Config
```yaml
# config.yaml
name: my-workflow
description: My workflow description
timeout: 60000
maxRetries: 3
requiresApproval: false
model: claude-3-5-sonnet-20241022
```
### 6. Add Factory Function
```typescript
// graph.ts (continued)
export async function createMyWorkflow(
deps: any,
logger: Logger,
configPath: string
): Promise<MyWorkflow> {
const config = await loadYAML(configPath);
const workflow = new MyWorkflow(config, deps, logger);
workflow.compile();
return workflow;
}
```
## Usage
### Execute Workflow
```typescript
import { createMyWorkflow } from './harness/workflows';
const workflow = await createMyWorkflow(deps, logger, configPath);
const result = await workflow.execute({
userContext,
input: 'my input'
});
console.log(result.output);
```
### Stream Workflow
```typescript
for await (const state of workflow.stream({ userContext, input })) {
console.log('Current state:', state);
}
```
### With Interrupts (Human-in-the-Loop)
```typescript
// Initial execution (pauses at interrupt)
const pausedState = await workflow.execute(initialState);
// User provides input
const userInput = await getUserApproval();
// Resume from paused state
const finalState = await workflow.execute({
...pausedState,
userApproved: userInput.approved
});
```
## Best Practices
### State Design
- **Immutable Updates**: Return partial state, don't mutate
- **Type Safety**: Use TypeScript annotations
- **Defaults**: Provide sensible defaults
- **Nullable Fields**: Use `| null` with `default: () => null`
### Node Implementation
- **Pure Functions**: Avoid side effects in state logic
- **Error Handling**: Catch errors, return error state
- **Logging**: Log entry/exit of nodes
- **Partial Updates**: Only return fields that changed
### Graph Design
- **Single Responsibility**: Each node does one thing
- **Clear Flow**: Easy to visualize the graph
- **Error Paths**: Handle failures gracefully
- **Idempotency**: Safe to retry nodes
### Configuration
- **Timeouts**: Set reasonable limits
- **Retries**: Don't retry forever
- **Approvals**: Mark approval nodes explicitly
- **Documentation**: Explain complex config values
## Debugging
### View Graph
```typescript
// Get graph structure
const compiled = workflow.compile();
console.log(compiled.getGraph());
```
### Log State
```typescript
const debugNode = async (state) => {
logger.debug({ state }, 'Current state');
return {}; // No changes
};
graph.addNode('debug', debugNode);
```
### Test Nodes in Isolation
```typescript
const step1 = createStep1Node(deps);
const result = await step1({ input: 'test', /* ... */ });
expect(result.step1Result).toBe('expected');
```
## References
- [LangGraph.js Docs](https://langchain-ai.github.io/langgraphjs/)
- [LangChain.js Docs](https://js.langchain.com/)
- [Example: strategy-validation](./strategy-validation/graph.ts)
- [Example: trading-request](./trading-request/graph.ts)

View File

@@ -0,0 +1,200 @@
import { Annotation } from '@langchain/langgraph';
import type { FastifyBaseLogger } from 'fastify';
import type { UserContext } from '../memory/session-context.js';
/**
 * Workflow configuration (loaded from config.yaml).
 *
 * NOTE(review): workflow-specific YAML keys (e.g. maxValidationRetries,
 * minBacktestScore, model, temperature) are not declared here and are
 * therefore invisible to the type checker — confirm whether they should
 * be added to this interface or a per-workflow extension of it.
 */
export interface WorkflowConfig {
  name: string;
  description: string;
  timeout?: number; // Milliseconds
  maxRetries?: number;
  requiresApproval?: boolean;
  approvalNodes?: string[]; // Nodes that require human approval
}
/**
 * Base workflow state (all workflows extend this).
 *
 * All reducers below are last-write-wins: `right ?? left` keeps the
 * previous value when a node returns `undefined` for the field, and the
 * metadata reducer shallow-merges updates into the accumulated record.
 */
export const BaseWorkflowState = Annotation.Root({
  // Per-user execution context (identity, license, active channel).
  userContext: Annotation<UserContext>(),
  // Raw workflow input. No reducer/default: must be supplied at start.
  input: Annotation<string>(),
  // Final output, set by a terminal node; null until then.
  output: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  // Last error message, or null if no node reported a failure.
  error: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  // Free-form metadata; updates are shallow-merged, later keys win.
  metadata: Annotation<Record<string, unknown>>({
    value: (left, right) => ({ ...left, ...right }),
    default: () => ({}),
  }),
});
export type BaseWorkflowStateType = typeof BaseWorkflowState.State;
/**
* Workflow node function type
*/
export type WorkflowNode<TState> = (state: TState) => Promise<Partial<TState>>;
/**
* Workflow edge condition function type
*/
export type WorkflowEdgeCondition<TState> = (state: TState) => string;
/**
* Base workflow class
*
* Workflows are LangGraph state machines with:
* - Config-driven setup (timeout, retries, approval gates)
* - Standardized state structure
* - Support for human-in-the-loop
* - Validation loops
* - Error handling
*
* Structure:
* workflows/
* strategy-validation/
* config.yaml
* graph.ts
* nodes.ts
* state.ts
*/
export abstract class BaseWorkflow<TState extends BaseWorkflowStateType> {
  protected logger: FastifyBaseLogger;
  protected config: WorkflowConfig;
  // Compiled LangGraph runnable; undefined until compile() is called.
  protected graph?: any;

  constructor(config: WorkflowConfig, logger: FastifyBaseLogger) {
    this.config = config;
    this.logger = logger;
  }

  /**
   * Build the workflow graph (implemented by subclasses).
   */
  abstract buildGraph(): any;

  /**
   * Compile the workflow graph. Must be called before execute()/stream().
   */
  compile(): void {
    this.logger.info({ workflow: this.config.name }, 'Compiling workflow graph');
    const stateGraph = this.buildGraph();
    this.graph = stateGraph.compile();
  }

  /**
   * Execute the workflow to completion.
   *
   * @param initialState - Partial initial state; fields not provided fall
   *   back to the annotation defaults defined in the state schema.
   * @returns The final state after the graph reaches `__end__`.
   * @throws Error if the workflow was not compiled, a node throws, or the
   *   configured timeout elapses.
   */
  async execute(initialState: Partial<TState>): Promise<TState> {
    if (!this.graph) {
      throw new Error('Workflow not compiled. Call compile() first.');
    }
    this.logger.info(
      { workflow: this.config.name, userId: initialState.userContext?.userId },
      'Executing workflow'
    );
    const startTime = Date.now();
    try {
      // Execute with timeout if configured
      const result = this.config.timeout
        ? await this.executeWithTimeout(initialState)
        : await this.graph.invoke(initialState);
      const duration = Date.now() - startTime;
      this.logger.info(
        {
          workflow: this.config.name,
          duration,
          // "success" here means no node recorded an error in state.
          success: !result.error,
        },
        'Workflow execution completed'
      );
      return result;
    } catch (error) {
      this.logger.error(
        { error, workflow: this.config.name },
        'Workflow execution failed'
      );
      throw error;
    }
  }

  /**
   * Stream workflow execution, yielding each intermediate state update.
   */
  async *stream(initialState: Partial<TState>): AsyncGenerator<TState> {
    if (!this.graph) {
      throw new Error('Workflow not compiled. Call compile() first.');
    }
    this.logger.info(
      { workflow: this.config.name, userId: initialState.userContext?.userId },
      'Streaming workflow execution'
    );
    try {
      const stream = await this.graph.stream(initialState);
      for await (const state of stream) {
        yield state;
      }
    } catch (error) {
      this.logger.error(
        { error, workflow: this.config.name },
        'Workflow streaming failed'
      );
      throw error;
    }
  }

  /**
   * Execute with timeout, racing the graph invocation against a timer.
   *
   * FIX: the original never cleared the timeout, so every timed run left
   * a live timer keeping the event loop (and a pending rejection) alive
   * for the full timeout window even after the graph finished early.
   */
  private async executeWithTimeout(initialState: Partial<TState>): Promise<TState> {
    if (!this.config.timeout || !this.graph) {
      throw new Error('Invalid state');
    }
    let timer: ReturnType<typeof setTimeout> | undefined;
    try {
      return await Promise.race([
        this.graph.invoke(initialState) as Promise<TState>,
        new Promise<TState>((_, reject) => {
          timer = setTimeout(
            () => reject(new Error(`Workflow timeout after ${this.config.timeout}ms`)),
            this.config.timeout
          );
        }),
      ]);
    } finally {
      // Always cancel the timer once the race settles.
      if (timer !== undefined) clearTimeout(timer);
    }
  }

  /**
   * Get workflow name.
   */
  getName(): string {
    return this.config.name;
  }

  /**
   * Check if workflow requires approval.
   */
  requiresApproval(): boolean {
    return this.config.requiresApproval || false;
  }

  /**
   * Get approval nodes (empty when none are configured).
   */
  getApprovalNodes(): string[] {
    return this.config.approvalNodes || [];
  }
}

View File

@@ -0,0 +1,20 @@
// Workflows exports
export {
BaseWorkflow,
BaseWorkflowState,
type WorkflowConfig,
type BaseWorkflowStateType,
type WorkflowNode,
type WorkflowEdgeCondition,
} from './base-workflow.js';
export {
StrategyValidationWorkflow,
createStrategyValidationWorkflow,
} from './strategy-validation/graph.js';
export {
TradingRequestWorkflow,
createTradingRequestWorkflow,
} from './trading-request/graph.js';

View File

@@ -0,0 +1,19 @@
# Strategy Validation Workflow Configuration
name: strategy-validation
description: Validates trading strategies with code review, backtest, and risk assessment
# Workflow settings
timeout: 300000 # 5 minutes
maxRetries: 3
requiresApproval: true
approvalNodes:
- human_approval
# Validation loop settings
maxValidationRetries: 3 # Max times to retry fixing errors
minBacktestScore: 0.5 # Minimum Sharpe ratio to pass
# Model override (optional)
model: claude-3-5-sonnet-20241022
temperature: 0.3

View File

@@ -0,0 +1,138 @@
import { StateGraph } from '@langchain/langgraph';
import { BaseWorkflow, type WorkflowConfig } from '../base-workflow.js';
import { StrategyValidationState, type StrategyValidationStateType } from './state.js';
import {
createCodeReviewNode,
createFixCodeNode,
createBacktestNode,
createRiskAssessmentNode,
createHumanApprovalNode,
createRecommendationNode,
} from './nodes.js';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
/**
* Strategy Validation Workflow
*
* Multi-step workflow with validation loop:
* 1. Code Review (using CodeReviewerSubagent)
* 2. If issues found → Fix Code → Loop back to Code Review
* 3. Backtest (using user's MCP server)
* 4. If backtest fails → Fix Code → Loop back to Code Review
* 5. Risk Assessment
* 6. Human Approval (pause for user input)
* 7. Final Recommendation
*
* Features:
* - Validation loop with max retries
* - Human-in-the-loop approval gate
* - Multi-file memory from CodeReviewerSubagent
* - Comprehensive state tracking
*/
export class StrategyValidationWorkflow extends BaseWorkflow<StrategyValidationStateType> {
  constructor(
    config: WorkflowConfig,
    private model: BaseChatModel,
    private codeReviewer: CodeReviewerSubagent,
    private mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
    logger: FastifyBaseLogger
  ) {
    super(config, logger);
  }

  // Builds the validation state machine. Routing summary:
  //   code_review -> fix_code | recommendation | backtest
  //   fix_code    -> code_review (retry loop; fix_code bumps validationRetryCount)
  //   backtest    -> fix_code | recommendation | risk_assessment
  //   risk_assessment -> human_approval -> recommendation | __end__
  // NOTE(review): the `as any` casts bypass StateGraph's node-name typing,
  // so edge targets are not statically checked against the added nodes.
  buildGraph(): any {
    const graph = new StateGraph(StrategyValidationState);
    // Create nodes
    const codeReviewNode = createCodeReviewNode(this.codeReviewer, this.logger);
    const fixCodeNode = createFixCodeNode(this.model, this.logger);
    const backtestNode = createBacktestNode(this.mcpBacktestFn, this.logger);
    const riskAssessmentNode = createRiskAssessmentNode(this.model, this.logger);
    const humanApprovalNode = createHumanApprovalNode(this.logger);
    const recommendationNode = createRecommendationNode(this.model, this.logger);
    // Add nodes to graph
    graph
      .addNode('code_review', codeReviewNode)
      .addNode('fix_code', fixCodeNode)
      .addNode('backtest', backtestNode)
      .addNode('risk_assessment', riskAssessmentNode)
      .addNode('human_approval', humanApprovalNode)
      .addNode('recommendation', recommendationNode);
    // Define edges
    (graph as any).addEdge('__start__', 'code_review');
    // Conditional: After code review, fix if needed or proceed to backtest
    // NOTE(review): the retry cap is hard-coded to 3 here (and below),
    // while config.yaml declares maxValidationRetries — confirm whether
    // the config value should be wired through instead.
    (graph as any).addConditionalEdges('code_review', (state: any) => {
      if (state.needsFixing && state.validationRetryCount < 3) {
        return 'fix_code';
      }
      if (state.needsFixing && state.validationRetryCount >= 3) {
        return 'recommendation'; // Give up, generate rejection
      }
      return 'backtest';
    });
    // After fixing code, loop back to code review
    (graph as any).addEdge('fix_code', 'code_review');
    // Conditional: After backtest, fix if failed or proceed to risk assessment
    (graph as any).addConditionalEdges('backtest', (state: any) => {
      if (!state.backtestPassed && state.validationRetryCount < 3) {
        return 'fix_code';
      }
      if (!state.backtestPassed && state.validationRetryCount >= 3) {
        return 'recommendation'; // Give up
      }
      return 'risk_assessment';
    });
    // After risk assessment, go to human approval
    (graph as any).addEdge('risk_assessment', 'human_approval');
    // Conditional: After human approval, proceed to recommendation or reject
    (graph as any).addConditionalEdges('human_approval', (state: any) => {
      return state.humanApproved ? 'recommendation' : '__end__';
    });
    // Final recommendation is terminal
    (graph as any).addEdge('recommendation', '__end__');
    return graph;
  }
}
/**
 * Factory: load the YAML config from disk, construct the workflow with
 * its dependencies, compile the graph, and return it ready to execute.
 */
export async function createStrategyValidationWorkflow(
  model: BaseChatModel,
  codeReviewer: CodeReviewerSubagent,
  mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
  logger: FastifyBaseLogger,
  configPath: string
): Promise<StrategyValidationWorkflow> {
  // Lazy-load fs and the YAML parser together.
  const [{ readFile }, yaml] = await Promise.all([
    import('fs/promises'),
    import('js-yaml'),
  ]);
  const config = yaml.load(await readFile(configPath, 'utf-8')) as WorkflowConfig;
  const workflow = new StrategyValidationWorkflow(
    config,
    model,
    codeReviewer,
    mcpBacktestFn,
    logger
  );
  workflow.compile();
  return workflow;
}

View File

@@ -0,0 +1,233 @@
import type { StrategyValidationStateType } from './state.js';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { CodeReviewerSubagent } from '../../subagents/code-reviewer/index.js';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
/**
 * Node: Code Review
 * Runs the CodeReviewerSubagent over the strategy code and flags whether
 * the review text indicates blocking problems.
 */
export function createCodeReviewNode(
  codeReviewer: CodeReviewerSubagent,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Code review');
    const reviewText = await codeReviewer.execute(
      { userContext: state.userContext },
      state.strategyCode
    );
    // Simple issue detection (in production, parse structured output):
    // any mention of "critical" or "reject" marks the code as needing fixes.
    const lowered = reviewText.toLowerCase();
    const flagged = ['critical', 'reject'].some((marker) => lowered.includes(marker));
    return {
      codeReview: reviewText,
      codeIssues: flagged ? ['Issues detected in code review'] : [],
      needsFixing: flagged,
    };
  };
}
/**
 * Node: Fix Code Issues
 * Asks the LLM to rewrite the strategy using the code-review feedback,
 * strips any markdown fences from the reply, and charges one attempt
 * against the validation-loop retry budget.
 */
export function createFixCodeNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Fixing code issues');
    const messages = [
      new SystemMessage(`You are a trading strategy developer.
Fix the issues identified in the code review while maintaining the strategy's logic.
Return only the corrected code without explanation.`),
      new HumanMessage(`Original code:
\`\`\`typescript
${state.strategyCode}
\`\`\`
Code review feedback:
${state.codeReview}
Provide the corrected code:`),
    ];
    const response = await model.invoke(messages);
    // Remove ```typescript / ``` fences the model may wrap the code in.
    const fixedCode = (response.content as string)
      .replace(/```typescript\n?/g, '')
      .replace(/```\n?/g, '')
      .trim();
    return {
      strategyCode: fixedCode,
      validationRetryCount: state.validationRetryCount + 1,
    };
  };
}
/**
 * Node: Backtest Strategy
 * Runs the strategy through the user's MCP backtest function and grades
 * the result on its Sharpe ratio.
 *
 * @param mcpBacktestFn - Callable that executes the backtest via MCP.
 * @param logger - Structured logger.
 * @param minSharpeRatio - Minimum Sharpe ratio required to pass. Defaults
 *   to 0.5 (previously hard-coded; config.yaml's `minBacktestScore` can
 *   now be passed through here).
 */
export function createBacktestNode(
  mcpBacktestFn: (code: string, ticker: string, timeframe: string) => Promise<Record<string, unknown>>,
  logger: FastifyBaseLogger,
  minSharpeRatio = 0.5
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Running backtest');
    try {
      const results = await mcpBacktestFn(
        state.strategyCode,
        state.ticker,
        state.timeframe
      );
      // Check if backtest passed (simplified: Sharpe ratio only; a
      // missing/falsy sharpeRatio counts as 0 and fails).
      const sharpeRatio = (results.sharpeRatio as number) || 0;
      const passed = sharpeRatio > minSharpeRatio;
      return {
        backtestResults: results,
        backtestPassed: passed,
        needsFixing: !passed,
      };
    } catch (error) {
      // A throwing backtest is recorded as a failed run (not a workflow
      // crash) so the fix/retry loop can attempt a repair.
      logger.error({ error }, 'Backtest failed');
      return {
        backtestResults: { error: (error as Error).message },
        backtestPassed: false,
        needsFixing: true,
      };
    }
  };
}
/**
 * Node: Risk Assessment
 * Asks the LLM to grade the strategy + backtest results and parses a
 * RISK_LEVEL marker out of the free-text reply.
 */
export function createRiskAssessmentNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Risk assessment');
    const reply = await model.invoke([
      new SystemMessage(`You are a risk management expert.
Analyze the strategy and backtest results to assess risk level.
Provide: risk level (low/medium/high) and detailed assessment.`),
      new HumanMessage(`Strategy code:
\`\`\`typescript
${state.strategyCode}
\`\`\`
Backtest results:
${JSON.stringify(state.backtestResults, null, 2)}
Provide risk assessment in format:
RISK_LEVEL: [low/medium/high]
ASSESSMENT: [detailed explanation]`),
    ]);
    const assessment = reply.content as string;
    // Parse risk level (simplified): default to medium; explicit markers
    // override, and "high" wins if the reply somehow contains both.
    let riskLevel: 'low' | 'medium' | 'high' = 'medium';
    if (assessment.includes('RISK_LEVEL: low')) riskLevel = 'low';
    if (assessment.includes('RISK_LEVEL: high')) riskLevel = 'high';
    return {
      riskAssessment: assessment,
      riskLevel,
    };
  };
}
/**
 * Node: Human Approval
 * Placeholder approval gate: until channel-based interrupts are wired in,
 * strategies that passed the backtest with low/medium risk are
 * auto-approved; everything else is held for manual review.
 */
export function createHumanApprovalNode(logger: FastifyBaseLogger) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Awaiting human approval');
    // In real implementation, this would:
    // 1. Send approval request to user's channel
    // 2. Store workflow state with interrupt
    // 3. Wait for user response
    // 4. Resume with approval decision
    const riskAcceptable = state.riskLevel === 'low' || state.riskLevel === 'medium';
    const approved = state.backtestPassed && riskAcceptable;
    return {
      humanApproved: approved,
      approvalComment: approved ? 'Auto-approved: passed validation' : 'Needs manual review',
    };
  };
}
/**
 * Node: Final Recommendation
 * Summarizes every validation step for the LLM and maps its free-text
 * verdict onto approve / reject / revise.
 */
export function createRecommendationNode(
  model: BaseChatModel,
  logger: FastifyBaseLogger
) {
  return async (state: StrategyValidationStateType): Promise<Partial<StrategyValidationStateType>> => {
    logger.info('Strategy validation: Generating recommendation');
    const reply = await model.invoke([
      new SystemMessage(`You are the final decision maker for strategy deployment.
Based on all validation steps, provide a clear recommendation: approve, reject, or revise.`),
      new HumanMessage(`Strategy validation summary:
Code Review: ${state.codeIssues.length === 0 ? 'Passed' : 'Issues found'}
Backtest: ${state.backtestPassed ? 'Passed' : 'Failed'}
Risk Level: ${state.riskLevel}
Human Approved: ${state.humanApproved}
Backtest Results:
${JSON.stringify(state.backtestResults, null, 2)}
Risk Assessment:
${state.riskAssessment}
Provide final recommendation (approve/reject/revise) and reasoning:`),
    ]);
    const recommendation = reply.content as string;
    // Parse recommendation (simplified). Checks run in this order so a
    // reply containing both words resolves to 'reject' (conservative).
    let decision: 'approve' | 'reject' | 'revise' = 'revise';
    if (recommendation.toLowerCase().includes('approve')) decision = 'approve';
    if (recommendation.toLowerCase().includes('reject')) decision = 'reject';
    return {
      recommendation: decision,
      recommendationReason: recommendation,
      output: recommendation,
    };
  };
}

View File

@@ -0,0 +1,78 @@
import { Annotation } from '@langchain/langgraph';
import { BaseWorkflowState } from '../base-workflow.js';
/**
 * Strategy validation workflow state.
 *
 * Extends base workflow state with strategy-specific fields.
 * Every reducer below is last-write-wins (`right ?? left`): a node's
 * update replaces the stored value, and `undefined` updates preserve it.
 */
export const StrategyValidationState = Annotation.Root({
  ...BaseWorkflowState.spec,
  // Input (required at start; no defaults)
  strategyCode: Annotation<string>(),
  ticker: Annotation<string>(),
  timeframe: Annotation<string>(),
  // Code review step
  codeReview: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  codeIssues: Annotation<string[]>({
    value: (left, right) => right ?? left,
    default: () => [],
  }),
  // Backtest step
  backtestResults: Annotation<Record<string, unknown> | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  backtestPassed: Annotation<boolean>({
    value: (left, right) => right ?? left,
    default: () => false,
  }),
  // Risk assessment step
  riskAssessment: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  riskLevel: Annotation<'low' | 'medium' | 'high' | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  // Human approval step
  humanApproved: Annotation<boolean>({
    value: (left, right) => right ?? left,
    default: () => false,
  }),
  approvalComment: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  // Validation loop control: fix_code increments the retry count; the
  // graph's conditional edges stop retrying once the cap is reached.
  validationRetryCount: Annotation<number>({
    value: (left, right) => right ?? left,
    default: () => 0,
  }),
  needsFixing: Annotation<boolean>({
    value: (left, right) => right ?? left,
    default: () => false,
  }),
  // Final output
  recommendation: Annotation<'approve' | 'reject' | 'revise' | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
  recommendationReason: Annotation<string | null>({
    value: (left, right) => right ?? left,
    default: () => null,
  }),
});
export type StrategyValidationStateType = typeof StrategyValidationState.State;

View File

@@ -0,0 +1,19 @@
# Trading Request Workflow Configuration
name: trading-request
description: Human-in-the-loop workflow for executing trading requests
# Workflow settings
timeout: 600000 # 10 minutes (includes human wait time)
maxRetries: 1
requiresApproval: true
approvalNodes:
- await_approval
# Trading limits
maxPositionPercent: 0.05 # 5% of portfolio max
minRiskRewardRatio: 2.0 # Minimum 2:1 risk/reward
# Model override (optional)
model: claude-3-5-sonnet-20241022
temperature: 0.2

View File

@@ -0,0 +1,229 @@
import { StateGraph } from '@langchain/langgraph';
import { BaseWorkflow, type WorkflowConfig } from '../base-workflow.js';
import { TradingRequestState, type TradingRequestStateType } from './state.js';
import type { FastifyBaseLogger } from 'fastify';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
/**
* Trading Request Workflow
*
* Human-in-the-loop workflow for executing trades:
* 1. Analyze market conditions
* 2. Calculate risk and position size
* 3. Request human approval (PAUSE HERE)
* 4. If approved → Execute trade
* 5. Generate execution summary
*
* Features:
* - Interrupt at approval node
* - Resume with user input
* - Risk validation
* - Multi-channel approval UI
*/
export class TradingRequestWorkflow extends BaseWorkflow<TradingRequestStateType> {
  constructor(
    config: WorkflowConfig,
    private model: BaseChatModel,
    private marketDataFn: (ticker: string) => Promise<{ price: number; [key: string]: unknown }>,
    private executeTradeFn: (order: any) => Promise<{ orderId: string; status: string; price: number }>,
    logger: FastifyBaseLogger
  ) {
    super(config, logger);
  }

  // Builds the trade pipeline:
  //   analyze -> calculate_risk -> request_approval -> (execute_trade | summary) -> summary -> __end__
  buildGraph(): any {
    const graph = new StateGraph(TradingRequestState);

    // Node: Analyze market — fetch a quote and ask the model for a short
    // qualitative read on the requested order.
    const analyzeNode = async (state: TradingRequestStateType): Promise<Partial<TradingRequestStateType>> => {
      this.logger.info('Trading request: Analyzing market');
      const marketData = await this.marketDataFn(state.ticker);
      const systemPrompt = `You are a market analyst. Analyze current conditions for a ${state.side} order.`;
      const userPrompt = `Ticker: ${state.ticker}
Current Price: ${marketData.price}
Requested: ${state.side} ${state.amount} at ${state.price || 'market'}
Provide 2-3 sentence analysis:`;
      const response = await this.model.invoke([
        new SystemMessage(systemPrompt),
        new HumanMessage(userPrompt),
      ]);
      return {
        marketAnalysis: response.content as string,
        currentPrice: marketData.price,
      };
    };

    // Node: Calculate risk — caps the position at 5% of the (mocked) balance.
    const calculateRiskNode = async (state: TradingRequestStateType): Promise<Partial<TradingRequestStateType>> => {
      this.logger.info('Trading request: Calculating risk');
      // Simplified risk calculation
      // NOTE(review): balance is mocked from an unrelated license field
      // (maxBacktestDays * 1000) — replace with a real account lookup.
      const accountBalance = state.userContext.license.features.maxBacktestDays * 1000; // Mock
      const maxPosition = accountBalance * 0.05; // 5% max
      const positionValue = state.amount * (state.currentPrice || 0);
      const positionSize = Math.min(positionValue, maxPosition);
      // Mock risk/reward (in production, calculate from stop-loss and take-profit)
      const riskRewardRatio = 2.5;
      return {
        riskAssessment: {
          accountBalance,
          maxPosition,
          positionValue,
          positionSize,
        },
        riskRewardRatio,
        positionSize,
      };
    };

    // Node: Request approval (INTERRUPT POINT)
    const requestApprovalNode = async (state: TradingRequestStateType): Promise<Partial<TradingRequestStateType>> => {
      this.logger.info('Trading request: Requesting approval');
      // TODO: Send approval request to user's active channel
      // In production, this would:
      // 1. Format approval UI for the channel (buttons for Telegram, etc.)
      // 2. Send message with trade details
      // 3. Store workflow state
      // 4. Return with interrupt signal
      // 5. LangGraph will pause here until resumed with user input
      // For now, mock approval
      const approvalMessage = `
Trade Request Approval Needed:
- ${state.side.toUpperCase()} ${state.amount} ${state.ticker}
- Current Price: $${state.currentPrice}
- Position Size: $${state.positionSize}
- Risk/Reward: ${state.riskRewardRatio}:1
Market Analysis:
${state.marketAnalysis}
Reply 'approve' or 'reject'
`;
      // NOTE(review): userApproved is forced to false here, so the
      // conditional edge below always routes to 'summary' on the first
      // pass; execution only happens if the caller re-invokes with
      // userApproved=true — confirm this matches the intended resume flow.
      return {
        approvalRequested: true,
        approvalMessage,
        approvalTimestamp: Date.now(),
        // In production, this node would use Interrupt here
        userApproved: false, // Wait for user input
      };
    };

    // Node: Execute trade — failures are captured in state, not thrown.
    const executeTradeNode = async (state: TradingRequestStateType): Promise<Partial<TradingRequestStateType>> => {
      this.logger.info('Trading request: Executing trade');
      try {
        const order = {
          ticker: state.ticker,
          side: state.side,
          amount: state.amount,
          type: state.requestType,
          price: state.price,
        };
        const result = await this.executeTradeFn(order);
        return {
          orderPlaced: true,
          orderId: result.orderId,
          executionPrice: result.price,
          executionStatus: result.status as any,
        };
      } catch (error) {
        this.logger.error({ error }, 'Trade execution failed');
        return {
          orderPlaced: false,
          executionStatus: 'rejected',
          error: (error as Error).message,
        };
      }
    };

    // Node: Generate summary — human-readable outcome for the channel.
    const summaryNode = async (state: TradingRequestStateType): Promise<Partial<TradingRequestStateType>> => {
      this.logger.info('Trading request: Generating summary');
      const summary = state.orderPlaced
        ? `Trade executed successfully:
- Order ID: ${state.orderId}
- ${state.side.toUpperCase()} ${state.amount} ${state.ticker}
- Execution Price: $${state.executionPrice}
- Status: ${state.executionStatus}`
        : `Trade not executed:
- Reason: ${state.userApproved ? 'Execution failed' : 'User rejected'}`;
      return {
        summary,
        output: summary,
      };
    };

    // Add nodes
    graph
      .addNode('analyze', analyzeNode)
      .addNode('calculate_risk', calculateRiskNode)
      .addNode('request_approval', requestApprovalNode)
      .addNode('execute_trade', executeTradeNode)
      .addNode('summary', summaryNode);
    // Define edges
    // NOTE(review): `as any` bypasses StateGraph's node-name typing.
    (graph as any).addEdge('__start__', 'analyze');
    (graph as any).addEdge('analyze', 'calculate_risk');
    (graph as any).addEdge('calculate_risk', 'request_approval');
    // Conditional: After approval, execute or reject
    (graph as any).addConditionalEdges('request_approval', (state: any) => {
      // In production, this would check if user approved via interrupt resume
      return state.userApproved ? 'execute_trade' : 'summary';
    });
    (graph as any).addEdge('execute_trade', 'summary');
    (graph as any).addEdge('summary', '__end__');
    return graph;
  }
}
/**
 * Factory: load the YAML config from disk, construct the workflow with
 * its market-data and execution dependencies, compile, and return it.
 */
export async function createTradingRequestWorkflow(
  model: BaseChatModel,
  marketDataFn: (ticker: string) => Promise<{ price: number; [key: string]: unknown }>,
  executeTradeFn: (order: any) => Promise<{ orderId: string; status: string; price: number }>,
  logger: FastifyBaseLogger,
  configPath: string
): Promise<TradingRequestWorkflow> {
  // Lazy-load fs and the YAML parser together.
  const [{ readFile }, yaml] = await Promise.all([
    import('fs/promises'),
    import('js-yaml'),
  ]);
  const rawConfig = await readFile(configPath, 'utf-8');
  const config = yaml.load(rawConfig) as WorkflowConfig;
  const workflow = new TradingRequestWorkflow(
    config,
    model,
    marketDataFn,
    executeTradeFn,
    logger
  );
  workflow.compile();
  return workflow;
}

View File

@@ -0,0 +1,89 @@
import { Annotation } from '@langchain/langgraph';
import { BaseWorkflowState } from '../base-workflow.js';
/**
 * Trading request workflow state
 *
 * Handles human-in-the-loop approval for trade execution.
 */

/**
 * Channel spec helper: "last non-null write wins".
 *
 * Every optional channel in this state uses the same reducer — keep the
 * previous value unless the incoming update is non-nullish — with a fixed
 * default. Extracting it removes 14 copies of the identical spec.
 */
const lastWins = <T>(defaultValue: T) =>
  Annotation<T>({
    value: (left, right) => right ?? left,
    default: () => defaultValue,
  });

export const TradingRequestState = Annotation.Root({
  ...BaseWorkflowState.spec,

  // Input
  requestType: Annotation<'market_order' | 'limit_order' | 'stop_loss'>(),
  ticker: Annotation<string>(),
  side: Annotation<'buy' | 'sell'>(),
  amount: Annotation<number>(), // Requested amount
  price: lastWins<number | null>(null), // Limit/stop price; null for market orders

  // Analysis step
  marketAnalysis: lastWins<string | null>(null),
  currentPrice: lastWins<number | null>(null),

  // Risk calculation
  riskAssessment: lastWins<Record<string, unknown> | null>(null),
  riskRewardRatio: lastWins<number | null>(null),
  positionSize: lastWins<number | null>(null),

  // Human approval
  approvalRequested: lastWins<boolean>(false),
  userApproved: lastWins<boolean>(false),
  approvalMessage: lastWins<string | null>(null),
  approvalTimestamp: lastWins<number | null>(null),

  // Execution
  orderPlaced: lastWins<boolean>(false),
  orderId: lastWins<string | null>(null),
  executionPrice: lastWins<number | null>(null),
  executionStatus: lastWins<'pending' | 'filled' | 'rejected' | 'cancelled' | null>(null),

  // Output
  summary: lastWins<string | null>(null),
});

export type TradingRequestStateType = typeof TradingRequestState.State;

View File

@@ -88,7 +88,10 @@ export class KubernetesClient {
*/
async deploymentExists(deploymentName: string): Promise<boolean> {
try {
await this.appsApi.readNamespacedDeployment(deploymentName, this.config.namespace);
await this.appsApi.readNamespacedDeployment({
name: deploymentName,
namespace: this.config.namespace
});
return true;
} catch (error: any) {
if (error.response?.statusCode === 404) {
@@ -140,20 +143,26 @@ export class KubernetesClient {
try {
switch (doc.kind) {
case 'Deployment':
await this.appsApi.createNamespacedDeployment(this.config.namespace, doc);
await this.appsApi.createNamespacedDeployment({
namespace: this.config.namespace,
body: doc
});
this.config.logger.info({ deploymentName }, 'Created deployment');
break;
case 'PersistentVolumeClaim':
await this.coreApi.createNamespacedPersistentVolumeClaim(
this.config.namespace,
doc
);
await this.coreApi.createNamespacedPersistentVolumeClaim({
namespace: this.config.namespace,
body: doc
});
this.config.logger.info({ pvcName }, 'Created PVC');
break;
case 'Service':
await this.coreApi.createNamespacedService(this.config.namespace, doc);
await this.coreApi.createNamespacedService({
namespace: this.config.namespace,
body: doc
});
this.config.logger.info({ serviceName }, 'Created service');
break;
@@ -193,12 +202,11 @@ export class KubernetesClient {
while (Date.now() - startTime < timeoutMs) {
try {
const response = await this.appsApi.readNamespacedDeployment(
deploymentName,
this.config.namespace
);
const deployment = await this.appsApi.readNamespacedDeployment({
name: deploymentName,
namespace: this.config.namespace
});
const deployment = response.body;
const status = deployment.status;
// Check if deployment is ready
@@ -215,7 +223,7 @@ export class KubernetesClient {
// Check for failure conditions
if (status?.conditions) {
const failedCondition = status.conditions.find(
(c) => c.type === 'Progressing' && c.status === 'False'
(c: any) => c.type === 'Progressing' && c.status === 'False'
);
if (failedCondition) {
this.config.logger.error(
@@ -255,16 +263,14 @@ export class KubernetesClient {
*/
async getServiceEndpoint(serviceName: string): Promise<string | null> {
try {
const response = await this.coreApi.readNamespacedService(
serviceName,
this.config.namespace
);
const service = response.body;
const service = await this.coreApi.readNamespacedService({
name: serviceName,
namespace: this.config.namespace
});
// For ClusterIP services, return internal DNS name
if (service.spec?.type === 'ClusterIP') {
const port = service.spec.ports?.find((p) => p.name === 'mcp')?.port || 3000;
const port = service.spec.ports?.find((p: any) => p.name === 'mcp')?.port || 3000;
return `http://${serviceName}.${this.config.namespace}.svc.cluster.local:${port}`;
}
@@ -296,7 +302,10 @@ export class KubernetesClient {
// Delete deployment
try {
await this.appsApi.deleteNamespacedDeployment(deploymentName, this.config.namespace);
await this.appsApi.deleteNamespacedDeployment({
name: deploymentName,
namespace: this.config.namespace
});
this.config.logger.info({ deploymentName }, 'Deleted deployment');
} catch (error: any) {
if (error.response?.statusCode !== 404) {
@@ -306,7 +315,10 @@ export class KubernetesClient {
// Delete service
try {
await this.coreApi.deleteNamespacedService(serviceName, this.config.namespace);
await this.coreApi.deleteNamespacedService({
name: serviceName,
namespace: this.config.namespace
});
this.config.logger.info({ serviceName }, 'Deleted service');
} catch (error: any) {
if (error.response?.statusCode !== 404) {
@@ -316,7 +328,10 @@ export class KubernetesClient {
// Delete PVC
try {
await this.coreApi.deleteNamespacedPersistentVolumeClaim(pvcName, this.config.namespace);
await this.coreApi.deleteNamespacedPersistentVolumeClaim({
name: pvcName,
namespace: this.config.namespace
});
this.config.logger.info({ pvcName }, 'Deleted PVC');
} catch (error: any) {
if (error.response?.statusCode !== 404) {

View File

@@ -1,8 +1,5 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatOpenAI } from '@langchain/openai';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatOpenRouter } from '@langchain/openrouter';
import type { FastifyBaseLogger } from 'fastify';
/**
@@ -10,9 +7,6 @@ import type { FastifyBaseLogger } from 'fastify';
*/
export enum LLMProvider {
ANTHROPIC = 'anthropic',
OPENAI = 'openai',
GOOGLE = 'google',
OPENROUTER = 'openrouter',
}
/**
@@ -30,9 +24,6 @@ export interface ModelConfig {
*/
export interface ProviderConfig {
anthropicApiKey?: string;
openaiApiKey?: string;
googleApiKey?: string;
openrouterApiKey?: string;
}
/**
@@ -61,15 +52,6 @@ export class LLMProviderFactory {
case LLMProvider.ANTHROPIC:
return this.createAnthropicModel(modelConfig);
case LLMProvider.OPENAI:
return this.createOpenAIModel(modelConfig);
case LLMProvider.GOOGLE:
return this.createGoogleModel(modelConfig);
case LLMProvider.OPENROUTER:
return this.createOpenRouterModel(modelConfig);
default:
throw new Error(`Unsupported provider: ${modelConfig.provider}`);
}
@@ -91,88 +73,18 @@ export class LLMProviderFactory {
});
}
/**
* Create OpenAI GPT model
*/
private createOpenAIModel(config: ModelConfig): ChatOpenAI {
if (!this.config.openaiApiKey) {
throw new Error('OpenAI API key not configured');
}
return new ChatOpenAI({
model: config.model,
temperature: config.temperature ?? 0.7,
maxTokens: config.maxTokens ?? 4096,
openAIApiKey: this.config.openaiApiKey,
});
}
/**
* Create Google Gemini model
*/
private createGoogleModel(config: ModelConfig): ChatGoogleGenerativeAI {
if (!this.config.googleApiKey) {
throw new Error('Google API key not configured');
}
return new ChatGoogleGenerativeAI({
model: config.model,
temperature: config.temperature ?? 0.7,
maxOutputTokens: config.maxTokens ?? 4096,
apiKey: this.config.googleApiKey,
});
}
/**
* Create OpenRouter model (access to 300+ models)
*/
private createOpenRouterModel(config: ModelConfig): ChatOpenRouter {
if (!this.config.openrouterApiKey) {
throw new Error('OpenRouter API key not configured');
}
return new ChatOpenRouter({
model: config.model,
temperature: config.temperature ?? 0.7,
maxTokens: config.maxTokens ?? 4096,
apiKey: this.config.openrouterApiKey,
});
}
/**
* Get default model based on environment
*/
getDefaultModel(): ModelConfig {
// Check which API keys are available
if (this.config.anthropicApiKey) {
return {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-sonnet-20241022',
};
if (!this.config.anthropicApiKey) {
throw new Error('Anthropic API key not configured');
}
if (this.config.openaiApiKey) {
return {
provider: LLMProvider.OPENAI,
model: 'gpt-4o',
};
}
if (this.config.googleApiKey) {
return {
provider: LLMProvider.GOOGLE,
model: 'gemini-2.0-flash-exp',
};
}
if (this.config.openrouterApiKey) {
return {
provider: LLMProvider.OPENROUTER,
model: 'anthropic/claude-3.5-sonnet',
};
}
throw new Error('No LLM API keys configured');
return {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-sonnet-20241022',
};
}
}
@@ -180,7 +92,6 @@ export class LLMProviderFactory {
* Predefined model configurations
*/
export const MODELS = {
// Anthropic
CLAUDE_SONNET: {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-sonnet-20241022',
@@ -193,24 +104,4 @@ export const MODELS = {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-opus-20240229',
},
// OpenAI
GPT4O: {
provider: LLMProvider.OPENAI,
model: 'gpt-4o',
},
GPT4O_MINI: {
provider: LLMProvider.OPENAI,
model: 'gpt-4o-mini',
},
// Google
GEMINI_2_FLASH: {
provider: LLMProvider.GOOGLE,
model: 'gemini-2.0-flash-exp',
},
GEMINI_PRO: {
provider: LLMProvider.GOOGLE,
model: 'gemini-1.5-pro',
},
} as const satisfies Record<string, ModelConfig>;

View File

@@ -108,11 +108,11 @@ export class ModelRouter {
// Pro users get good models
return isComplex
? { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' }
: { provider: LLMProvider.OPENAI, model: 'gpt-4o-mini' };
: { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
}
// Free users get efficient models
return { provider: LLMProvider.GOOGLE, model: 'gemini-2.0-flash-exp' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
}
/**
@@ -124,10 +124,10 @@ export class ModelRouter {
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' };
case 'pro':
return { provider: LLMProvider.OPENAI, model: 'gpt-4o' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' };
case 'free':
return { provider: LLMProvider.GOOGLE, model: 'gemini-2.0-flash-exp' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
default:
return this.defaultModel;
@@ -137,23 +137,18 @@ export class ModelRouter {
/**
* Route to cheapest available model
*/
private routeByCost(license: UserLicense): ModelConfig {
// Free tier: use cheapest
if (license.licenseType === 'free') {
return { provider: LLMProvider.GOOGLE, model: 'gemini-2.0-flash-exp' };
}
// Paid tiers: use GPT-4o-mini for cost efficiency
return { provider: LLMProvider.OPENAI, model: 'gpt-4o-mini' };
private routeByCost(_license: UserLicense): ModelConfig {
// All tiers: use Haiku for cost efficiency
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
}
/**
* Check if model is allowed for user's license
*/
private isModelAllowed(model: ModelConfig, license: UserLicense): boolean {
// Free tier: only cheap models
// Free tier: only Haiku
if (license.licenseType === 'free') {
const allowedModels = ['gemini-2.0-flash-exp', 'gpt-4o-mini', 'claude-3-5-haiku-20241022'];
const allowedModels = ['claude-3-5-haiku-20241022'];
return allowedModels.includes(model.model);
}

View File

@@ -1,16 +1,149 @@
import Fastify from 'fastify';
import websocket from '@fastify/websocket';
import cors from '@fastify/cors';
import Redis from 'ioredis';
import { readFileSync } from 'fs';
import { load as loadYaml } from 'js-yaml';
import { UserService } from './db/user-service.js';
import { Authenticator } from './auth/authenticator.js';
import { createBetterAuth } from './auth/better-auth-config.js';
import { AuthService } from './auth/auth-service.js';
import { AuthRoutes } from './routes/auth-routes.js';
import { WebSocketHandler } from './channels/websocket-handler.js';
import { TelegramHandler } from './channels/telegram-handler.js';
import { KubernetesClient } from './k8s/client.js';
import { ContainerManager } from './k8s/container-manager.js';
// Catch unhandled promise rejections for better debugging
process.on('unhandledRejection', (reason: any, promise) => {
  // Dump every interesting facet of the rejection, then abort: an
  // unhandled rejection leaves the gateway in an unknown state.
  const facets: Array<[string, unknown]> = [
    ['Reason:', reason],
    ['Message:', reason?.message],
    ['Stack:', reason?.stack],
    ['Cause:', reason?.cause],
    ['Promise:', promise],
  ];
  console.error('=== UNHANDLED PROMISE REJECTION ===');
  for (const [label, value] of facets) {
    console.error(label, value);
  }
  console.error('===================================');
  process.exit(1);
});
import {
SessionRegistry,
EventSubscriber,
EventRouter,
DeliveryService,
} from './events/index.js';
import { QdrantClient } from './clients/qdrant-client.js';
import { EmbeddingService, RAGRetriever, DocumentLoader } from './harness/memory/index.js';
import { join } from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Build the gateway's runtime configuration.
 *
 * Values are resolved with a three-level precedence: YAML config file,
 * then environment variable, then hard-coded default. Sensitive values
 * (API keys, tokens, S3 credentials) come from a separate secrets YAML.
 * Both files are optional — failure to read either is logged and the
 * remaining sources are used, so the function never throws.
 */
function loadConfig() {
  const configPath = process.env.CONFIG_PATH || '/config/config.yaml';
  const secretsPath = process.env.SECRETS_PATH || '/config/secrets.yaml';

  let configData: any = {};
  let secretsData: any = {};

  try {
    const configFile = readFileSync(configPath, 'utf8');
    // '|| {}' guards against an empty YAML file (loadYaml returns undefined)
    configData = loadYaml(configFile) || {};
    console.log(`Loaded configuration from ${configPath}`);
  } catch (error: any) {
    console.warn(`Could not load config from ${configPath}: ${error.message}, using defaults`);
  }

  try {
    const secretsFile = readFileSync(secretsPath, 'utf8');
    secretsData = loadYaml(secretsFile) || {};
    console.log(`Loaded secrets from ${secretsPath}`);
  } catch (error: any) {
    console.warn(`Could not load secrets from ${secretsPath}: ${error.message}`);
  }

  return {
    // HTTP server
    port: configData.server?.port || parseInt(process.env.PORT || '3000'),
    host: configData.server?.host || process.env.HOST || '0.0.0.0',
    logLevel: configData.server?.log_level || process.env.LOG_LEVEL || 'info',
    corsOrigin: configData.server?.cors_origin || process.env.CORS_ORIGIN || '*',
    baseUrl: configData.server?.base_url || process.env.BASE_URL || 'http://localhost:3000',
    // Origins trusted by the auth layer; defaults cover local web dev ports
    trustedOrigins: configData.server?.trusted_origins || [
      process.env.BASE_URL || 'http://localhost:3000',
      'http://localhost:5173',
      'http://localhost:8080',
    ],

    databaseUrl: configData.database?.url || process.env.DATABASE_URL || 'postgresql://localhost/dexorder',

    // Authentication configuration
    // NOTE: the fallback secret is a placeholder — it must be overridden in production
    authSecret: secretsData.auth?.secret || process.env.AUTH_SECRET || 'change-me-in-production',

    // LLM provider API keys (all optional; at least one is validated at startup)
    providerConfig: {
      anthropicApiKey: secretsData.llm_providers?.anthropic_api_key || process.env.ANTHROPIC_API_KEY,
      openaiApiKey: secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
      googleApiKey: secretsData.llm_providers?.google_api_key || process.env.GOOGLE_API_KEY,
      openrouterApiKey: secretsData.llm_providers?.openrouter_api_key || process.env.OPENROUTER_API_KEY,
    },

    telegramBotToken: secretsData.telegram?.bot_token || process.env.TELEGRAM_BOT_TOKEN || '',

    // Email service configuration
    emailServiceKey: secretsData.email?.service_key || process.env.EMAIL_SERVICE_KEY,
    emailFromAddress: configData.email?.from_address || process.env.EMAIL_FROM_ADDRESS || 'noreply@dexorder.com',

    // Push notification service configuration
    pushServiceKey: secretsData.push?.service_key || process.env.PUSH_SERVICE_KEY,

    // Event router configuration (ZeroMQ bind endpoint)
    eventRouterBind: configData.events?.router_bind || process.env.EVENT_ROUTER_BIND || 'tcp://*:5571',

    // Redis configuration (for harness memory layer)
    redisUrl: configData.redis?.url || process.env.REDIS_URL || 'redis://localhost:6379',

    // Qdrant configuration (for RAG)
    qdrant: {
      url: configData.qdrant?.url || process.env.QDRANT_URL || 'http://localhost:6333',
      apiKey: secretsData.qdrant?.api_key || process.env.QDRANT_API_KEY,
      collectionName: configData.qdrant?.collection || process.env.QDRANT_COLLECTION || 'gateway_memory',
    },

    // Iceberg configuration (for durable storage via REST catalog + S3)
    iceberg: {
      catalogUri: configData.iceberg?.catalog_uri || process.env.ICEBERG_CATALOG_URI || 'http://iceberg-catalog:8181',
      namespace: configData.iceberg?.namespace || process.env.ICEBERG_NAMESPACE || 'gateway',
      s3Endpoint: configData.iceberg?.s3_endpoint || process.env.S3_ENDPOINT,
      s3AccessKey: secretsData.iceberg?.s3_access_key || process.env.S3_ACCESS_KEY,
      s3SecretKey: secretsData.iceberg?.s3_secret_key || process.env.S3_SECRET_KEY,
    },

    // Embedding configuration (for RAG); the apiKey chain falls back to the
    // OpenAI key so EMBEDDING_PROVIDER=openai works without a dedicated key
    embedding: {
      provider: (configData.embedding?.provider || process.env.EMBEDDING_PROVIDER || 'ollama') as 'ollama' | 'openai' | 'anthropic' | 'local' | 'voyage' | 'cohere' | 'none',
      model: configData.embedding?.model || process.env.EMBEDDING_MODEL,
      apiKey: secretsData.embedding?.api_key || process.env.EMBEDDING_API_KEY || secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
      ollamaUrl: configData.embedding?.ollama_url || process.env.OLLAMA_URL || 'http://localhost:11434',
    },

    // Kubernetes configuration (agent container orchestration)
    kubernetes: {
      namespace: configData.kubernetes?.namespace || process.env.KUBERNETES_NAMESPACE || 'dexorder-agents',
      // '??' (not '||') so an explicit YAML 'false' is respected
      inCluster: configData.kubernetes?.in_cluster ?? (process.env.KUBERNETES_IN_CLUSTER === 'true'),
      context: configData.kubernetes?.context || process.env.KUBERNETES_CONTEXT,
      agentImage: configData.kubernetes?.agent_image || process.env.AGENT_IMAGE || 'ghcr.io/dexorder/agent:latest',
      sidecarImage: configData.kubernetes?.sidecar_image || process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest',
      storageClass: configData.kubernetes?.storage_class || process.env.AGENT_STORAGE_CLASS || 'standard',
    },
  };
}
const config = loadConfig();
const app = Fastify({
logger: {
level: process.env.LOG_LEVEL || 'info',
level: config.logLevel,
transport: {
target: 'pino-pretty',
options: {
@@ -22,33 +155,6 @@ const app = Fastify({
},
});
// Configuration from environment
const config = {
port: parseInt(process.env.PORT || '3000'),
host: process.env.HOST || '0.0.0.0',
databaseUrl: process.env.DATABASE_URL || 'postgresql://localhost/dexorder',
// LLM provider API keys
providerConfig: {
anthropicApiKey: process.env.ANTHROPIC_API_KEY,
openaiApiKey: process.env.OPENAI_API_KEY,
googleApiKey: process.env.GOOGLE_API_KEY,
openrouterApiKey: process.env.OPENROUTER_API_KEY,
},
telegramBotToken: process.env.TELEGRAM_BOT_TOKEN || '',
// Kubernetes configuration
kubernetes: {
namespace: process.env.KUBERNETES_NAMESPACE || 'dexorder-agents',
inCluster: process.env.KUBERNETES_IN_CLUSTER === 'true',
context: process.env.KUBERNETES_CONTEXT,
agentImage: process.env.AGENT_IMAGE || 'ghcr.io/dexorder/agent:latest',
sidecarImage: process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest',
storageClass: process.env.AGENT_STORAGE_CLASS || 'standard',
},
};
// Validate at least one LLM provider is configured
const hasAnyProvider = Object.values(config.providerConfig).some(key => !!key);
if (!hasAnyProvider) {
@@ -58,7 +164,7 @@ if (!hasAnyProvider) {
// Register plugins
await app.register(cors, {
origin: process.env.CORS_ORIGIN || '*',
origin: config.corsOrigin,
});
await app.register(websocket, {
@@ -70,6 +176,61 @@ await app.register(websocket, {
// Initialize services
const userService = new UserService(config.databaseUrl);
// Initialize Better Auth
let betterAuth;
try {
app.log.info({ databaseUrl: config.databaseUrl.replace(/:[^:@]+@/, ':***@') }, 'Initializing Better Auth');
betterAuth = await createBetterAuth({
databaseUrl: config.databaseUrl,
secret: config.authSecret,
baseUrl: config.baseUrl,
trustedOrigins: config.trustedOrigins,
logger: app.log,
});
app.log.info('Better Auth initialized successfully');
} catch (error: any) {
app.log.error({ error, message: error.message, stack: error.stack }, 'Failed to initialize Better Auth');
throw error;
}
// Initialize Auth Service
const authService = new AuthService({
auth: betterAuth,
pool: userService.getPool(),
logger: app.log,
});
// Connect UserService with AuthService for JWT verification
userService.setAuthService(authService);
// Initialize Redis client (for harness memory layer)
const redis = new Redis(config.redisUrl, {
  maxRetriesPerRequest: 3,
  connectTimeout: 10000, // 10 seconds
  // Linear backoff: 50ms per attempt, capped at 2s; give up after 5 tries.
  retryStrategy: (attempt) => {
    if (attempt > 5) {
      app.log.error('Redis connection failed after 5 retries');
      return null; // returning null tells ioredis to stop retrying
    }
    return Math.min(attempt * 50, 2000);
  },
  lazyConnect: true, // defer the actual connection until redis.connect()
});
// Initialize Qdrant client (for RAG)
const qdrantClient = new QdrantClient(config.qdrant, app.log);
// Initialize Iceberg client (for durable storage)
// const icebergClient = new IcebergClient(config.iceberg, app.log);
app.log.info({
redis: config.redisUrl,
qdrant: config.qdrant.url,
iceberg: config.iceberg.catalogUri,
embeddingProvider: config.embedding.provider,
}, 'Harness storage clients configured');
// Initialize Kubernetes client and container manager
const k8sClient = new KubernetesClient({
namespace: config.kubernetes.namespace,
@@ -86,41 +247,170 @@ const containerManager = new ContainerManager({
namespace: config.kubernetes.namespace,
logger: app.log,
});
app.log.debug('Container manager initialized');
const authenticator = new Authenticator({
userService,
containerManager,
logger: app.log,
});
app.log.debug('Authenticator initialized');
// Initialize event system
const sessionRegistry = new SessionRegistry();
app.log.debug('Session registry initialized');
const deliveryService = new DeliveryService({
telegramBotToken: config.telegramBotToken,
emailServiceKey: config.emailServiceKey,
emailFromAddress: config.emailFromAddress,
pushServiceKey: config.pushServiceKey,
logger: app.log,
});
app.log.debug('Delivery service initialized');
const eventSubscriber = new EventSubscriber(sessionRegistry, app.log);
app.log.debug('Event subscriber initialized');
const eventRouter = new EventRouter({
sessions: sessionRegistry,
delivery: deliveryService,
logger: app.log,
bindEndpoint: config.eventRouterBind,
});
app.log.debug('Event router initialized');
// Initialize channel handlers
const websocketHandler = new WebSocketHandler({
authenticator,
providerConfig: config.providerConfig,
sessionRegistry,
eventSubscriber,
});
app.log.debug('WebSocket handler initialized');
const telegramHandler = new TelegramHandler({
authenticator,
providerConfig: config.providerConfig,
telegramBotToken: config.telegramBotToken,
});
app.log.debug('Telegram handler initialized');
// Initialize auth routes
app.log.debug('Initializing auth routes...');
const authRoutes = new AuthRoutes({
authService,
betterAuth,
});
// Register routes
app.log.debug('Registering auth routes...');
try {
authRoutes.register(app);
app.log.debug('Auth routes registered successfully');
} catch (error: any) {
app.log.error({ error, message: error.message, stack: error.stack }, 'Failed to register auth routes');
throw error;
}
app.log.debug('Registering websocket handler...');
websocketHandler.register(app);
app.log.debug('Registering telegram handler...');
telegramHandler.register(app);
app.log.debug('All routes registered');
// Health check
app.get('/health', async () => {
return {
const health: any = {
status: 'ok',
timestamp: new Date().toISOString(),
activeSessions: sessionRegistry.size(),
eventSubscriptions: eventSubscriber.getSubscriptionCount(),
processedEvents: eventRouter.getProcessedEventCount(),
};
// Add RAG stats if available
if (app.hasDecorator('ragRetriever')) {
try {
const ragStats = await (app as any).ragRetriever.getStats();
health.rag = {
vectorCount: ragStats.vectorCount,
indexedCount: ragStats.indexedCount,
};
} catch (error) {
// Ignore errors in health check
}
}
return health;
});
// Admin endpoints

// Re-ingest all knowledge documents into the RAG store on demand.
app.post('/admin/reload-knowledge', async (_request, reply) => {
  if (!app.hasDecorator('documentLoader')) {
    return reply.code(503).send({ error: 'Document loader not initialized' });
  }

  try {
    app.log.info('Manual knowledge reload requested');
    const stats = await (app as any).documentLoader.loadAll();
    return { success: true, stats, timestamp: new Date().toISOString() };
  } catch (error: any) {
    app.log.error({ error }, 'Failed to reload knowledge');
    return reply.code(500).send({
      error: 'Failed to reload knowledge',
      message: error.message,
    });
  }
});

// Report document-loader and vector-store statistics.
app.get('/admin/knowledge-stats', async (_request, reply) => {
  if (!app.hasDecorator('documentLoader')) {
    return reply.code(503).send({ error: 'Document loader not initialized' });
  }

  try {
    const loaderStats = (app as any).documentLoader.getStats();
    const ragStats = await (app as any).ragRetriever.getStats();
    return {
      loader: loaderStats,
      rag: {
        vectorCount: ragStats.vectorCount,
        indexedCount: ragStats.indexedCount,
        collectionSize: ragStats.collectionSize,
      },
      timestamp: new Date().toISOString(),
    };
  } catch (error: any) {
    app.log.error({ error }, 'Failed to get knowledge stats');
    return reply.code(500).send({
      error: 'Failed to get knowledge stats',
      message: error.message,
    });
  }
});
// Graceful shutdown
const shutdown = async () => {
app.log.info('Shutting down gracefully...');
try {
// Stop event system first
await eventSubscriber.stop();
await eventRouter.stop();
// Disconnect Redis
redis.disconnect();
await userService.close();
await app.close();
app.log.info('Shutdown complete');
@@ -136,6 +426,61 @@ process.on('SIGINT', shutdown);
// Start server
try {
app.log.debug('Starting server initialization...');
// Connect to Redis
app.log.debug('Connecting to Redis...');
await redis.connect();
app.log.info('Redis connected');
// Initialize Qdrant collection
app.log.debug('Initializing Qdrant...');
try {
await qdrantClient.initialize();
app.log.info('Qdrant collection initialized');
} catch (error) {
app.log.warn({ error }, 'Qdrant initialization failed - RAG will not be available');
}
// Initialize RAG system and load global knowledge
app.log.debug('Initializing RAG system...');
try {
// Initialize embedding service
const embeddingService = new EmbeddingService(config.embedding, app.log);
const vectorDimension = embeddingService.getDimensions();
// Initialize RAG retriever
const ragRetriever = new RAGRetriever(config.qdrant, app.log, vectorDimension);
await ragRetriever.initialize();
// Initialize document loader
const knowledgeDir = join(__dirname, '..', 'knowledge');
const documentLoader = new DocumentLoader(
{ knowledgeDir },
embeddingService,
ragRetriever,
app.log
);
// Load all knowledge documents
const loadStats = await documentLoader.loadAll();
app.log.info(loadStats, 'Global knowledge loaded into RAG');
// Store references for admin endpoints
app.decorate('documentLoader', documentLoader);
app.decorate('ragRetriever', ragRetriever);
} catch (error) {
app.log.warn({ error }, 'Failed to load global knowledge - RAG will use existing data');
}
// Start event system
app.log.debug('Starting event subscriber...');
await eventSubscriber.start();
app.log.debug('Starting event router...');
await eventRouter.start();
app.log.debug('Event system started');
app.log.debug('Starting Fastify server...');
await app.listen({
port: config.port,
host: config.host,
@@ -145,6 +490,9 @@ try {
{
port: config.port,
host: config.host,
eventRouterBind: config.eventRouterBind,
redis: config.redisUrl,
qdrant: config.qdrant.url,
},
'Gateway server started'
);

View File

@@ -0,0 +1,262 @@
import type { FastifyInstance, FastifyRequest, FastifyReply } from 'fastify';
import type { AuthService } from '../auth/auth-service.js';
import type { BetterAuthInstance } from '../auth/better-auth-config.js';
export interface AuthRoutesConfig {
authService: AuthService;
betterAuth: BetterAuthInstance;
}
/**
* Authentication routes for user registration, login, and passkey management
*/
export class AuthRoutes {
private config: AuthRoutesConfig;
constructor(config: AuthRoutesConfig) {
this.config = config;
}
/**
* Register authentication routes
*/
register(app: FastifyInstance): void {
// Better Auth provides built-in routes via its handler
// Mount Better Auth's automatic routes at /api/auth/*
app.all('/api/auth/*', async (request: FastifyRequest, reply: FastifyReply) => {
// Better Auth handler processes the request
const response = await this.config.betterAuth.handler(request.raw as any);
// Forward the response to Fastify
reply.status(response.status);
response.headers.forEach((value, key) => {
reply.header(key, value);
});
return response.body;
});
// Custom routes for integration with existing system
/**
* Register new user with email and password
* POST /auth/register
*/
app.post(
'/auth/register',
{
schema: {
body: {
type: 'object',
required: ['email', 'password'],
properties: {
email: { type: 'string', format: 'email' },
password: { type: 'string', minLength: 8 },
name: { type: 'string' },
},
},
},
},
async (request, reply) => {
const { email, password, name } = request.body as {
email: string;
password: string;
name?: string;
};
const result = await this.config.authService.createUser(email, password, name);
if (result.error) {
return reply.code(400).send({
error: result.error,
});
}
// Ensure user has a license
await this.config.authService.ensureUserLicense(result.userId, email);
// Auto sign in after registration
const signInResult = await this.config.authService.signIn(email, password);
if (signInResult.error) {
return reply.code(500).send({
error: 'User created but auto sign-in failed',
});
}
return {
success: true,
userId: result.userId,
token: signInResult.token,
};
}
);
/**
* Sign in with email and password
* POST /auth/login
*/
app.post(
'/auth/login',
{
schema: {
body: {
type: 'object',
required: ['email', 'password'],
properties: {
email: { type: 'string', format: 'email' },
password: { type: 'string' },
},
},
},
},
async (request, reply) => {
const { email, password } = request.body as {
email: string;
password: string;
};
const result = await this.config.authService.signIn(email, password);
if (result.error) {
return reply.code(401).send({
error: result.error,
});
}
return {
success: true,
userId: result.userId,
token: result.token,
};
}
);
/**
* Sign out
* POST /auth/logout
*/
app.post('/auth/logout', async (request, reply) => {
const token = this.extractBearerToken(request);
if (!token) {
return reply.code(401).send({
error: 'No token provided',
});
}
const result = await this.config.authService.signOut(token);
return {
success: result.success,
};
});
/**
* Get current session
* GET /auth/session
*/
app.get('/auth/session', async (request, reply) => {
const token = this.extractBearerToken(request);
if (!token) {
return reply.code(401).send({
error: 'No token provided',
});
}
const session = await this.config.authService.getSession(token);
if (!session) {
return reply.code(401).send({
error: 'Invalid or expired session',
});
}
return {
user: session.user,
session: session.session,
};
});
// Passkey support disabled for now (beta - email/password only)
// /**
// * Passkey registration - generate options
// * POST /auth/passkey/register/options
// */
// app.post('/auth/passkey/register/options', async (request, reply) => {
// const token = this.extractBearerToken(request);
//
// if (!token) {
// return reply.code(401).send({
// error: 'Authentication required',
// });
// }
//
// try {
// const options = await this.config.authService.registerPasskey('', token);
// return options;
// } catch (error: any) {
// app.log.error({ error }, 'Passkey registration options failed');
// return reply.code(500).send({
// error: error.message || 'Failed to generate passkey options',
// });
// }
// });
//
// /**
// * Passkey authentication
// * POST /auth/passkey/authenticate
// */
// app.post(
// '/auth/passkey/authenticate',
// {
// schema: {
// body: {
// type: 'object',
// required: ['credential'],
// },
// },
// },
// async (request, reply) => {
// const { credential } = request.body as { credential: any };
//
// const result = await this.config.authService.authenticatePasskey(credential);
//
// if (result.error) {
// return reply.code(401).send({
// error: result.error,
// });
// }
//
// return {
// success: true,
// userId: result.userId,
// token: result.token,
// };
// }
// );
/**
* Health check for auth system
* GET /auth/health
*/
// Liveness probe for the auth subsystem: static payload plus a timestamp.
app.get('/auth/health', async () => ({
  status: 'ok',
  timestamp: new Date().toISOString(),
  authProvider: 'better-auth',
}));
}
/**
* Extract bearer token from request headers
*/
private extractBearerToken(request: FastifyRequest): string | null {
  // Accept only an "Authorization: Bearer <token>" header; anything
  // else (absent header, other scheme) yields null.
  const header = request.headers.authorization;
  return header?.startsWith('Bearer ') ? header.slice(7) : null;
}
}

View File

@@ -13,20 +13,25 @@ const StrategyAnalysisState = Annotation.Root({
// Analysis steps
codeReview: Annotation<string | null>({
value: (left, right) => right ?? left,
default: () => null,
}),
backtestResults: Annotation<Record<string, unknown> | null>({
value: (left, right) => right ?? left,
default: () => null,
}),
riskAssessment: Annotation<string | null>({
value: (left, right) => right ?? left,
default: () => null,
}),
humanApproved: Annotation<boolean>({
value: (left, right) => right ?? left,
default: () => false,
}),
// Final output
recommendation: Annotation<string | null>({
value: (left, right) => right ?? left,
default: () => null,
}),
});
@@ -98,7 +103,7 @@ Focus on: drawdown, win rate, Sharpe ratio, position sizing, and risk of ruin.`;
};
// Node: Human Approval (placeholder - would integrate with UI)
const humanApprovalNode = async (state: StrategyAnalysisStateType) => {
const humanApprovalNode = async (_state: StrategyAnalysisStateType) => {
logger.info('Strategy workflow: Awaiting human approval');
// In real implementation, this would pause and wait for user input