client-py connected

This commit is contained in:
2026-03-27 16:33:40 -04:00
parent c76887ab92
commit c3a8fae132
55 changed files with 1598 additions and 426 deletions

1
.idea/ai.iml generated
View File

@@ -16,6 +16,7 @@
<excludeFolder url="file://$MODULE_DIR$/ingestor/src/proto" />
<excludeFolder url="file://$MODULE_DIR$/relay/protobuf" />
<excludeFolder url="file://$MODULE_DIR$/relay/target" />
<excludeFolder url="file://$MODULE_DIR$/doc/competition" />
</content>
<orderEntry type="jdk" jdkName="Python 3.12 (ai)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />

View File

@@ -151,14 +151,12 @@ rebuild_images() {
if [ "$service" == "all" ] || [ "$service" == "gateway" ]; then
echo -e "${GREEN}→${NC} Building gateway..."
GATEWAY_TAG=$(build_and_get_tag gateway) || exit 1
docker tag "dexorder/ai-gateway:$GATEWAY_TAG" "dexorder/gateway:$GATEWAY_TAG"
fi
# Build lifecycle-sidecar (Go binary)
if [ "$service" == "all" ] || [ "$service" == "lifecycle-sidecar" ] || [ "$service" == "sidecar" ]; then
echo -e "${GREEN}→${NC} Building lifecycle-sidecar..."
SIDECAR_TAG=$(build_and_get_tag lifecycle-sidecar) || exit 1
docker tag "dexorder/ai-lifecycle-sidecar:$SIDECAR_TAG" "lifecycle-sidecar:$SIDECAR_TAG"
fi
# Build web (Vue.js application)
@@ -399,7 +397,7 @@ create_dev_user() {
'http://localhost:8080/mcp',
'{\"maxIndicators\":50,\"maxStrategies\":20,\"maxBacktestDays\":365,\"realtimeData\":true,\"customExecutors\":true,\"apiAccess\":true}',
'{\"maxConcurrentSessions\":5,\"maxMessagesPerDay\":1000,\"maxTokensPerMessage\":8192,\"rateLimitPerMinute\":60}',
'{\"provider\":\"anthropic\",\"model\":\"claude-3-5-sonnet-20241022\",\"temperature\":0.7}'
'{\"provider\":\"anthropic\",\"model\":\"claude-sonnet-4-6\",\"temperature\":0.7}'
)
ON CONFLICT (user_id) DO UPDATE SET
license_type = EXCLUDED.license_type,
@@ -651,6 +649,9 @@ case "$COMMAND" in
kubectl wait --for=delete pod -l app=qdrant --timeout=60s 2>/dev/null || true
# Now delete PVCs
delete_pvcs all
# Delete dexorder-agents namespace
echo -e "${GREEN}→${NC} Deleting dexorder-agents namespace..."
kubectl delete namespace dexorder-agents 2>/dev/null || true
minikube stop
echo -e "${GREEN}✓ Minikube stopped and PVCs deleted${NC}"
echo -e "${YELLOW}Tip: Use 'bin/dev stop --keep-data' to preserve PVCs${NC}"

View File

@@ -68,6 +68,9 @@ ENV PYTHONUNBUFFERED=1 \
ZMQ_XPUB_PORT=5570 \
ZMQ_GATEWAY_ENDPOINT=tcp://gateway:5571 \
MCP_SERVER_NAME=dexorder-user \
MCP_TRANSPORT=sse \
MCP_HTTP_PORT=3000 \
MCP_HTTP_HOST=0.0.0.0 \
IDLE_TIMEOUT_MINUTES=15 \
ENABLE_IDLE_SHUTDOWN=true

View File

@@ -11,6 +11,10 @@ Provides high-level APIs for:
__version__ = "0.1.0"
# Suppress the protobuf warning - it's handled at import time
import warnings
warnings.filterwarnings('ignore', message='Warning: Protobuf files not found')
from .ohlc_client import OHLCClient
from .iceberg_client import IcebergClient
from .history_client import HistoryClient

View File

@@ -0,0 +1,559 @@
"""
Category-based File Management Tools for MCP Server
Provides write/edit/read/list tools for categorized Python scripts with automatic
directory structure, metadata management, and validation harnesses.
Categories:
- strategy: Trading strategies with specific data feed requirements
- indicator: Technical indicators with configurable parameters
- research: Research scripts with pyplot visualization support
File Structure:
{DATA_DIR}/{category}/{sanitized_name}/
- implementation.py # Python implementation
- metadata.json # Category-specific metadata (includes name and description)
After write/edit operations, a category-specific test harness runs to validate
the code and capture errors/output for agent feedback.
"""
import json
import logging
import re
import subprocess
import sys
from dataclasses import dataclass, asdict
from enum import Enum
from pathlib import Path
from typing import Any, Optional
log = logging.getLogger(__name__)
# =============================================================================
# Categories and Metadata
# =============================================================================
class Category(str, Enum):
    """Supported file categories.

    Subclasses str so members compare equal to their raw string values
    (e.g. Category.STRATEGY == "strategy") and serialize cleanly to JSON.
    """
    STRATEGY = "strategy"    # Trading strategies with specific data feed requirements
    INDICATOR = "indicator"  # Technical indicators with configurable parameters
    RESEARCH = "research"    # Research scripts with pyplot visualization support
@dataclass
class BaseMetadata:
    """Base metadata shared by all categories; serialized to metadata.json."""
    name: str         # Display name (can have special chars)
    description: str  # LLM-generated description
@dataclass
class StrategyMetadata(BaseMetadata):
    """Metadata for trading strategies."""
    # Required data feeds (e.g., ["BTC/USD", "ETH/USD"]).  Annotated as
    # Optional because None is the accepted sentinel default (a bare list
    # default would be shared between instances); __post_init__ normalizes
    # it to an empty list so consumers can always iterate safely.
    data_feeds: Optional[list[str]] = None

    def __post_init__(self):
        # Normalize the None sentinel so data_feeds is always a list.
        if self.data_feeds is None:
            self.data_feeds = []
@dataclass
class IndicatorMetadata(BaseMetadata):
    """Metadata for technical indicators."""
    default_length: int = 14  # Default period/length parameter
@dataclass
class ResearchMetadata(BaseMetadata):
    """Metadata for research scripts.

    Currently identical to BaseMetadata; kept as a distinct class so the
    METADATA_CLASSES registry stays uniform and future fields (data sources,
    dependencies, etc.) have a home.
    """
    pass
# Metadata class registry: maps each category to the dataclass used to
# validate caller-supplied metadata fields in write()/edit().  Constructing
# the dataclass with ** rejects unknown keys and missing required fields.
METADATA_CLASSES = {
    Category.STRATEGY: StrategyMetadata,
    Category.INDICATOR: IndicatorMetadata,
    Category.RESEARCH: ResearchMetadata,
}
# =============================================================================
# Utilities
# =============================================================================
def sanitize_name(name: str) -> str:
    """
    Sanitize a display name for use as a directory name.

    Non-alphanumeric characters (except spaces and hyphens) become
    underscores, runs of whitespace/hyphens/underscores collapse to a single
    underscore, and leading/trailing underscores are stripped.

    Examples:
        "Tim's 5/13 EMA+" -> "Tim_s_5_13_EMA"
        "My Strategy (v2)" -> "My_Strategy_v2"

    Returns:
        The sanitized name, or "unnamed" if nothing survives sanitization
        (e.g. the name was all punctuation) — this prevents an empty path
        component that would alias the category root directory.
    """
    # Replace non-alphanumeric chars (except spaces/hyphens) with underscores
    sanitized = re.sub(r'[^\w\s\-]', '_', name)
    # Replace spaces and hyphens with underscores
    sanitized = re.sub(r'[\s\-]+', '_', sanitized)
    # Remove duplicate underscores
    sanitized = re.sub(r'_+', '_', sanitized)
    # Strip leading/trailing underscores
    sanitized = sanitized.strip('_')
    # Guard against degenerate names that sanitize to nothing.
    return sanitized or "unnamed"
def get_category_path(data_dir: Path, category: Category, name: str) -> Path:
    """
    Resolve the directory that holds a category item's files.

    Args:
        data_dir: Base data directory.
        category: Category enum member.
        name: Display name; sanitized before use as a path component.

    Returns:
        Path of the form {data_dir}/{category}/{sanitized_name}.
    """
    return data_dir / category.value / sanitize_name(name)
# =============================================================================
# Category File Manager
# =============================================================================
class CategoryFileManager:
    """
    Manages category-based file operations with validation.

    Each item lives under {data_dir}/{category}/{sanitized_name}/ and holds:
      - implementation.py : the Python implementation
      - metadata.json     : metadata validated against METADATA_CLASSES[category]

    After write (and edit-with-code) operations a category-specific harness
    runs so agents receive immediate error/output feedback.
    """

    def __init__(self, data_dir: Path):
        self.data_dir = data_dir
        # Create category directories up front so listing never hits a
        # missing directory.
        for category in Category:
            (data_dir / category.value).mkdir(parents=True, exist_ok=True)

    def write(
        self,
        category: str,
        name: str,
        description: str,
        code: str,
        metadata: Optional[dict] = None
    ) -> dict[str, Any]:
        """
        Write a new category file with metadata.

        Args:
            category: Category name (strategy, indicator, research)
            name: Display name for the item
            description: LLM-generated description (required)
            code: Python implementation code
            metadata: Additional category-specific metadata fields

        Returns:
            dict with:
                - success: bool
                - path: str - path to the implementation file
                - validation: dict - results from test harness
                - error: str (if any)
        """
        try:
            cat = Category(category)
        except ValueError:
            return {
                "success": False,
                "error": f"Invalid category '{category}'. Must be one of: {', '.join(c.value for c in Category)}"
            }
        # Get item directory
        item_dir = get_category_path(self.data_dir, cat, name)
        item_dir.mkdir(parents=True, exist_ok=True)
        # Write implementation
        impl_path = item_dir / "implementation.py"
        try:
            impl_path.write_text(code)
            log.info(f"Wrote {cat.value} implementation: {impl_path}")
        except Exception as e:
            return {"success": False, "error": f"Failed to write implementation: {e}"}
        # Build metadata.  Copy the caller's dict so we never mutate their
        # argument; name/description always override caller-supplied keys.
        meta_dict = dict(metadata) if metadata else {}
        meta_dict["name"] = name
        meta_dict["description"] = description
        # Validate by constructing the category dataclass (rejects unknown
        # keys / missing required fields), then persist as JSON.
        try:
            metadata_class = METADATA_CLASSES[cat]
            meta_obj = metadata_class(**meta_dict)
            validated_meta = asdict(meta_obj)
            meta_path = item_dir / "metadata.json"
            meta_path.write_text(json.dumps(validated_meta, indent=2))
            log.info(f"Wrote metadata: {meta_path}")
        except Exception as e:
            return {"success": False, "error": f"Failed to write metadata: {e}"}
        # Run validation harness
        validation = self._validate(cat, item_dir)
        return {
            "success": validation["success"],
            "path": str(impl_path),
            "validation": validation,
        }

    def edit(
        self,
        category: str,
        name: str,
        code: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[dict] = None
    ) -> dict[str, Any]:
        """
        Edit an existing category file.

        Args:
            category: Category name
            name: Display name for the item
            code: Python implementation code (optional, omit to keep existing)
            description: Updated description (optional, omit to keep existing)
            metadata: Additional metadata updates (optional)

        Returns:
            dict with:
                - success: bool
                - path: str - path to the implementation file
                - validation: dict - results from test harness (if code updated)
                - error: str (if any)
        """
        try:
            cat = Category(category)
        except ValueError:
            return {"success": False, "error": f"Invalid category '{category}'"}
        item_dir = get_category_path(self.data_dir, cat, name)
        if not item_dir.exists():
            return {"success": False, "error": f"Item '{name}' does not exist in category '{category}'"}
        impl_path = item_dir / "implementation.py"
        meta_path = item_dir / "metadata.json"
        # Load existing metadata
        try:
            existing_meta = {}
            if meta_path.exists():
                existing_meta = json.loads(meta_path.read_text())
        except Exception as e:
            return {"success": False, "error": f"Failed to read existing metadata: {e}"}
        # Update code if provided
        if code is not None:
            try:
                impl_path.write_text(code)
                log.info(f"Updated {cat.value} implementation: {impl_path}")
            except Exception as e:
                return {"success": False, "error": f"Failed to write implementation: {e}"}
        # Merge metadata updates over the existing values.
        updated_meta = existing_meta.copy()
        if description is not None:
            updated_meta["description"] = description
        if metadata:
            updated_meta.update(metadata)
        # Validate and write metadata (same dataclass round-trip as write()).
        try:
            metadata_class = METADATA_CLASSES[cat]
            meta_obj = metadata_class(**updated_meta)
            validated_meta = asdict(meta_obj)
            meta_path.write_text(json.dumps(validated_meta, indent=2))
            log.info(f"Updated metadata: {meta_path}")
        except Exception as e:
            return {"success": False, "error": f"Failed to write metadata: {e}"}
        # Run validation harness only if code was updated; metadata-only
        # edits cannot introduce code errors.
        validation = None
        if code is not None:
            validation = self._validate(cat, item_dir)
        result = {
            "success": True,
            "path": str(impl_path),
        }
        if validation:
            result["validation"] = validation
            result["success"] = validation["success"]
        return result

    def read(
        self,
        category: str,
        name: str
    ) -> dict[str, Any]:
        """
        Read a category file and its metadata.

        Returns:
            dict with:
                - exists: bool
                - code: str - implementation code
                - metadata: dict - metadata
                - error: str (if any)
        """
        try:
            cat = Category(category)
        except ValueError:
            return {"exists": False, "error": f"Invalid category '{category}'"}
        item_dir = get_category_path(self.data_dir, cat, name)
        if not item_dir.exists():
            return {"exists": False}
        impl_path = item_dir / "implementation.py"
        meta_path = item_dir / "metadata.json"
        try:
            code = impl_path.read_text() if impl_path.exists() else ""
            metadata = {}
            if meta_path.exists():
                metadata = json.loads(meta_path.read_text())
            return {
                "exists": True,
                "code": code,
                "metadata": metadata,
            }
        except Exception as e:
            return {"exists": False, "error": str(e)}

    def list_items(self, category: str) -> dict[str, Any]:
        """
        List all items in a category with names and descriptions.

        Returns:
            dict with:
                - items: list of dicts with name, description, and full metadata
                - error: str (if any)
        """
        try:
            cat = Category(category)
        except ValueError:
            return {"error": f"Invalid category '{category}'"}
        cat_dir = self.data_dir / cat.value
        items = []
        # Sort for a deterministic listing order (iterdir() order is
        # filesystem-dependent).
        for item_dir in sorted(cat_dir.iterdir()):
            if not item_dir.is_dir():
                continue
            meta_path = item_dir / "metadata.json"
            # Directories without metadata.json are skipped; corrupt metadata
            # is logged and skipped rather than failing the whole listing.
            if meta_path.exists():
                try:
                    metadata = json.loads(meta_path.read_text())
                    items.append({
                        "name": metadata.get("name", item_dir.name),
                        "description": metadata.get("description", ""),
                        "metadata": metadata,
                    })
                except Exception as e:
                    log.error(f"Failed to read metadata for {item_dir}: {e}")
        return {"items": items}

    def _validate(self, category: Category, item_dir: Path) -> dict[str, Any]:
        """
        Run category-specific validation harness.

        Returns:
            dict with:
                - success: bool
                - output: str - stdout/stderr from validation
                - images: list[dict] - base64-encoded images (for research)
                - error: str (if any)
        """
        impl_path = item_dir / "implementation.py"
        if category == Category.STRATEGY:
            return self._validate_strategy(impl_path)
        elif category == Category.INDICATOR:
            return self._validate_indicator(impl_path)
        elif category == Category.RESEARCH:
            return self._validate_research(impl_path, item_dir)
        else:
            return {"success": False, "error": f"No validator for category {category}"}

    def _validate_strategy(self, impl_path: Path) -> dict[str, Any]:
        """Validate a strategy implementation (syntax check via py_compile)."""
        return self._run_syntax_check(impl_path, "Strategy", "strategy")

    def _validate_indicator(self, impl_path: Path) -> dict[str, Any]:
        """Validate an indicator implementation (syntax check via py_compile)."""
        return self._run_syntax_check(impl_path, "Indicator", "indicator")

    def _run_syntax_check(self, impl_path: Path, title: str, label: str) -> dict[str, Any]:
        """
        Shared py_compile harness for strategy/indicator validation.

        Compiles the file in a subprocess with a 10s timeout and returns the
        same result shape the category validators expose: success/output on a
        clean compile, success=False with stderr and an error string on a
        syntax failure or timeout.
        """
        try:
            result = subprocess.run(
                [sys.executable, "-m", "py_compile", str(impl_path)],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode == 0:
                return {
                    "success": True,
                    "output": f"{title} syntax valid",
                }
            return {
                "success": False,
                "output": result.stderr,
                "error": f"Syntax error in {label}",
            }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": "Validation timeout"}
        except Exception as e:
            return {"success": False, "error": f"Validation failed: {e}"}

    def _validate_research(self, impl_path: Path, item_dir: Path) -> dict[str, Any]:
        """
        Validate a research script.

        Runs the script in a subprocess (cwd = item_dir, 30s timeout) and
        captures stdout plus all pyplot figures as base64 PNGs, using a
        wrapper that forces matplotlib's non-interactive Agg backend.
        """
        # The wrapper source is kept at column 0 so it is valid top-level
        # code when passed to `python -c`.  Doubled braces are literal braces
        # in the generated code; {impl_path} is interpolated here.
        wrapper_code = f"""
import sys
import io
import base64
import json
from pathlib import Path
import matplotlib
matplotlib.use('Agg')  # Non-interactive backend
import matplotlib.pyplot as plt
# Capture stdout
old_stdout = sys.stdout
sys.stdout = io.StringIO()
# Run user code
user_code_path = Path(r"{impl_path}")
try:
    exec(compile(user_code_path.read_text(), user_code_path, 'exec'), {{}})
except Exception as e:
    print(f"ERROR: {{e}}", file=sys.stderr)
    sys.exit(1)
# Get stdout
output = sys.stdout.getvalue()
sys.stdout = old_stdout
# Capture all pyplot figures as base64 PNGs
images = []
for fig_num in plt.get_fignums():
    fig = plt.figure(fig_num)
    buf = io.BytesIO()
    fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
    buf.seek(0)
    img_b64 = base64.b64encode(buf.read()).decode('utf-8')
    images.append({{"format": "png", "data": img_b64}})
    buf.close()
plt.close('all')
# Output results as JSON
result = {{
    "output": output,
    "images": images,
}}
print(json.dumps(result))
"""
        try:
            result = subprocess.run(
                [sys.executable, "-c", wrapper_code],
                capture_output=True,
                text=True,
                timeout=30,
                cwd=str(item_dir),
            )
            if result.returncode == 0:
                try:
                    data = json.loads(result.stdout)
                    return {
                        "success": True,
                        "output": data["output"],
                        "images": data["images"],
                    }
                except json.JSONDecodeError:
                    # Script succeeded but printed non-JSON (e.g. wrapper was
                    # bypassed); fall back to raw stdout with no images.
                    return {
                        "success": True,
                        "output": result.stdout,
                        "images": [],
                    }
            else:
                return {
                    "success": False,
                    "output": result.stderr,
                    "error": "Research script execution failed",
                }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": "Research script timeout"}
        except Exception as e:
            return {"success": False, "error": f"Validation failed: {e}"}
# =============================================================================
# Global Manager Instance
# =============================================================================
_category_manager: Optional[CategoryFileManager] = None


def get_category_manager(data_dir: Optional[Path] = None) -> CategoryFileManager:
    """Get or create the global category file manager.

    The first call must supply data_dir; subsequent calls return the existing
    singleton (any data_dir passed later is ignored).
    """
    global _category_manager
    if _category_manager is not None:
        return _category_manager
    if data_dir is None:
        raise ValueError("data_dir required for first initialization")
    _category_manager = CategoryFileManager(data_dir)
    return _category_manager

View File

@@ -16,12 +16,11 @@ import struct
import sys
import os
# Import protobuf messages (assuming they're generated in ../protobuf)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../protobuf'))
# Import protobuf messages from generated package
try:
from ingestor_pb2 import SubmitHistoricalRequest, SubmitResponse, HistoryReadyNotification
from dexorder.generated.ingestor_pb2 import SubmitHistoricalRequest, SubmitResponse, HistoryReadyNotification
except ImportError:
print("Warning: Protobuf files not found. Run: protoc -I ../protobuf --python_out=../protobuf ../protobuf/*.proto")
print("Warning: Protobuf files not found. Run: protoc --python_out=dexorder/generated --proto_path=protobuf protobuf/*.proto")
raise

View File

@@ -205,26 +205,36 @@ class LifecycleManager:
_lifecycle_manager: Optional[LifecycleManager] = None
def get_lifecycle_manager() -> LifecycleManager:
def get_lifecycle_manager(
idle_timeout_minutes: Optional[int] = None,
enable_shutdown: Optional[bool] = None,
) -> LifecycleManager:
"""Get or create the global lifecycle manager instance."""
global _lifecycle_manager
if _lifecycle_manager is None:
# Load configuration from environment
idle_timeout = int(os.environ.get("IDLE_TIMEOUT_MINUTES", "15"))
# Load configuration from environment or use provided values
idle_timeout = idle_timeout_minutes if idle_timeout_minutes is not None else int(os.environ.get("IDLE_TIMEOUT_MINUTES", "15"))
check_interval = int(os.environ.get("IDLE_CHECK_INTERVAL_SECONDS", "60"))
enable_shutdown = os.environ.get("ENABLE_IDLE_SHUTDOWN", "true").lower() == "true"
enable = enable_shutdown if enable_shutdown is not None else os.environ.get("ENABLE_IDLE_SHUTDOWN", "true").lower() == "true"
_lifecycle_manager = LifecycleManager(
idle_timeout_minutes=idle_timeout,
check_interval_seconds=check_interval,
enable_shutdown=enable_shutdown,
enable_shutdown=enable,
)
return _lifecycle_manager
async def start_lifecycle_manager() -> LifecycleManager:
async def start_lifecycle_manager(
user_id: Optional[str] = None,
idle_timeout_minutes: Optional[int] = None,
enable_idle_shutdown: Optional[bool] = None,
) -> LifecycleManager:
"""Initialize and start the lifecycle manager."""
manager = get_lifecycle_manager()
manager = get_lifecycle_manager(
idle_timeout_minutes=idle_timeout_minutes,
enable_shutdown=enable_idle_shutdown,
)
manager.setup_signal_handlers()
await manager.start()
return manager

View File

@@ -18,12 +18,20 @@ from pathlib import Path
from typing import Optional
import yaml
import uvicorn
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.server.sse import SseServerTransport
from starlette.applications import Starlette
from starlette.routing import Route, Mount
from starlette.requests import Request
from starlette.responses import Response
from sse_starlette import EventSourceResponse
from dexorder import EventPublisher, start_lifecycle_manager, get_lifecycle_manager
from dexorder.events import EventType, UserEvent, DeliverySpec
from dexorder.api.workspace_tools import get_workspace_store
from dexorder.api.category_tools import get_category_manager
# =============================================================================
@@ -68,6 +76,9 @@ class Config:
# MCP server settings
self.mcp_server_name: str = os.getenv("MCP_SERVER_NAME", "dexorder-user")
self.mcp_transport: str = os.getenv("MCP_TRANSPORT", "sse") # "stdio" or "sse"
self.mcp_http_port: int = int(os.getenv("MCP_HTTP_PORT", "3000"))
self.mcp_http_host: str = os.getenv("MCP_HTTP_HOST", "0.0.0.0")
# Lifecycle settings
self.idle_timeout_minutes: int = int(os.getenv("IDLE_TIMEOUT_MINUTES", "15"))
@@ -134,6 +145,10 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
workspace_store = get_workspace_store(config.workspace_dir)
logging.info(f"Workspace store initialized at {config.workspace_dir}")
# Initialize category file manager
category_manager = get_category_manager(config.data_dir)
logging.info(f"Category manager initialized at {config.data_dir}")
@server.list_resources()
async def list_resources():
"""List available resources"""
@@ -143,6 +158,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
"name": "Hello World",
"description": "A simple hello world resource",
"mimeType": "text/plain",
"annotations": {
"agent_accessible": True, # Available to agent for ad-hoc queries
}
}
]
@@ -170,7 +188,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
@server.list_tools()
async def list_tools():
"""List available tools including workspace tools"""
"""List available tools including workspace and category tools"""
return [
{
"name": "workspace_read",
@@ -184,6 +202,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
},
"required": ["store_name"]
},
"annotations": {
"agent_accessible": True, # Agent can read workspace stores
}
},
{
@@ -201,6 +222,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
},
"required": ["store_name", "data"]
},
"annotations": {
"agent_accessible": True, # Agent can write workspace stores
}
},
{
@@ -228,13 +252,124 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
}
},
"required": ["store_name", "patch"]
},
"annotations": {
"agent_accessible": True, # Agent can patch workspace stores
}
},
{
"name": "category_write",
"description": "Write a new strategy, indicator, or research script with validation",
"inputSchema": {
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category of the script"
},
"name": {
"type": "string",
"description": "Display name (can contain special characters)"
},
"description": {
"type": "string",
"description": "LLM-generated description of what this does (required)"
},
"code": {
"type": "string",
"description": "Python implementation code"
},
"metadata": {
"type": "object",
"description": "Optional category-specific metadata (e.g., default_length for indicators, data_feeds for strategies)"
}
},
"required": ["category", "name", "description", "code"]
},
"annotations": {
"agent_accessible": True,
}
},
{
"name": "category_edit",
"description": "Edit an existing category script (updates code, description, or metadata)",
"inputSchema": {
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category of the script"
},
"name": {
"type": "string",
"description": "Display name of the existing item"
},
"code": {
"type": "string",
"description": "Updated Python code (optional, omit to keep existing)"
},
"description": {
"type": "string",
"description": "Updated description (optional, omit to keep existing)"
},
"metadata": {
"type": "object",
"description": "Updated metadata fields (optional)"
}
},
"required": ["category", "name"]
},
"annotations": {
"agent_accessible": True,
}
},
{
"name": "category_read",
"description": "Read a category script and its metadata",
"inputSchema": {
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category of the script"
},
"name": {
"type": "string",
"description": "Display name of the item"
}
},
"required": ["category", "name"]
},
"annotations": {
"agent_accessible": True,
}
},
{
"name": "category_list",
"description": "List all items in a category with names and descriptions",
"inputSchema": {
"type": "object",
"properties": {
"category": {
"type": "string",
"enum": ["strategy", "indicator", "research"],
"description": "Category to list"
}
},
"required": ["category"]
},
"annotations": {
"agent_accessible": True,
}
}
]
@server.call_tool()
async def handle_tool_call(name: str, arguments: dict):
"""Handle tool calls including workspace tools"""
"""Handle tool calls including workspace and category tools"""
if name == "workspace_read":
return workspace_store.read(arguments.get("store_name", ""))
elif name == "workspace_write":
@@ -247,13 +382,78 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
arguments.get("store_name", ""),
arguments.get("patch", [])
)
elif name == "category_write":
return category_manager.write(
category=arguments.get("category", ""),
name=arguments.get("name", ""),
description=arguments.get("description", ""),
code=arguments.get("code", ""),
metadata=arguments.get("metadata")
)
elif name == "category_edit":
return category_manager.edit(
category=arguments.get("category", ""),
name=arguments.get("name", ""),
code=arguments.get("code"),
description=arguments.get("description"),
metadata=arguments.get("metadata")
)
elif name == "category_read":
return category_manager.read(
category=arguments.get("category", ""),
name=arguments.get("name", "")
)
elif name == "category_list":
return category_manager.list_items(
category=arguments.get("category", "")
)
else:
raise ValueError(f"Unknown tool: {name}")
logging.info(f"MCP server '{config.mcp_server_name}' created with workspace tools")
logging.info(f"MCP server '{config.mcp_server_name}' created with workspace and category tools")
return server
# =============================================================================
# SSE Transport Setup
# =============================================================================
def create_sse_app(mcp_server: Server) -> Starlette:
"""Create Starlette app with SSE endpoint for MCP"""
# Create SSE transport instance
sse = SseServerTransport("/messages/")
async def handle_sse(request: Request) -> Response:
"""Handle SSE connections for MCP"""
async with sse.connect_sse(
request.scope, request.receive, request._send
) as streams:
await mcp_server.run(
streams[0],
streams[1],
mcp_server.create_initialization_options()
)
return Response()
async def handle_health(request: Request) -> Response:
"""Health check endpoint for k8s probes and gateway readiness checks"""
return Response(
content='{"status":"ok"}',
media_type="application/json"
)
app = Starlette(
routes=[
Route("/sse", handle_sse),
Mount("/messages/", app=sse.handle_post_message),
Route("/health", handle_health),
]
)
return app
# =============================================================================
# Main Application
# =============================================================================
@@ -347,17 +547,34 @@ class UserContainer:
logging.info("User container stopped")
async def run(self) -> None:
"""Run the MCP server via stdio"""
"""Run the MCP server with configured transport"""
await self.start()
try:
# Run MCP server on stdio
async with stdio_server() as (read_stream, write_stream):
await self.mcp_server.run(
read_stream,
write_stream,
self.mcp_server.create_initialization_options()
if self.config.mcp_transport == "stdio":
# Run MCP server on stdio (for dev/testing)
logging.info("Starting MCP server with stdio transport")
async with stdio_server() as (read_stream, write_stream):
await self.mcp_server.run(
read_stream,
write_stream,
self.mcp_server.create_initialization_options()
)
elif self.config.mcp_transport == "sse":
# Run MCP server via HTTP/SSE (for production)
logging.info(f"Starting MCP server with SSE transport on {self.config.mcp_http_host}:{self.config.mcp_http_port}")
app = create_sse_app(self.mcp_server)
config = uvicorn.Config(
app,
host=self.config.mcp_http_host,
port=self.config.mcp_http_port,
log_level=os.getenv("LOG_LEVEL", "info").lower(),
access_log=True,
)
server = uvicorn.Server(config)
await server.serve()
else:
raise ValueError(f"Unknown MCP transport: {self.config.mcp_transport}")
finally:
await self.stop()

View File

@@ -14,8 +14,12 @@ setup(
"protobuf>=4.25.0",
"pyyaml>=6.0",
"aiofiles>=23.0.0",
"mcp>=0.9.0",
"mcp>=1.0.0",
"jsonpatch>=1.33",
"starlette>=0.27.0",
"uvicorn>=0.27.0",
"sse-starlette>=1.6.0",
"matplotlib>=3.7.0",
],
extras_require={
"dev": [

View File

@@ -22,8 +22,8 @@ spec:
memory: "2Gi"
cpu: "2000m"
min:
memory: "64Mi"
cpu: "50m"
memory: "32Mi"
cpu: "10m"
# PVC size limits
- type: PersistentVolumeClaim
max:

View File

@@ -25,8 +25,7 @@ spec:
c.image.startsWith('localhost:5000/dexorder/agent') ||
c.image.startsWith('dexorder/agent') ||
c.image.startsWith('dexorder/ai-client-py') ||
c.image.startsWith('ai-client-py') ||
c.image.startsWith('lifecycle-sidecar'))
c.image.startsWith('dexorder/ai-lifecycle-sidecar'))
message: "Only approved dexorder agent images are allowed"
reason: Forbidden

View File

@@ -25,15 +25,40 @@ data:
# Default model (if user has no preference)
defaults:
model_provider: anthropic
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
# License tier model configuration
license_models:
# Free tier models
free:
default: claude-haiku-4-5-20251001
cost_optimized: claude-haiku-4-5-20251001
complex: claude-haiku-4-5-20251001
allowed_models:
- claude-haiku-4-5-20251001
# Pro tier models
pro:
default: claude-sonnet-4-6
cost_optimized: claude-haiku-4-5-20251001
complex: claude-sonnet-4-6
blocked_models:
- claude-opus-4-6
# Enterprise tier models
enterprise:
default: claude-sonnet-4-6
cost_optimized: claude-haiku-4-5-20251001
complex: claude-opus-4-6
# Kubernetes configuration
kubernetes:
namespace: dexorder-agents
in_cluster: true
agent_image: ai-client-py:latest
sidecar_image: lifecycle-sidecar:latest
agent_image: dexorder/ai-client-py:latest
sidecar_image: dexorder/ai-lifecycle-sidecar:latest
storage_class: standard
image_pull_policy: Never # For minikube dev - use local images
# DragonflyDB (Redis-compatible, for hot storage and session management)
redis:

View File

@@ -8,7 +8,7 @@ spec:
spec:
containers:
- name: gateway
image: dexorder/gateway:latest
image: dexorder/ai-gateway:latest
imagePullPolicy: Never
env:
- name: NODE_OPTIONS

View File

@@ -132,6 +132,29 @@ generatorOptions:

View File

@@ -24,15 +24,16 @@ data:
# Default model (if user has no preference)
defaults:
model_provider: anthropic
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
# Kubernetes configuration
kubernetes:
namespace: dexorder-agents
in_cluster: true
agent_image: ghcr.io/dexorder/agent:latest
sidecar_image: ghcr.io/dexorder/lifecycle-sidecar:latest
agent_image: dexorder/ai-client-py:latest
sidecar_image: dexorder/ai-lifecycle-sidecar:latest
storage_class: standard
image_pull_policy: Always # For production - always pull from registry
# DragonflyDB (Redis-compatible, for hot storage and session management)
redis:

View File

@@ -0,0 +1,52 @@
Tell me about yourself and your capabilities
I'm **MMT Companion**, a crypto market assistant embedded directly in your MMT trading terminal.
Here's what I can do:
### Market Analysis
- Fetch and analyze **OHLCV candle data** across any timeframe
- Summarize **price action, structure, and key levels**
- Provide **market overviews** with funding rates, liquidations, and trade flow
### Order Flow & Depth
- Read **live orderbooks** and identify bid/ask walls
- Analyze **orderbook heatmaps** to track liquidity shifts over time
- Break down **cumulative volume delta (CVD)** by trade size bucket
### Positioning & Derivatives
- Track **open interest** trends and leverage buildup
- Monitor **funding rates** and liquidation clusters
### Volume & Profiling
- Build **volume profiles** to find POC, value area, and volume gaps
### Chart Drawing
- Draw **horizontal levels, rectangles, trendlines, and rays** directly on your chart
### Scripting
- Write, edit, and deploy **MMT scripts** in the built-in editor
---
I always work from your **live chart context** — currently `BTC/USD` on Binance Futures at `5m`. All data I pull is real-time and grounded in actual market feeds, never fabricated.
What would you like to analyze?
Are you able to write automated strategies and execute them? How about backtesting?
### Scripting — Yes. Execution & Backtesting — No.
**What I can do:**
- Write and edit **MMT scripts** — indicators, signals, overlays, alerts, and visual tools using the MMT scripting language
- Deploy scripts directly into your **MMT script editor** in the terminal
**What I cannot do:**
- **Automated execution** — I cannot place, manage, or cancel orders on your behalf. MMT Companion has no order routing capability.
- **Backtesting** — There is no backtesting engine available through MMT scripting or this assistant at this time.
---
In short, I can build powerful **analytical and signal scripts** that run on your live chart, but acting on those signals (trading) remains manual on your end. If you have a strategy idea you want visualized or signaled in real-time, I can script that for you.

View File

@@ -210,7 +210,7 @@ async handleMessage(message: InboundMessage): Promise<OutboundMessage> {
// 5. Call Claude with embedded context
const response = await anthropic.messages.create({
model: 'claude-3-5-sonnet-20241022',
model: 'claude-sonnet-4-6',
system: systemPrompt, // <-- User profile + workspace + custom prompt
messages, // <-- Conversation summary from RAG
tools,

View File

@@ -22,7 +22,7 @@ OPENROUTER_API_KEY=sk-or-xxxxx
# Default model (if user has no preference)
DEFAULT_MODEL_PROVIDER=anthropic
DEFAULT_MODEL=claude-3-5-sonnet-20241022
DEFAULT_MODEL=claude-sonnet-4-6
# Telegram (optional)
TELEGRAM_BOT_TOKEN=

View File

@@ -32,7 +32,7 @@ const factory = new LLMProviderFactory(config, logger);
// Create any model
const claude = factory.createModel({
provider: 'anthropic',
model: 'claude-3-5-sonnet-20241022',
model: 'claude-sonnet-4-6',
});
const gpt4 = factory.createModel({
@@ -205,7 +205,7 @@ const conversationSummary = await mcpClient.readResource('context://conversation
{
"preferredModel": {
"provider": "anthropic",
"model": "claude-3-5-sonnet-20241022"
"model": "claude-sonnet-4-6"
}
}

View File

@@ -120,7 +120,7 @@ ANTHROPIC_API_KEY=sk-ant-xxxxx
# Optional: Set default model
DEFAULT_MODEL_PROVIDER=anthropic
DEFAULT_MODEL=claude-3-5-sonnet-20241022
DEFAULT_MODEL=claude-sonnet-4-6
```
4. Start Ollama and pull embedding model:

View File

@@ -19,7 +19,31 @@ database:
# Default model (if user has no preference)
defaults:
model_provider: anthropic
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
# License tier model configuration
license_models:
# Free tier models
free:
default: claude-haiku-4-5-20251001
cost_optimized: claude-haiku-4-5-20251001
complex: claude-haiku-4-5-20251001
allowed_models:
- claude-haiku-4-5-20251001
# Pro tier models
pro:
default: claude-sonnet-4-6
cost_optimized: claude-haiku-4-5-20251001
complex: claude-sonnet-4-6
blocked_models:
- claude-opus-4-6
# Enterprise tier models
enterprise:
default: claude-sonnet-4-6
cost_optimized: claude-haiku-4-5-20251001
complex: claude-opus-4-6
# Kubernetes configuration
kubernetes:

View File

@@ -88,7 +88,7 @@ CREATE TABLE IF NOT EXISTS user_licenses (
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
COMMENT ON COLUMN user_licenses.preferred_model IS 'Optional model preference: {"provider": "anthropic", "model": "claude-3-5-sonnet-20241022", "temperature": 0.7}';
COMMENT ON COLUMN user_licenses.preferred_model IS 'Optional model preference: {"provider": "anthropic", "model": "claude-sonnet-4-6", "temperature": 0.7}';
CREATE INDEX idx_user_licenses_expires_at ON user_licenses(expires_at)
WHERE expires_at IS NOT NULL;

View File

@@ -110,6 +110,7 @@ export class TelegramHandler {
userId: authContext.userId,
sessionId: authContext.sessionId,
content: text,
attachments: [], // TODO: Add image support for Telegram
timestamp: new Date(),
};

View File

@@ -158,21 +158,25 @@ export class WebSocketHandler {
}),
};
// Create agent harness
const harness = new AgentHarness({
userId: authContext.userId,
sessionId: authContext.sessionId,
license: authContext.license,
providerConfig: this.config.providerConfig,
logger,
});
// Declare harness outside try block so it's available in catch
let harness: AgentHarness | undefined;
try {
// Initialize workspace and harness
// Initialize workspace first
await workspace.initialize();
workspace.setAdapter(wsAdapter);
this.workspaces.set(authContext.sessionId, workspace);
// Create agent harness with workspace manager
harness = new AgentHarness({
userId: authContext.userId,
sessionId: authContext.sessionId,
license: authContext.license,
providerConfig: this.config.providerConfig,
logger,
workspaceManager: workspace,
});
await harness.initialize();
this.harnesses.set(authContext.sessionId, harness);
@@ -220,8 +224,8 @@ export class WebSocketHandler {
logger.info({ type: payload.type, request_id: payload.request_id }, 'WebSocket message parsed');
// Route based on message type
if (payload.type === 'message') {
// Chat message - send to agent harness
if (payload.type === 'message' || payload.type === 'agent_user_message') {
// Chat message - send to agent harness with streaming
const inboundMessage: InboundMessage = {
messageId: randomUUID(),
userId: authContext.userId,
@@ -231,14 +235,41 @@ export class WebSocketHandler {
timestamp: new Date(),
};
const response = await harness.handleMessage(inboundMessage);
if (!harness) {
logger.error('Harness not initialized');
socket.send(JSON.stringify({ type: 'error', message: 'Session not ready' }));
return;
}
socket.send(
JSON.stringify({
type: 'message',
...response,
})
);
// Stream response chunks to client
try {
for await (const chunk of harness.streamMessage(inboundMessage)) {
socket.send(
JSON.stringify({
type: 'agent_chunk',
content: chunk,
done: false,
})
);
}
// Send final chunk with done flag
socket.send(
JSON.stringify({
type: 'agent_chunk',
content: '',
done: true,
})
);
} catch (error) {
logger.error({ error }, 'Error streaming response');
socket.send(
JSON.stringify({
type: 'error',
message: 'Failed to generate response',
})
);
}
} else if (payload.type === 'hello') {
// Workspace sync: hello message
logger.debug({ seqs: payload.seqs }, 'Handling workspace hello');
@@ -280,8 +311,10 @@ export class WebSocketHandler {
this.workspaces.delete(authContext.sessionId);
// Cleanup harness
await harness.cleanup();
this.harnesses.delete(authContext.sessionId);
if (harness) {
await harness.cleanup();
this.harnesses.delete(authContext.sessionId);
}
});
socket.on('error', (error: any) => {
@@ -292,7 +325,9 @@ export class WebSocketHandler {
socket.close(1011, 'Internal server error');
await workspace.shutdown();
this.workspaces.delete(authContext.sessionId);
await harness.cleanup();
if (harness) {
await harness.cleanup();
}
}
}

View File

@@ -66,6 +66,7 @@ export class ZMQRelayClient {
relayNotificationEndpoint: config.relayNotificationEndpoint,
clientId: config.clientId || `gateway-${randomUUID().slice(0, 8)}`,
requestTimeout: config.requestTimeout || 30000,
onMetadataUpdate: config.onMetadataUpdate || (async () => {}),
};
this.logger = logger;
this.notificationTopic = `RESPONSE:${this.config.clientId}`;

View File

@@ -101,9 +101,15 @@ See individual workflow READMEs for details.
YAML-based configuration:
- `models.yaml`: LLM providers, routing, rate limits
- `subagent-routing.yaml`: When to use which subagent
Model configuration is centralized in the main gateway config (`/config/config.yaml`), including:
- Default models and providers
- License tier model assignments (free, pro, enterprise)
- Model routing rules (complexity, cost-optimized)
Resource limits (token limits, rate limits) are stored in the database per user.
## User Context
Enhanced session context with channel awareness for multi-channel support:

View File

@@ -8,6 +8,7 @@ import { MCPClientConnector } from './mcp-client.js';
import { CONTEXT_URIS, type ResourceContent } from '../types/resources.js';
import { LLMProviderFactory, type ProviderConfig } from '../llm/provider.js';
import { ModelRouter, RoutingStrategy } from '../llm/router.js';
import type { WorkspaceManager } from '../workspace/workspace-manager.js';
export interface AgentHarnessConfig {
userId: string;
@@ -15,6 +16,7 @@ export interface AgentHarnessConfig {
license: UserLicense;
providerConfig: ProviderConfig;
logger: FastifyBaseLogger;
workspaceManager?: WorkspaceManager;
}
/**
@@ -33,9 +35,13 @@ export class AgentHarness {
private modelFactory: LLMProviderFactory;
private modelRouter: ModelRouter;
private mcpClient: MCPClientConnector;
private workspaceManager?: WorkspaceManager;
private lastWorkspaceSeq: number = 0;
private isFirstMessage: boolean = true;
constructor(config: AgentHarnessConfig) {
this.config = config;
this.workspaceManager = config.workspaceManager;
this.modelFactory = new LLMProviderFactory(config.providerConfig, config.logger);
this.modelRouter = new ModelRouter(this.modelFactory, config.logger);
@@ -102,18 +108,14 @@ export class AgentHarness {
// 7. Extract text response (tool handling TODO)
const assistantMessage = response.content as string;
// 8. Save messages to user's MCP server
this.config.logger.debug('Saving messages to MCP');
await this.mcpClient.callTool('save_message', {
role: 'user',
content: message.content,
timestamp: message.timestamp.toISOString(),
});
await this.mcpClient.callTool('save_message', {
role: 'assistant',
content: assistantMessage,
timestamp: new Date().toISOString(),
});
// TODO: Save messages to Iceberg conversation table instead of MCP
// Should batch-insert periodically or on session end to avoid many small Parquet files
// await icebergConversationStore.appendMessages([...]);
// Mark first message as processed
if (this.isFirstMessage) {
this.isFirstMessage = false;
}
return {
messageId: `msg_${Date.now()}`,
@@ -157,17 +159,17 @@ export class AgentHarness {
yield content;
}
// Save after streaming completes
await this.mcpClient.callTool('save_message', {
role: 'user',
content: message.content,
timestamp: message.timestamp.toISOString(),
});
await this.mcpClient.callTool('save_message', {
role: 'assistant',
content: fullResponse,
timestamp: new Date().toISOString(),
});
// TODO: Save messages to Iceberg conversation table instead of MCP
// Should batch-insert periodically or on session end to avoid many small Parquet files
// await icebergConversationStore.appendMessages([
// { role: 'user', content: message.content, timestamp: message.timestamp },
// { role: 'assistant', content: fullResponse, timestamp: new Date() }
// ]);
// Mark first message as processed
if (this.isFirstMessage) {
this.isFirstMessage = false;
}
} catch (error) {
this.config.logger.error({ error }, 'Error streaming message');
throw error;
@@ -224,6 +226,15 @@ export class AgentHarness {
});
}
// Add workspace delta (for subsequent turns)
const workspaceDelta = this.buildWorkspaceDelta();
if (workspaceDelta) {
messages.push({
role: 'user',
content: workspaceDelta,
});
}
// Add current user message
messages.push({
role: 'user',
@@ -273,9 +284,18 @@ Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
prompt += `\n\n# User Profile\n${userProfile.text}`;
}
// Add workspace context
// Add workspace context from MCP resource (if available)
if (workspaceState?.text) {
prompt += `\n\n# Current Workspace\n${workspaceState.text}`;
prompt += `\n\n# Current Workspace (from MCP)\n${workspaceState.text}`;
}
// Add full workspace state from WorkspaceManager (first message only)
if (this.isFirstMessage && this.workspaceManager) {
const workspaceJSON = this.workspaceManager.serializeState();
prompt += `\n\n# Workspace State (JSON)\n\`\`\`json\n${workspaceJSON}\n\`\`\``;
// Record current workspace sequence for delta tracking
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
}
// Add user's custom instructions (highest priority)
@@ -286,6 +306,30 @@ Available features: ${JSON.stringify(this.config.license.features, null, 2)}`;
return prompt;
}
/**
* Build workspace delta message for subsequent turns.
* Returns null if no changes since last message.
*/
private buildWorkspaceDelta(): string | null {
if (!this.workspaceManager || this.isFirstMessage) {
return null;
}
const changes = this.workspaceManager.getChangesSince(this.lastWorkspaceSeq);
if (Object.keys(changes).length === 0) {
return null;
}
// Format changes as JSON
const deltaJSON = JSON.stringify(changes, null, 2);
// Update sequence marker
this.lastWorkspaceSeq = this.workspaceManager.getCurrentSeq();
return `[Workspace Changes Since Last Turn]\n\`\`\`json\n${deltaJSON}\n\`\`\``;
}
/**

View File

@@ -1,110 +0,0 @@
# Default LLM Model Configuration
# Default model for general agent tasks
default:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.7
maxTokens: 4096
# Model overrides for specific use cases
models:
# Fast model for simple tasks (routing, classification)
fast:
provider: anthropic
model: claude-3-haiku-20240307
temperature: 0.3
maxTokens: 1024
# Reasoning model for complex analysis
reasoning:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.5
maxTokens: 8192
# Precise model for code generation/review
code:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.2
maxTokens: 8192
# Creative model for strategy brainstorming
creative:
provider: anthropic
model: claude-3-5-sonnet-20241022
temperature: 0.9
maxTokens: 4096
# Embedding model configuration
embeddings:
provider: openai
model: text-embedding-3-small
dimensions: 1536
# Model routing rules (complexity-based)
routing:
# Simple queries → fast model
simple:
keywords:
- "what is"
- "define"
- "list"
- "show me"
maxTokens: 100
model: fast
# Code-related → code model
code:
keywords:
- "code"
- "function"
- "implement"
- "debug"
- "review"
model: code
# Analysis tasks → reasoning model
analysis:
keywords:
- "analyze"
- "compare"
- "evaluate"
- "assess"
model: reasoning
# Everything else → default
default:
model: default
# Cost optimization settings
costControl:
# Cache system prompts (Anthropic prompt caching)
cacheSystemPrompts: true
# Token limits per license type
tokenLimits:
free:
maxTokensPerMessage: 2048
maxTokensPerDay: 50000
pro:
maxTokensPerMessage: 8192
maxTokensPerDay: 500000
enterprise:
maxTokensPerMessage: 16384
maxTokensPerDay: -1 # unlimited
# Rate limiting
rateLimits:
# Requests per minute by license
requestsPerMinute:
free: 10
pro: 60
enterprise: 120
# Concurrent requests
concurrentRequests:
free: 1
pro: 3
enterprise: 10

View File

@@ -1,4 +1,5 @@
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
import type { FastifyBaseLogger } from 'fastify';
@@ -11,7 +12,7 @@ export interface MCPClientConfig {
/**
* MCP client connector for user's container
* Manages connection to user-specific MCP server
* Manages connection to user-specific MCP server via SSE transport
*/
export class MCPClientConnector {
private client: Client | null = null;
@@ -23,8 +24,7 @@ export class MCPClientConnector {
}
/**
* Connect to user's MCP server
* TODO: Implement HTTP/SSE transport instead of stdio for container communication
* Connect to user's MCP server via SSE transport
*/
async connect(): Promise<void> {
if (this.connected) {
@@ -34,7 +34,7 @@ export class MCPClientConnector {
try {
this.config.logger.info(
{ userId: this.config.userId, url: this.config.mcpServerUrl },
'Connecting to user MCP server'
'Connecting to user MCP server via SSE'
);
this.client = new Client(
@@ -46,24 +46,18 @@ export class MCPClientConnector {
capabilities: {
sampling: {},
},
} as any
}
);
// TODO: Replace with HTTP transport when user containers are ready
// For now, this is a placeholder structure
// const transport = new HTTPTransport(this.config.mcpServerUrl, {
// headers: {
// 'Authorization': `Bearer ${this.config.platformJWT}`
// }
// });
// Placeholder: will be replaced with actual container transport
this.config.logger.warn(
'MCP transport not yet implemented - using placeholder'
// Create SSE transport for HTTP connection to user container
const transport = new SSEClientTransport(
new URL(`${this.config.mcpServerUrl}/sse`)
);
await this.client.connect(transport);
this.connected = true;
this.config.logger.info('Connected to user MCP server');
this.config.logger.info('Connected to user MCP server via SSE');
} catch (error) {
this.config.logger.error(
{ error, userId: this.config.userId },
@@ -84,12 +78,8 @@ export class MCPClientConnector {
try {
this.config.logger.debug({ tool: name, args }, 'Calling MCP tool');
// TODO: Implement when MCP client is connected
// const result = await this.client.callTool({ name, arguments: args });
// return result;
// Placeholder response
return { success: true, message: 'MCP tool call placeholder' };
const result = await this.client.callTool({ name, arguments: args });
return result;
} catch (error) {
this.config.logger.error({ error, tool: name }, 'MCP tool call failed');
throw error;
@@ -98,27 +88,35 @@ export class MCPClientConnector {
/**
* List available tools from user's MCP server
* Filters to only return tools marked as agent_accessible
*/
async listTools(): Promise<Array<{ name: string; description?: string }>> {
async listTools(): Promise<Array<{ name: string; description?: string; inputSchema?: any }>> {
if (!this.client || !this.connected) {
throw new Error('MCP client not connected');
}
try {
// TODO: Implement when MCP client is connected
// const tools = await this.client.listTools();
// return tools;
const response = await this.client.listTools();
// Placeholder tools (actions only, not context)
return [
{ name: 'save_message', description: 'Save message to conversation history' },
{ name: 'list_strategies', description: 'List user strategies' },
{ name: 'read_strategy', description: 'Read strategy code' },
{ name: 'write_strategy', description: 'Write strategy code' },
{ name: 'run_backtest', description: 'Run backtest on strategy' },
{ name: 'get_watchlist', description: 'Get user watchlist' },
{ name: 'execute_trade', description: 'Execute trade' },
];
// Filter tools to only include agent-accessible ones
const tools = response.tools
.filter((tool: any) => {
// Check if tool has agent_accessible annotation
const annotations = tool.annotations || {};
return annotations.agent_accessible === true;
})
.map((tool: any) => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema,
}));
this.config.logger.debug(
{ totalTools: response.tools.length, agentAccessibleTools: tools.length },
'Listed MCP tools with filtering'
);
return tools;
} catch (error) {
this.config.logger.error({ error }, 'Failed to list MCP tools');
throw error;
@@ -127,6 +125,7 @@ export class MCPClientConnector {
/**
* List available resources from user's MCP server
* Filters to only return resources marked as agent_accessible
*/
async listResources(): Promise<Array<{ uri: string; name: string; description?: string; mimeType?: string }>> {
if (!this.client || !this.connected) {
@@ -134,37 +133,28 @@ export class MCPClientConnector {
}
try {
// TODO: Implement when MCP client is connected
// const resources = await this.client.listResources();
// return resources;
const response = await this.client.listResources();
// Placeholder resources for user context
return [
{
uri: 'context://user-profile',
name: 'User Profile',
description: 'User trading style, preferences, and background',
mimeType: 'text/plain',
},
{
uri: 'context://conversation-summary',
name: 'Conversation Summary',
description: 'Semantic summary of recent conversation history with RAG',
mimeType: 'text/plain',
},
{
uri: 'context://workspace-state',
name: 'Workspace State',
description: 'Current chart, watchlist, and open positions',
mimeType: 'application/json',
},
{
uri: 'context://system-prompt',
name: 'Custom System Prompt',
description: 'User custom instructions for the assistant',
mimeType: 'text/plain',
},
];
// Filter resources to only include agent-accessible ones
const resources = response.resources
.filter((resource: any) => {
// Check if resource has agent_accessible annotation
const annotations = resource.annotations || {};
return annotations.agent_accessible === true;
})
.map((resource: any) => ({
uri: resource.uri,
name: resource.name,
description: resource.description,
mimeType: resource.mimeType,
}));
this.config.logger.debug(
{ totalResources: response.resources.length, agentAccessibleResources: resources.length },
'Listed MCP resources with filtering'
);
return resources;
} catch (error) {
this.config.logger.error({ error }, 'Failed to list MCP resources');
throw error;
@@ -182,55 +172,18 @@ export class MCPClientConnector {
try {
this.config.logger.debug({ uri }, 'Reading MCP resource');
// TODO: Implement when MCP client is connected
// const resource = await this.client.readResource({ uri });
// return resource;
const response = await this.client.readResource({ uri });
// Placeholder resource content
if (uri === 'context://user-profile') {
return {
uri,
mimeType: 'text/plain',
text: `User Profile:
- Trading experience: Intermediate
- Preferred timeframes: 1h, 4h, 1d
- Risk tolerance: Medium
- Focus: Swing trading with technical indicators`,
};
} else if (uri === 'context://conversation-summary') {
return {
uri,
mimeType: 'text/plain',
text: `Recent Conversation Summary:
[RAG-generated summary would go here]
// Extract the first content item (MCP returns array of contents)
const content = response.contents[0];
User recently discussed:
- Moving average crossover strategies
- Backtesting on BTC/USDT
- Risk management techniques`,
};
} else if (uri === 'context://workspace-state') {
return {
uri,
mimeType: 'application/json',
text: JSON.stringify({
currentChart: { ticker: 'BINANCE:BTC/USDT', timeframe: '1h' },
watchlist: ['BTC/USDT', 'ETH/USDT', 'SOL/USDT'],
openPositions: [],
}, null, 2),
};
} else if (uri === 'context://system-prompt') {
return {
uri,
mimeType: 'text/plain',
text: `Custom Instructions:
- Be concise and data-driven
- Always show risk/reward ratios
- Prefer simple strategies over complex ones`,
};
}
return { uri, text: '' };
// Handle union type: content is either TextContent or BlobContent
return {
uri: content.uri,
mimeType: content.mimeType,
text: 'text' in content ? content.text : undefined,
blob: 'blob' in content ? content.blob : undefined,
};
} catch (error) {
this.config.logger.error({ error, uri }, 'MCP resource read failed');
throw error;

View File

@@ -42,7 +42,7 @@ name: my-subagent
description: What it does
# Model override (optional)
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 4096

View File

@@ -4,7 +4,7 @@ name: code-reviewer
description: Reviews trading strategy code for bugs, performance issues, and best practices
# Model configuration (optional override)
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
temperature: 0.3
maxTokens: 4096

View File

@@ -334,7 +334,7 @@ timeout: 60000
maxRetries: 3
requiresApproval: false
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
```
### 6. Add Factory Function

View File

@@ -15,5 +15,5 @@ maxValidationRetries: 3 # Max times to retry fixing errors
minBacktestScore: 0.5 # Minimum Sharpe ratio to pass
# Model override (optional)
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
temperature: 0.3

View File

@@ -15,5 +15,5 @@ maxPositionPercent: 0.05 # 5% of portfolio max
minRiskRewardRatio: 2.0 # Minimum 2:1 risk/reward
# Model override (optional)
model: claude-3-5-sonnet-20241022
model: claude-sonnet-4-6
temperature: 0.2

View File

@@ -21,6 +21,7 @@ export interface DeploymentSpec {
agentImage: string;
sidecarImage: string;
storageClass: string;
imagePullPolicy?: string;
}
/**
@@ -132,7 +133,8 @@ export class KubernetesClient {
.replace(/\{\{pvcName\}\}/g, pvcName)
.replace(/\{\{agentImage\}\}/g, spec.agentImage)
.replace(/\{\{sidecarImage\}\}/g, spec.sidecarImage)
.replace(/\{\{storageClass\}\}/g, spec.storageClass);
.replace(/\{\{storageClass\}\}/g, spec.storageClass)
.replace(/\{\{imagePullPolicy\}\}/g, spec.imagePullPolicy || 'Always');
// Parse YAML documents (deployment, pvc, service)
const documents = yaml.loadAll(rendered) as any[];

View File

@@ -7,6 +7,7 @@ export interface ContainerManagerConfig {
agentImage: string;
sidecarImage: string;
storageClass: string;
imagePullPolicy?: string;
namespace: string;
logger: FastifyBaseLogger;
}
@@ -82,6 +83,7 @@ export class ContainerManager {
agentImage: this.config.agentImage,
sidecarImage: this.config.sidecarImage,
storageClass: this.config.storageClass,
imagePullPolicy: this.config.imagePullPolicy,
};
await this.config.k8sClient.createAgentDeployment(spec);

View File

@@ -40,7 +40,7 @@ spec:
containers:
- name: agent
image: {{agentImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
@@ -72,6 +72,8 @@ spec:
value: "3000"
- name: ZMQ_CONTROL_PORT
value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571"
ports:
- name: mcp
@@ -102,14 +104,14 @@ spec:
readinessProbe:
httpGet:
path: /ready
path: /health
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false

View File

@@ -39,7 +39,7 @@ spec:
containers:
- name: agent
image: {{agentImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
@@ -71,7 +71,9 @@ spec:
value: "3000"
- name: ZMQ_CONTROL_PORT
value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571"
ports:
- name: mcp
containerPort: 3000
@@ -101,14 +103,14 @@ spec:
readinessProbe:
httpGet:
path: /ready
path: /health
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false

View File

@@ -39,7 +39,7 @@ spec:
containers:
- name: agent
image: {{agentImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false
@@ -71,6 +71,8 @@ spec:
value: "3000"
- name: ZMQ_CONTROL_PORT
value: "5555"
- name: ZMQ_GATEWAY_ENDPOINT
value: "tcp://gateway.default.svc.cluster.local:5571"
ports:
- name: mcp
@@ -101,14 +103,14 @@ spec:
readinessProbe:
httpGet:
path: /ready
path: /health
port: mcp
initialDelaySeconds: 5
periodSeconds: 10
- name: lifecycle-sidecar
image: {{sidecarImage}}
imagePullPolicy: Always
imagePullPolicy: {{imagePullPolicy}}
securityContext:
allowPrivilegeEscalation: false

View File

@@ -19,11 +19,33 @@ export interface ModelConfig {
maxTokens?: number;
}
/**
* License tier model configuration
*/
export interface LicenseTierModels {
default: string;
cost_optimized: string;
complex: string;
allowed_models?: string[];
blocked_models?: string[];
}
/**
* License models configuration
*/
export interface LicenseModelsConfig {
free: LicenseTierModels;
pro: LicenseTierModels;
enterprise: LicenseTierModels;
}
/**
* Provider configuration with API keys
*/
export interface ProviderConfig {
anthropicApiKey?: string;
defaultModel?: ModelConfig;
licenseModels?: LicenseModelsConfig;
}
/**
@@ -77,15 +99,26 @@ export class LLMProviderFactory {
* Get default model based on environment
*/
getDefaultModel(): ModelConfig {
if (this.config.defaultModel) {
return this.config.defaultModel;
}
if (!this.config.anthropicApiKey) {
throw new Error('Anthropic API key not configured');
}
return {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-sonnet-20241022',
model: 'claude-sonnet-4-6',
};
}
/**
* Get license models configuration
*/
getLicenseModelsConfig(): LicenseModelsConfig | undefined {
return this.config.licenseModels;
}
}
/**
@@ -94,14 +127,14 @@ export class LLMProviderFactory {
export const MODELS = {
CLAUDE_SONNET: {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-sonnet-20241022',
model: 'claude-sonnet-4-6',
},
CLAUDE_HAIKU: {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-5-haiku-20241022',
model: 'claude-haiku-4-5-20251001',
},
CLAUDE_OPUS: {
provider: LLMProvider.ANTHROPIC,
model: 'claude-3-opus-20240229',
model: 'claude-opus-4-6',
},
} as const satisfies Record<string, ModelConfig>;

View File

@@ -1,6 +1,6 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { FastifyBaseLogger } from 'fastify';
import { LLMProviderFactory, type ModelConfig, LLMProvider } from './provider.js';
import { LLMProviderFactory, type ModelConfig, LLMProvider, type LicenseModelsConfig } from './provider.js';
import type { UserLicense } from '../types/user.js';
/**
@@ -25,11 +25,13 @@ export class ModelRouter {
private factory: LLMProviderFactory;
private logger: FastifyBaseLogger;
private defaultModel: ModelConfig;
private licenseModels?: LicenseModelsConfig;
constructor(factory: LLMProviderFactory, logger: FastifyBaseLogger) {
this.factory = factory;
this.logger = logger;
this.defaultModel = factory.getDefaultModel();
this.licenseModels = factory.getLicenseModelsConfig();
}
/**
@@ -97,37 +99,53 @@ export class ModelRouter {
private routeByComplexity(message: string, license: UserLicense): ModelConfig {
const isComplex = this.isComplexQuery(message);
// Use configuration if available
if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType];
if (tierConfig) {
const model = isComplex ? tierConfig.complex : tierConfig.default;
return { provider: this.defaultModel.provider as LLMProvider, model };
}
}
// Fallback to hardcoded defaults
if (license.licenseType === 'enterprise') {
// Enterprise users get best models for complex queries
return isComplex
? { provider: LLMProvider.ANTHROPIC, model: 'claude-3-opus-20240229' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' };
? { provider: LLMProvider.ANTHROPIC, model: 'claude-opus-4-6' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
}
if (license.licenseType === 'pro') {
// Pro users get good models
return isComplex
? { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
? { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' }
: { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
}
// Free users get efficient models
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
}
/**
* Route based on license tier
*/
private routeByLicenseTier(license: UserLicense): ModelConfig {
// Use configuration if available
if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType];
if (tierConfig) {
return { provider: this.defaultModel.provider as LLMProvider, model: tierConfig.default };
}
}
// Fallback to hardcoded defaults
switch (license.licenseType) {
case 'enterprise':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
case 'pro':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-sonnet-20241022' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-sonnet-4-6' };
case 'free':
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
default:
return this.defaultModel;
@@ -137,24 +155,50 @@ export class ModelRouter {
/**
* Route to cheapest available model
*/
private routeByCost(_license: UserLicense): ModelConfig {
// All tiers: use Haiku for cost efficiency
return { provider: LLMProvider.ANTHROPIC, model: 'claude-3-5-haiku-20241022' };
private routeByCost(license: UserLicense): ModelConfig {
// Use configuration if available
if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType];
if (tierConfig) {
return { provider: this.defaultModel.provider as LLMProvider, model: tierConfig.cost_optimized };
}
}
// Fallback: use Haiku for cost efficiency
return { provider: LLMProvider.ANTHROPIC, model: 'claude-haiku-4-5-20251001' };
}
/**
* Check if model is allowed for user's license
*/
private isModelAllowed(model: ModelConfig, license: UserLicense): boolean {
// Free tier: only Haiku
// Use configuration if available
if (this.licenseModels) {
const tierConfig = this.licenseModels[license.licenseType];
if (tierConfig) {
// Check allowed_models list if defined
if (tierConfig.allowed_models && tierConfig.allowed_models.length > 0) {
return tierConfig.allowed_models.includes(model.model);
}
// Check blocked_models list if defined
if (tierConfig.blocked_models && tierConfig.blocked_models.length > 0) {
return !tierConfig.blocked_models.includes(model.model);
}
// No restrictions if neither list is defined
return true;
}
}
// Fallback to hardcoded defaults
if (license.licenseType === 'free') {
const allowedModels = ['claude-3-5-haiku-20241022'];
const allowedModels = ['claude-haiku-4-5-20251001'];
return allowedModels.includes(model.model);
}
// Pro: all except Opus
if (license.licenseType === 'pro') {
const blockedModels = ['claude-3-opus-20240229'];
const blockedModels = ['claude-opus-4-6'];
return !blockedModels.includes(model.model);
}

View File

@@ -85,12 +85,35 @@ function loadConfig() {
// Authentication configuration
authSecret: secretsData.auth?.secret || process.env.AUTH_SECRET || 'change-me-in-production',
// LLM provider API keys
// LLM provider API keys and model configuration
providerConfig: {
anthropicApiKey: secretsData.llm_providers?.anthropic_api_key || process.env.ANTHROPIC_API_KEY,
openaiApiKey: secretsData.llm_providers?.openai_api_key || process.env.OPENAI_API_KEY,
googleApiKey: secretsData.llm_providers?.google_api_key || process.env.GOOGLE_API_KEY,
openrouterApiKey: secretsData.llm_providers?.openrouter_api_key || process.env.OPENROUTER_API_KEY,
defaultModel: {
provider: configData.defaults?.model_provider || 'anthropic',
model: configData.defaults?.model || 'claude-sonnet-4-6',
},
licenseModels: {
free: {
default: configData.license_models?.free?.default || 'claude-haiku-4-5-20251001',
cost_optimized: configData.license_models?.free?.cost_optimized || 'claude-haiku-4-5-20251001',
complex: configData.license_models?.free?.complex || 'claude-haiku-4-5-20251001',
allowed_models: configData.license_models?.free?.allowed_models || ['claude-haiku-4-5-20251001'],
},
pro: {
default: configData.license_models?.pro?.default || 'claude-sonnet-4-6',
cost_optimized: configData.license_models?.pro?.cost_optimized || 'claude-haiku-4-5-20251001',
complex: configData.license_models?.pro?.complex || 'claude-sonnet-4-6',
blocked_models: configData.license_models?.pro?.blocked_models || ['claude-opus-4-6'],
},
enterprise: {
default: configData.license_models?.enterprise?.default || 'claude-sonnet-4-6',
cost_optimized: configData.license_models?.enterprise?.cost_optimized || 'claude-haiku-4-5-20251001',
complex: configData.license_models?.enterprise?.complex || 'claude-opus-4-6',
},
},
},
telegramBotToken: secretsData.telegram?.bot_token || process.env.TELEGRAM_BOT_TOKEN || '',
@@ -148,6 +171,7 @@ function loadConfig() {
agentImage: configData.kubernetes?.agent_image || process.env.AGENT_IMAGE || 'ghcr.io/dexorder/agent:latest',
sidecarImage: configData.kubernetes?.sidecar_image || process.env.SIDECAR_IMAGE || 'ghcr.io/dexorder/lifecycle-sidecar:latest',
storageClass: configData.kubernetes?.storage_class || process.env.AGENT_STORAGE_CLASS || 'standard',
imagePullPolicy: configData.kubernetes?.image_pull_policy || process.env.IMAGE_PULL_POLICY || 'Always',
},
};
}
@@ -265,6 +289,7 @@ const containerManager = new ContainerManager({
agentImage: config.kubernetes.agentImage,
sidecarImage: config.kubernetes.sidecarImage,
storageClass: config.kubernetes.storageClass,
imagePullPolicy: config.kubernetes.imagePullPolicy,
namespace: config.kubernetes.namespace,
logger: app.log,
});

View File

@@ -66,7 +66,8 @@ export type {
PathTriggerHandler,
PathTriggerContext,
ChartState,
ChartStore,
ShapesStore,
IndicatorsStore,
ChannelState,
ChannelInfo,
WorkspaceStores,

View File

@@ -404,4 +404,28 @@ export class SyncRegistry {
}
return states;
}
/**
 * Collect the JSON-Patch operations recorded for a store after a given
 * sequence number, flattened into a single list.
 *
 * Returns null when the store is unknown or its patch history no longer
 * reaches back to `sinceSeq` (caller must fall back to a full snapshot);
 * returns an empty array when the store exists but nothing changed.
 */
getPatchesSince(storeName: string, sinceSeq: number): JsonPatchOp[] | null {
  const entry = this.entries.get(storeName);
  if (entry === undefined) {
    return null;
  }
  const history = entry.getCatchupPatches(sinceSeq);
  if (history === null) {
    // History truncated past sinceSeq — incremental catch-up not possible.
    return null;
  }
  // Each history item carries one patch (an array of ops); flatten them.
  return history.flatMap(({ patch }) => patch);
}
}

View File

@@ -84,17 +84,19 @@ export const DEFAULT_STORES: StoreConfig[] = [
symbol: 'BINANCE:BTC/USDT',
start_time: null,
end_time: null,
interval: '15',
period: '15',
selected_shapes: [],
}),
},
{
name: 'chartStore',
name: 'shapes',
persistent: true,
initialState: () => ({
drawings: {},
templates: {},
}),
initialState: () => ({}),
},
{
name: 'indicators',
persistent: true,
initialState: () => ({}),
},
{
name: 'channelState',
@@ -198,22 +200,70 @@ export interface PathTrigger {
*/
export interface ChartState {
symbol: string;
start_time: number | null;
end_time: number | null;
interval: string;
selected_shapes: string[];
start_time: number | null; // unix timestamp
end_time: number | null; // unix timestamp
period: string; // OHLC duration (e.g., '15' for 15 minutes)
selected_shapes: string[]; // list of shape ID's
}
/**
* Chart store - persistent, stores drawings and templates.
* Control point for shapes (drawings/annotations).
*/
export interface ChartStore {
drawings: Record<string, unknown>;
templates: Record<string, unknown>;
/**
 * A single control point (time/price coordinate) of a chart shape.
 */
export interface ControlPoint {
time: number; // unix timestamp
price: number;
channel?: string; // presumably which price channel of the candle the point snaps to — TODO confirm semantics
}
/**
* Shape (drawing/annotation) on a TradingView chart.
*/
export interface Shape {
id: string; // shape ID; ShapesStore is keyed by this value
type: string; // TradingView drawing type name — confirm against the charting library's shape types
points: ControlPoint[]; // time/price control points defining the shape
color?: string;
line_width?: number;
line_style?: string;
properties?: Record<string, any>; // passthrough TradingView shape properties; schema not defined here
symbol?: string; // symbol the shape belongs to — presumably same format as chartState.symbol; confirm
created_at?: number; // unix timestamp — assumed; not set anywhere in this file
modified_at?: number; // unix timestamp — assumed; not set anywhere in this file
original_id?: string; // NOTE(review): presumably the id before TradingView assigned its own (mirrors web store) — confirm
}
/**
* Shapes store - persistent, stores TradingView drawings and annotations.
* Keys are shape IDs (Shape.id), one entry per shape on the chart.
*/
export type ShapesStore = Record<string, Shape>;
/**
* Indicator instance on a TradingView chart.
*/
export interface IndicatorInstance {
id: string; // instance ID; IndicatorsStore is keyed by this value
talib_name: string; // backend (TA-Lib) indicator name; mapped to a TradingView study name by the web client
instance_name: string;
parameters: Record<string, any>; // backend parameter set — schema depends on talib_name
tv_study_id?: string; // TradingView study id once the study exists on the chart
tv_indicator_name?: string; // TradingView-side study name, when known
tv_inputs?: Record<string, any>; // TradingView input values — schema depends on the study
visible: boolean;
pane: string; // target pane — presumably main vs. separate pane; TODO confirm allowed values
symbol?: string; // symbol the indicator is attached to — assumed same format as chartState.symbol
created_at?: number; // unix timestamp — assumed; not set anywhere in this file
modified_at?: number; // unix timestamp — assumed; not set anywhere in this file
original_id?: string; // NOTE(review): presumably the id before TradingView assigned its own — confirm
}
/**
* Indicators store - persistent, stores TradingView studies/indicators.
* Keys are indicator instance IDs (IndicatorInstance.id).
*/
export type IndicatorsStore = Record<string, IndicatorInstance>;
/**
* Channel state - transient, tracks connected channels.
* NOTE: This store is gateway-only and should NOT be synced to web clients.
*/
export interface ChannelState {
connected: Record<string, ChannelInfo>;
@@ -233,7 +283,8 @@ export interface ChannelInfo {
*/
export interface WorkspaceStores {
chartState: ChartState;
chartStore: ChartStore;
shapes: ShapesStore;
indicators: IndicatorsStore;
channelState: ChannelState;
[key: string]: unknown;
}

View File

@@ -271,6 +271,53 @@ export class WorkspaceManager {
return this.registry.getStoreNames();
}
/**
 * Snapshot every registered store and serialize the combined workspace
 * state as pretty-printed (2-space) JSON. Stores whose state is
 * undefined are omitted from the snapshot.
 */
serializeState(): string {
  const snapshot = Object.fromEntries(
    this.stores
      .map((cfg) => [cfg.name, this.registry.getState(cfg.name)] as const)
      .filter(([, value]) => value !== undefined)
  );
  return JSON.stringify(snapshot, null, 2);
}
/**
 * Highest sequence number observed across all registered stores.
 * Returns 0 when no store has advanced (or no stores are registered).
 */
getCurrentSeq(): number {
  const seqs = this.registry
    .getStoreNames()
    .map((name) => this.registry.getSeq(name));
  // Seed with 0 so an empty registry (or all-zero seqs) yields 0.
  return Math.max(0, ...seqs);
}
/**
* Get all patches since a given sequence number across all stores.
* Returns patches grouped by store name; stores with no changes (or whose
* history is unavailable, where getPatchesSince yields null) are omitted.
*
* NOTE(review): sinceSeq is compared against each store's own sequence
* counter, while getCurrentSeq() reports the max across stores — confirm
* the per-store counters share one clock; otherwise a global sinceSeq can
* silently drop changes from stores whose counters lag behind.
*/
getChangesSince(sinceSeq: number): Record<string, JsonPatchOp[]> {
const changes: Record<string, JsonPatchOp[]> = {};
for (const storeConfig of this.stores) {
// null (history unavailable) and empty arrays are both skipped here.
const patches = this.registry.getPatchesSince(storeConfig.name, sinceSeq);
if (patches && patches.length > 0) {
changes[storeConfig.name] = patches;
}
}
return changes;
}
// ===========================================================================
// Path Triggers
// ===========================================================================

View File

@@ -5,10 +5,10 @@ import SplitterPanel from 'primevue/splitterpanel'
import ChartView from './components/ChartView.vue'
import ChatPanel from './components/ChatPanel.vue'
import LoginScreen from './components/LoginScreen.vue'
import { useOrderStore } from './stores/orders'
import { useChartStore } from './stores/chart'
import { useShapeStore } from './stores/shapes'
import { useIndicatorStore } from './stores/indicators'
import { useChannelStore } from './stores/channel'
import { useStateSync } from './composables/useStateSync'
import { wsManager } from './composables/useWebSocket'
import { authService } from './composables/useAuth'
@@ -30,10 +30,10 @@ const chartStore = useChartStore()
watch(isMobile, (mobile) => {
if (mobile) {
// Set all chart state to null when chart is hidden
chartStore.chart_state.symbol = null as any
chartStore.chart_state.start_time = null
chartStore.chart_state.end_time = null
chartStore.chart_state.interval = null as any
chartStore.symbol = null as any
chartStore.start_time = null
chartStore.end_time = null
chartStore.period = null as any
}
})
@@ -90,15 +90,15 @@ const handleAuthenticate = async (email: string, password: string) => {
const initializeApp = async () => {
// Initialize state sync after successful authentication
const orderStore = useOrderStore()
const chartStore = useChartStore()
const shapeStore = useShapeStore()
const indicatorStore = useIndicatorStore()
const channelStore = useChannelStore()
const stateSync = useStateSync({
OrderStore: orderStore,
ChartStore: chartStore,
ShapeStore: shapeStore,
IndicatorStore: indicatorStore
chartState: chartStore,
shapes: shapeStore,
indicators: indicatorStore,
channelState: channelStore
})
stateSyncCleanup = stateSync.cleanup
}

View File

@@ -29,9 +29,9 @@ onMounted(() => {
datafeed = createTradingViewDatafeed()
tvWidget = new window.TradingView.widget({
symbol: chartStore.chart_state.symbol, // Use symbol from store
symbol: chartStore.symbol, // Use symbol from store
datafeed: datafeed,
interval: chartStore.chart_state.interval as any,
interval: chartStore.period as any,
container: chartContainer.value!,
library_path: '/charting_library/',
locale: 'en',
@@ -167,8 +167,8 @@ function initializeVisibleRange() {
to: new Date(endTime * 1000).toISOString()
})
chartStore.chart_state.start_time = startTime
chartStore.chart_state.end_time = endTime
chartStore.start_time = startTime
chartStore.end_time = endTime
}
}
@@ -183,16 +183,16 @@ function setupChartListeners() {
if (symbolInfo && symbolInfo.ticker) {
console.log('[ChartView] Symbol changed to:', symbolInfo.ticker)
isUpdatingFromChart = true
chartStore.chart_state.symbol = symbolInfo.ticker
chartStore.symbol = symbolInfo.ticker
isUpdatingFromChart = false
}
})
// Listen for interval changes
// Listen for period changes
chart.onIntervalChanged().subscribe(null, (interval: string) => {
console.log('[ChartView] Interval changed to:', interval)
console.log('[ChartView] Period changed to:', interval)
isUpdatingFromChart = true
chartStore.chart_state.interval = interval
chartStore.period = interval
isUpdatingFromChart = false
})
@@ -210,8 +210,8 @@ function setupChartListeners() {
})
isUpdatingFromChart = true
chartStore.chart_state.start_time = startTime
chartStore.chart_state.end_time = endTime
chartStore.start_time = startTime
chartStore.end_time = endTime
isUpdatingFromChart = false
}
})
@@ -224,7 +224,7 @@ function setupStoreWatchers() {
// Watch for external changes to symbol (e.g., from backend/agent)
watch(
() => chartStore.chart_state.symbol,
() => chartStore.symbol,
(newSymbol) => {
if (isUpdatingFromChart) return // Ignore updates that came from the chart itself
@@ -238,16 +238,16 @@ function setupStoreWatchers() {
}
)
// Watch for external changes to interval
// Watch for external changes to period
watch(
() => chartStore.chart_state.interval,
(newInterval) => {
() => chartStore.period,
(newPeriod) => {
if (isUpdatingFromChart) return
console.log('[ChartView] Store interval changed externally to:', newInterval)
if (chart.resolution() !== newInterval) {
chart.setResolution(newInterval, () => {
console.log('[ChartView] Chart interval updated to:', newInterval)
console.log('[ChartView] Store period changed externally to:', newPeriod)
if (chart.resolution() !== newPeriod) {
chart.setResolution(newPeriod, () => {
console.log('[ChartView] Chart period updated to:', newPeriod)
})
}
}

View File

@@ -412,7 +412,7 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
return
}
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
// If studyId is actually an object, extract the real values
let actualStudyId = studyId
@@ -539,7 +539,7 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
return
}
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
if (studyId && typeof studyId === 'string') {
console.log('[Indicators] Study properties changed for ID:', studyId)
@@ -671,7 +671,7 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
const chart = tvWidget.activeChart()
if (!chart) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
const allStudies = chart.getAllStudies()
if (!allStudies) return
@@ -755,7 +755,7 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
return
}
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
// Find added indicators
for (const [id, indicator] of Object.entries(newIndicators)) {
@@ -815,7 +815,7 @@ export function useTradingViewIndicators(tvWidget: IChartingLibraryWidget) {
const chart = tvWidget.activeChart()
if (!chart) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
const tvName = ALL_BACKEND_TO_TV_NAMES[indicator.talib_name]
if (!tvName) {

View File

@@ -207,7 +207,7 @@ export function useTradingViewShapes(tvWidget: IChartingLibraryWidget) {
const chart = tvWidget.activeChart()
if (!chart) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
if (eventType === 'remove') {
isUpdatingStore = true
@@ -394,7 +394,7 @@ export function useTradingViewShapes(tvWidget: IChartingLibraryWidget) {
if (!allShapes) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
// Track which shape IDs we've seen
const seenIds = new Set<string>()
@@ -471,7 +471,7 @@ export function useTradingViewShapes(tvWidget: IChartingLibraryWidget) {
const chart = tvWidget.activeChart()
if (!chart) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
// Find added shapes
for (const [id, shape] of Object.entries(newShapes)) {
@@ -530,11 +530,11 @@ export function useTradingViewShapes(tvWidget: IChartingLibraryWidget) {
const chart = tvWidget.activeChart()
if (!chart) return
const currentSymbol = chartStore.chart_state.symbol
const currentSymbol = chartStore.symbol
// Get current chart interval and convert to seconds for timestamp canonicalization
const interval = chartStore.chart_state.interval
const intervalSeconds = intervalToSeconds(interval)
// Get current chart period and convert to seconds for timestamp canonicalization
const period = chartStore.period
const intervalSeconds = intervalToSeconds(period)
// Convert points to TradingView format and canonicalize timestamps to candle boundaries
const tvPoints = shape.points.map(p => {
@@ -715,7 +715,7 @@ export function useTradingViewShapes(tvWidget: IChartingLibraryWidget) {
.map((obj: any) => obj.id)
console.log('[TradingView Shapes] Selection changed:', selectedShapeIds)
chartStore.chart_state.selected_shapes = selectedShapeIds
chartStore.selected_shapes = selectedShapeIds
})
} catch (error) {

View File

@@ -14,6 +14,11 @@ class WebSocketManager {
public isConnected = ref(false)
public isAuthenticated = ref(false)
private token: string | null = null
private messageQueue: WebSocketMessage[] = []
private reconnectAttempts = 0
private maxReconnectAttempts = Infinity // Keep trying indefinitely
private reconnectDelay = 1000 // Start with 1 second
private maxReconnectDelay = 15000 // Max 15 seconds
/**
* Connect to WebSocket with JWT token for authentication
@@ -63,6 +68,7 @@ class WebSocketManager {
console.log('[WebSocket] Connected successfully')
this.isConnected.value = true
this.isAuthenticated.value = false // Wait for 'connected' message from server
this.reconnectAttempts = 0 // Reset reconnection counter
resolve()
}
@@ -76,6 +82,8 @@ class WebSocketManager {
if (message.type === 'connected') {
console.log('[WebSocket] Received connected message, marking as authenticated')
this.isAuthenticated.value = true
// Flush any queued messages now that we're authenticated
this.flushMessageQueue()
}
// Pass to all handlers
@@ -96,6 +104,11 @@ class WebSocketManager {
this.isConnected.value = false
this.isAuthenticated.value = false
console.log('WebSocket disconnected:', event.code, event.reason)
// Attempt to reconnect if we have a token
if (this.token && !event.wasClean) {
this.scheduleReconnect()
}
}
// Connection timeout
@@ -117,19 +130,69 @@ class WebSocketManager {
}
send(message: WebSocketMessage) {
console.log('[WebSocket] Attempting to send message:', message.type, 'readyState:', this.ws?.readyState)
if (this.ws?.readyState === WebSocket.OPEN) {
console.log('[WebSocket] Sending message:', JSON.stringify(message))
console.log('[WebSocket] Sending message:', message.type)
this.ws.send(JSON.stringify(message))
} else {
console.error('[WebSocket] Cannot send message - WebSocket not open. State:', this.ws?.readyState)
console.log('[WebSocket] Queuing message (not connected yet):', message.type, '- Queue size:', this.messageQueue.length + 1)
this.messageQueue.push(message)
// Trigger reconnection if not already in progress
if (this.token && !this.reconnectTimeout) {
this.scheduleReconnect()
}
}
}
/**
* Schedule a single reconnection attempt with exponential backoff
* (reconnectDelay * 2^attempts, capped at maxReconnectDelay). No-op if a
* reconnect is already pending. On success the attempt counter resets and
* queued messages are flushed; on failure the next attempt is expected to
* be scheduled again via the socket's onclose handler.
*
* NOTE(review): this.reconnectTimeout is used here but is not among the
* fields added in this change — confirm it is declared on the class
* (e.g. `private reconnectTimeout: number | null = null`).
* NOTE(review): maxReconnectAttempts is declared but never consulted here,
* so retries are effectively unlimited — confirm that is intended.
* NOTE(review): flushMessageQueue() runs when connect() resolves (onopen),
* which is before the server's 'connected' auth message; send() only
* checks readyState, so queued messages may go out pre-auth — confirm.
*/
private scheduleReconnect() {
if (this.reconnectTimeout) {
return // Reconnection already scheduled
}
// Exponential backoff with max delay
const delay = Math.min(
this.reconnectDelay * Math.pow(2, this.reconnectAttempts),
this.maxReconnectDelay
)
console.log(`[WebSocket] Scheduling reconnect in ${delay}ms (attempt ${this.reconnectAttempts + 1})`)
this.reconnectTimeout = window.setTimeout(() => {
this.reconnectTimeout = null
this.reconnectAttempts++
if (this.token) {
console.log('[WebSocket] Attempting to reconnect...')
this.connect(this.token)
.then(() => {
console.log('[WebSocket] Reconnected successfully')
this.reconnectAttempts = 0 // Reset counter on success
this.flushMessageQueue()
})
.catch(err => {
console.error('[WebSocket] Reconnection failed:', err)
// scheduleReconnect will be called again via onclose
})
}
}, delay)
}
/**
 * Re-send every message queued while the socket was unavailable. The
 * queue is swapped out before sending so that anything re-queued by
 * send() (e.g. the socket dropped again mid-flush) lands in a fresh
 * queue instead of being iterated twice.
 */
private flushMessageQueue() {
  if (this.messageQueue.length === 0) {
    return
  }
  console.log(`[WebSocket] Flushing ${this.messageQueue.length} queued messages`)
  const pending = this.messageQueue
  this.messageQueue = []
  for (const msg of pending) {
    this.send(msg)
  }
}
disconnect() {
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout)
this.reconnectTimeout = null
}
this.token = null // Clear token to prevent auto-reconnect
this.messageQueue = [] // Clear message queue
this.reconnectAttempts = 0
if (this.ws) {
this.ws.close()
this.ws = null

24
web/src/stores/channel.ts Normal file
View File

@@ -0,0 +1,24 @@
import { defineStore } from 'pinia'
import { ref } from 'vue'

/**
 * Capabilities and metadata for one connected channel, as received from
 * the gateway's channelState store.
 */
export interface ChannelInfo {
type: string // channel type identifier — presumably e.g. 'web'; confirm against gateway ChannelInfo
connectedAt: number // connection time — unix timestamp assumed; TODO confirm units
capabilities: {
supportsSync: boolean
supportsImages: boolean
supportsMarkdown: boolean
supportsStreaming: boolean
supportsTradingViewEmbed: boolean
}
}

/**
 * Synced channel state: currently connected channels keyed by an
 * identifier whose semantics are not visible here (TODO confirm key).
 */
export interface ChannelState {
connected: Record<string, ChannelInfo>
}

// Store id 'channelState' matches the gateway store name so the
// state-sync layer can map incoming patches onto it by name.
// NOTE(review): the gateway types mark channelState as gateway-only and
// "should NOT be synced to web clients" — confirm whether this web store
// should really be registered with useStateSync.
export const useChannelStore = defineStore('channelState', () => {
const connected = ref<Record<string, ChannelInfo>>({})
return { connected }
})

View File

@@ -5,18 +5,16 @@ export interface ChartState {
symbol: string
start_time: number | null
end_time: number | null
interval: string
period: string
selected_shapes: string[]
}
export const useChartStore = defineStore('ChartStore', () => {
const chart_state = ref<ChartState>({
symbol: 'BINANCE:BTC/USDT',
start_time: null,
end_time: null,
interval: '15',
selected_shapes: []
})
export const useChartStore = defineStore('chartState', () => {
const symbol = ref<string>('BINANCE:BTC/USDT')
const start_time = ref<number | null>(null)
const end_time = ref<number | null>(null)
const period = ref<string>('15')
const selected_shapes = ref<string[]>([])
return { chart_state }
return { symbol, start_time, end_time, period, selected_shapes }
})

View File

@@ -17,7 +17,7 @@ export interface IndicatorInstance {
original_id?: string
}
export const useIndicatorStore = defineStore('IndicatorStore', () => {
export const useIndicatorStore = defineStore('indicators', () => {
const indicators = ref<Record<string, IndicatorInstance>>({})
// Helper methods

View File

@@ -21,7 +21,7 @@ export interface Shape {
original_id?: string // Original ID from backend/agent before TradingView assigns its own ID
}
export const useShapeStore = defineStore('ShapeStore', () => {
export const useShapeStore = defineStore('shapes', () => {
const shapes = ref<Record<string, Shape>>({})
// Helper methods