client-py connected
This commit is contained in:
@@ -68,6 +68,9 @@ ENV PYTHONUNBUFFERED=1 \
|
||||
ZMQ_XPUB_PORT=5570 \
|
||||
ZMQ_GATEWAY_ENDPOINT=tcp://gateway:5571 \
|
||||
MCP_SERVER_NAME=dexorder-user \
|
||||
MCP_TRANSPORT=sse \
|
||||
MCP_HTTP_PORT=3000 \
|
||||
MCP_HTTP_HOST=0.0.0.0 \
|
||||
IDLE_TIMEOUT_MINUTES=15 \
|
||||
ENABLE_IDLE_SHUTDOWN=true
|
||||
|
||||
|
||||
@@ -11,6 +11,10 @@ Provides high-level APIs for:
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
# Suppress the protobuf warning - it's handled at import time
|
||||
import warnings
|
||||
warnings.filterwarnings('ignore', message='Warning: Protobuf files not found')
|
||||
|
||||
from .ohlc_client import OHLCClient
|
||||
from .iceberg_client import IcebergClient
|
||||
from .history_client import HistoryClient
|
||||
|
||||
559
client-py/dexorder/api/category_tools.py
Normal file
559
client-py/dexorder/api/category_tools.py
Normal file
@@ -0,0 +1,559 @@
|
||||
"""
|
||||
Category-based File Management Tools for MCP Server
|
||||
|
||||
Provides write/edit/read/list tools for categorized Python scripts with automatic
|
||||
directory structure, metadata management, and validation harnesses.
|
||||
|
||||
Categories:
|
||||
- strategy: Trading strategies with specific data feed requirements
|
||||
- indicator: Technical indicators with configurable parameters
|
||||
- research: Research scripts with pyplot visualization support
|
||||
|
||||
File Structure:
|
||||
{DATA_DIR}/{category}/{sanitized_name}/
|
||||
- implementation.py # Python implementation
|
||||
- metadata.json # Category-specific metadata (includes name and description)
|
||||
|
||||
After write/edit operations, a category-specific test harness runs to validate
|
||||
the code and capture errors/output for agent feedback.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from dataclasses import dataclass, asdict
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Categories and Metadata
|
||||
# =============================================================================
|
||||
|
||||
class Category(str, Enum):
    """Supported file categories.

    Inherits from ``str`` so members compare equal to (and serialize as)
    their plain string values, e.g. ``Category("strategy")`` round-trips.
    Member order is significant: it drives directory creation and the
    order of names in the invalid-category error message.
    """
    STRATEGY = "strategy"    # trading strategies with data feed requirements
    INDICATOR = "indicator"  # technical indicators with configurable parameters
    RESEARCH = "research"    # research scripts with pyplot visualization support
|
||||
|
||||
|
||||
@dataclass
class BaseMetadata:
    """Base metadata shared by all categories (persisted as metadata.json)."""
    name: str  # Display name (can have special chars)
    description: str  # LLM-generated description
|
||||
|
||||
|
||||
@dataclass
class StrategyMetadata(BaseMetadata):
    """Metadata for trading strategies."""
    # None is a sentinel (dataclasses forbid a mutable [] default directly);
    # __post_init__ normalizes it to an empty list, so the attribute is
    # always a list after construction.
    data_feeds: Optional[list[str]] = None  # Required data feeds (e.g., ["BTC/USD", "ETH/USD"])

    def __post_init__(self):
        # Replace the None sentinel (default or explicitly passed) with [].
        if self.data_feeds is None:
            self.data_feeds = []
|
||||
|
||||
|
||||
@dataclass
class IndicatorMetadata(BaseMetadata):
    """Metadata for technical indicators."""
    default_length: int = 14  # Default period/length parameter
|
||||
|
||||
|
||||
@dataclass
class ResearchMetadata(BaseMetadata):
    """Metadata for research scripts (no extra fields beyond the base yet)."""
    # Future: data_sources, dependencies, etc.
    pass
|
||||
|
||||
|
||||
# Metadata class registry
|
||||
# Maps each category to the dataclass used to validate its metadata.json;
# write()/edit() instantiate the class to reject unknown/malformed fields.
METADATA_CLASSES: dict[Category, type[BaseMetadata]] = {
    Category.STRATEGY: StrategyMetadata,
    Category.INDICATOR: IndicatorMetadata,
    Category.RESEARCH: ResearchMetadata,
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Utilities
|
||||
# =============================================================================
|
||||
|
||||
def sanitize_name(name: str) -> str:
    """
    Sanitize a name for use as a directory name.

    Converts special characters to underscores and preserves alphanumerics
    (note ``\\w`` keeps existing underscores, so each non-alphanumeric char
    becomes its own underscore before deduplication).
    Examples:
        "Tim's 5/13 EMA+" -> "Tim_s_5_13_EMA"
        "My Strategy (v2)" -> "My_Strategy_v2"
    """
    # Replace non-alphanumeric chars (except spaces/hyphens) with underscores
    sanitized = re.sub(r'[^\w\s\-]', '_', name)
    # Replace spaces and hyphens with underscores
    sanitized = re.sub(r'[\s\-]+', '_', sanitized)
    # Remove duplicate underscores
    sanitized = re.sub(r'_+', '_', sanitized)
    # Strip leading/trailing underscores
    sanitized = sanitized.strip('_')
    return sanitized
|
||||
|
||||
|
||||
def get_category_path(data_dir: Path, category: Category, name: str) -> Path:
    """
    Resolve the on-disk directory for a category item.

    Args:
        data_dir: Base data directory
        category: Category enum
        name: Display name (sanitized before use as a path component)

    Returns:
        Path to the category item directory
    """
    return data_dir.joinpath(category.value, sanitize_name(name))
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Category File Manager
|
||||
# =============================================================================
|
||||
|
||||
class CategoryFileManager:
    """
    Manages category-based file operations with validation.

    Items live at {data_dir}/{category}/{sanitized_name}/ containing
    implementation.py and metadata.json; write/edit run a per-category
    validation harness and include its results in the returned dict.
    """
|
||||
|
||||
def __init__(self, data_dir: Path):
    """Create a manager rooted at *data_dir*, pre-creating category dirs."""
    self.data_dir = data_dir

    # Pre-create one subdirectory per category so later writes and
    # listings never fail on a missing parent.
    for cat in Category:
        subdir = data_dir / cat.value
        subdir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def write(
    self,
    category: str,
    name: str,
    description: str,
    code: str,
    metadata: Optional[dict] = None
) -> dict[str, Any]:
    """
    Write a new category file with metadata.

    Args:
        category: Category name (strategy, indicator, research)
        name: Display name for the item
        description: LLM-generated description (required)
        code: Python implementation code
        metadata: Additional category-specific metadata fields

    Returns:
        dict with:
          - success: bool
          - path: str - path to the implementation file
          - validation: dict - results from test harness
          - error: str (if any)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {
            "success": False,
            "error": f"Invalid category '{category}'. Must be one of: {', '.join(c.value for c in Category)}"
        }

    # Get item directory (existing items are silently overwritten)
    item_dir = get_category_path(self.data_dir, cat, name)
    item_dir.mkdir(parents=True, exist_ok=True)

    # Write implementation
    impl_path = item_dir / "implementation.py"
    try:
        impl_path.write_text(code)
        log.info(f"Wrote {cat.value} implementation: {impl_path}")
    except Exception as e:
        return {"success": False, "error": f"Failed to write implementation: {e}"}

    # Build metadata. Copy the caller's dict so injecting name/description
    # below does not mutate the caller's argument (the original
    # `metadata or {}` aliased it and wrote into it).
    meta_dict = dict(metadata) if metadata else {}
    meta_dict["name"] = name
    meta_dict["description"] = description

    # Validate via the category's dataclass (unknown keys raise) and persist
    try:
        metadata_class = METADATA_CLASSES[cat]
        meta_obj = metadata_class(**meta_dict)
        validated_meta = asdict(meta_obj)

        meta_path = item_dir / "metadata.json"
        meta_path.write_text(json.dumps(validated_meta, indent=2))
        log.info(f"Wrote metadata: {meta_path}")
    except Exception as e:
        return {"success": False, "error": f"Failed to write metadata: {e}"}

    # Run validation harness; overall success mirrors the harness result
    validation = self._validate(cat, item_dir)

    return {
        "success": validation["success"],
        "path": str(impl_path),
        "validation": validation,
    }
|
||||
|
||||
def edit(
    self,
    category: str,
    name: str,
    code: Optional[str] = None,
    description: Optional[str] = None,
    metadata: Optional[dict] = None
) -> dict[str, Any]:
    """
    Edit an existing category file.

    Omitted arguments leave the corresponding data untouched; metadata
    updates are merged into the existing metadata.json and re-validated
    against the category's dataclass.

    Args:
        category: Category name
        name: Display name for the item
        code: Python implementation code (optional, omit to keep existing)
        description: Updated description (optional, omit to keep existing)
        metadata: Additional metadata updates (optional)

    Returns:
        dict with:
          - success: bool
          - path: str - path to the implementation file
          - validation: dict - results from test harness (if code updated)
          - error: str (if any)
    """
    # Resolve the category string to the enum, rejecting unknown values.
    try:
        cat = Category(category)
    except ValueError:
        return {"success": False, "error": f"Invalid category '{category}'"}

    item_dir = get_category_path(self.data_dir, cat, name)

    # Unlike write(), edit() refuses to create a new item implicitly.
    if not item_dir.exists():
        return {"success": False, "error": f"Item '{name}' does not exist in category '{category}'"}

    impl_path = item_dir / "implementation.py"
    meta_path = item_dir / "metadata.json"

    # Load existing metadata (a missing file is tolerated; a corrupt one
    # aborts the edit before anything is written).
    try:
        existing_meta = {}
        if meta_path.exists():
            existing_meta = json.loads(meta_path.read_text())
    except Exception as e:
        return {"success": False, "error": f"Failed to read existing metadata: {e}"}

    # Update code if provided
    if code is not None:
        try:
            impl_path.write_text(code)
            log.info(f"Updated {cat.value} implementation: {impl_path}")
        except Exception as e:
            return {"success": False, "error": f"Failed to write implementation: {e}"}

    # Merge updates into a copy of the existing metadata (caller's dict is
    # not modified).
    updated_meta = existing_meta.copy()
    if description is not None:
        updated_meta["description"] = description
    if metadata:
        updated_meta.update(metadata)

    # Re-validate the merged metadata through the category dataclass and
    # persist it; unknown keys raise here and surface as an error.
    try:
        metadata_class = METADATA_CLASSES[cat]
        meta_obj = metadata_class(**updated_meta)
        validated_meta = asdict(meta_obj)

        meta_path.write_text(json.dumps(validated_meta, indent=2))
        log.info(f"Updated metadata: {meta_path}")
    except Exception as e:
        return {"success": False, "error": f"Failed to write metadata: {e}"}

    # Run validation harness only when code changed; metadata-only edits
    # skip it and report success directly.
    validation = None
    if code is not None:
        validation = self._validate(cat, item_dir)

    result = {
        "success": True,
        "path": str(impl_path),
    }
    if validation:
        result["validation"] = validation
        result["success"] = validation["success"]

    return result
|
||||
|
||||
def read(
    self,
    category: str,
    name: str
) -> dict[str, Any]:
    """
    Read a category item's implementation and metadata.

    Returns:
        dict with:
          - exists: bool
          - code: str - implementation code
          - metadata: dict - metadata
          - error: str (if any)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {"exists": False, "error": f"Invalid category '{category}'"}

    item_dir = get_category_path(self.data_dir, cat, name)
    if not item_dir.exists():
        return {"exists": False}

    try:
        impl_path = item_dir / "implementation.py"
        source = impl_path.read_text() if impl_path.exists() else ""

        meta_path = item_dir / "metadata.json"
        meta = json.loads(meta_path.read_text()) if meta_path.exists() else {}

        return {"exists": True, "code": source, "metadata": meta}
    except Exception as e:
        # I/O or JSON failure: report as non-existent with the error attached.
        return {"exists": False, "error": str(e)}
|
||||
|
||||
def list_items(self, category: str) -> dict[str, Any]:
    """
    List all items in a category with names and descriptions.

    Returns:
        dict with:
          - items: list of dicts with name, description, and full metadata
          - error: str (if any)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {"error": f"Invalid category '{category}'"}

    cat_dir = self.data_dir / cat.value
    items = []

    # __init__ creates the directory, but guard against it having been
    # removed since then — iterdir() on a missing dir raises.
    if not cat_dir.exists():
        return {"items": items}

    # Sort for a deterministic listing order regardless of filesystem.
    for item_dir in sorted(cat_dir.iterdir()):
        if not item_dir.is_dir():
            continue

        meta_path = item_dir / "metadata.json"
        if meta_path.exists():
            try:
                metadata = json.loads(meta_path.read_text())
                items.append({
                    "name": metadata.get("name", item_dir.name),
                    "description": metadata.get("description", ""),
                    "metadata": metadata,
                })
            except Exception as e:
                # Skip unreadable metadata but keep listing the rest.
                log.error(f"Failed to read metadata for {item_dir}: {e}")

    return {"items": items}
|
||||
|
||||
def _validate(self, category: Category, item_dir: Path) -> dict[str, Any]:
    """
    Dispatch to the category-specific validation harness.

    Returns:
        dict with:
          - success: bool
          - output: str - stdout/stderr from validation
          - images: list[dict] - base64-encoded images (for research)
          - error: str (if any)
    """
    impl_path = item_dir / "implementation.py"

    # Guard-clause dispatch: one harness per category.
    if category is Category.STRATEGY:
        return self._validate_strategy(impl_path)
    if category is Category.INDICATOR:
        return self._validate_indicator(impl_path)
    if category is Category.RESEARCH:
        return self._validate_research(impl_path, item_dir)
    return {"success": False, "error": f"No validator for category {category}"}
|
||||
|
||||
def _validate_strategy(self, impl_path: Path) -> dict[str, Any]:
    """
    Validate a strategy implementation.

    Byte-compiles the file in a subprocess (syntax check only; the code
    is not executed).
    """
    try:
        proc = subprocess.run(
            [sys.executable, "-m", "py_compile", str(impl_path)],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Validation timeout"}
    except Exception as e:
        return {"success": False, "error": f"Validation failed: {e}"}

    if proc.returncode != 0:
        return {
            "success": False,
            "output": proc.stderr,
            "error": "Syntax error in strategy",
        }
    return {
        "success": True,
        "output": "Strategy syntax valid",
    }
|
||||
|
||||
def _validate_indicator(self, impl_path: Path) -> dict[str, Any]:
    """
    Validate an indicator implementation.

    Byte-compiles the file in a subprocess (syntax check only; the code
    is not executed).
    """
    try:
        proc = subprocess.run(
            [sys.executable, "-m", "py_compile", str(impl_path)],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Validation timeout"}
    except Exception as e:
        return {"success": False, "error": f"Validation failed: {e}"}

    if proc.returncode != 0:
        return {
            "success": False,
            "output": proc.stderr,
            "error": "Syntax error in indicator",
        }
    return {
        "success": True,
        "output": "Indicator syntax valid",
    }
|
||||
|
||||
def _validate_research(self, impl_path: Path, item_dir: Path) -> dict[str, Any]:
    """
    Validate a research script.

    Runs the script in a subprocess via a wrapper that captures stdout and
    any open pyplot figures (as base64 PNGs), then parses the wrapper's
    JSON result.

    Returns:
        dict with:
          - success: bool
          - output: str - captured stdout (or stderr on failure)
          - images: list[dict] - {"format": "png", "data": <base64>} entries
          - error: str (if any)
    """
    # Create a wrapper script that captures pyplot figures.
    # NOTE: doubled braces ({{ }}) are f-string escapes; only {impl_path}
    # is interpolated here — the rest is literal wrapper source executed
    # in the subprocess (which must have matplotlib available).
    wrapper_code = f"""
import sys
import io
import base64
import json
from pathlib import Path
import matplotlib
matplotlib.use('Agg')  # Non-interactive backend
import matplotlib.pyplot as plt

# Capture stdout
old_stdout = sys.stdout
sys.stdout = io.StringIO()

# Run user code
user_code_path = Path(r"{impl_path}")
try:
    exec(compile(user_code_path.read_text(), user_code_path, 'exec'), {{}})
except Exception as e:
    print(f"ERROR: {{e}}", file=sys.stderr)
    sys.exit(1)

# Get stdout
output = sys.stdout.getvalue()
sys.stdout = old_stdout

# Capture all pyplot figures as base64 PNGs
images = []
for fig_num in plt.get_fignums():
    fig = plt.figure(fig_num)
    buf = io.BytesIO()
    fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
    buf.seek(0)
    img_b64 = base64.b64encode(buf.read()).decode('utf-8')
    images.append({{"format": "png", "data": img_b64}})
    buf.close()

plt.close('all')

# Output results as JSON
result = {{
    "output": output,
    "images": images,
}}
print(json.dumps(result))
"""

    try:
        result = subprocess.run(
            [sys.executable, "-c", wrapper_code],
            capture_output=True,
            text=True,
            timeout=30,
            cwd=str(item_dir),  # run relative to the item's own directory
        )

        if result.returncode == 0:
            try:
                data = json.loads(result.stdout)
                return {
                    "success": True,
                    "output": data["output"],
                    "images": data["images"],
                }
            except json.JSONDecodeError:
                # Wrapper exited cleanly but stdout wasn't the expected JSON
                # payload; fall back to raw stdout with no images.
                return {
                    "success": True,
                    "output": result.stdout,
                    "images": [],
                }
        else:
            return {
                "success": False,
                "output": result.stderr,
                "error": "Research script execution failed",
            }
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Research script timeout"}
    except Exception as e:
        return {"success": False, "error": f"Validation failed: {e}"}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Global Manager Instance
|
||||
# =============================================================================
|
||||
|
||||
_category_manager: Optional[CategoryFileManager] = None


def get_category_manager(data_dir: Optional[Path] = None) -> CategoryFileManager:
    """Return the process-wide category file manager, creating it on first use.

    Raises:
        ValueError: if called before first initialization without a data_dir.
    """
    global _category_manager
    if _category_manager is not None:
        return _category_manager
    if data_dir is None:
        raise ValueError("data_dir required for first initialization")
    _category_manager = CategoryFileManager(data_dir)
    return _category_manager
|
||||
@@ -16,12 +16,11 @@ import struct
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Import protobuf messages (assuming they're generated in ../protobuf)
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../protobuf'))
|
||||
# Import protobuf messages from generated package
|
||||
try:
|
||||
from ingestor_pb2 import SubmitHistoricalRequest, SubmitResponse, HistoryReadyNotification
|
||||
from dexorder.generated.ingestor_pb2 import SubmitHistoricalRequest, SubmitResponse, HistoryReadyNotification
|
||||
except ImportError:
|
||||
print("Warning: Protobuf files not found. Run: protoc -I ../protobuf --python_out=../protobuf ../protobuf/*.proto")
|
||||
print("Warning: Protobuf files not found. Run: protoc --python_out=dexorder/generated --proto_path=protobuf protobuf/*.proto")
|
||||
raise
|
||||
|
||||
|
||||
|
||||
@@ -205,26 +205,36 @@ class LifecycleManager:
|
||||
_lifecycle_manager: Optional[LifecycleManager] = None
|
||||
|
||||
|
||||
def get_lifecycle_manager() -> LifecycleManager:
|
||||
def get_lifecycle_manager(
|
||||
idle_timeout_minutes: Optional[int] = None,
|
||||
enable_shutdown: Optional[bool] = None,
|
||||
) -> LifecycleManager:
|
||||
"""Get or create the global lifecycle manager instance."""
|
||||
global _lifecycle_manager
|
||||
if _lifecycle_manager is None:
|
||||
# Load configuration from environment
|
||||
idle_timeout = int(os.environ.get("IDLE_TIMEOUT_MINUTES", "15"))
|
||||
# Load configuration from environment or use provided values
|
||||
idle_timeout = idle_timeout_minutes if idle_timeout_minutes is not None else int(os.environ.get("IDLE_TIMEOUT_MINUTES", "15"))
|
||||
check_interval = int(os.environ.get("IDLE_CHECK_INTERVAL_SECONDS", "60"))
|
||||
enable_shutdown = os.environ.get("ENABLE_IDLE_SHUTDOWN", "true").lower() == "true"
|
||||
enable = enable_shutdown if enable_shutdown is not None else os.environ.get("ENABLE_IDLE_SHUTDOWN", "true").lower() == "true"
|
||||
|
||||
_lifecycle_manager = LifecycleManager(
|
||||
idle_timeout_minutes=idle_timeout,
|
||||
check_interval_seconds=check_interval,
|
||||
enable_shutdown=enable_shutdown,
|
||||
enable_shutdown=enable,
|
||||
)
|
||||
return _lifecycle_manager
|
||||
|
||||
|
||||
async def start_lifecycle_manager() -> LifecycleManager:
|
||||
async def start_lifecycle_manager(
|
||||
user_id: Optional[str] = None,
|
||||
idle_timeout_minutes: Optional[int] = None,
|
||||
enable_idle_shutdown: Optional[bool] = None,
|
||||
) -> LifecycleManager:
|
||||
"""Initialize and start the lifecycle manager."""
|
||||
manager = get_lifecycle_manager()
|
||||
manager = get_lifecycle_manager(
|
||||
idle_timeout_minutes=idle_timeout_minutes,
|
||||
enable_shutdown=enable_idle_shutdown,
|
||||
)
|
||||
manager.setup_signal_handlers()
|
||||
await manager.start()
|
||||
return manager
|
||||
|
||||
@@ -18,12 +18,20 @@ from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import yaml
|
||||
import uvicorn
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.server.sse import SseServerTransport
|
||||
from starlette.applications import Starlette
|
||||
from starlette.routing import Route, Mount
|
||||
from starlette.requests import Request
|
||||
from starlette.responses import Response
|
||||
from sse_starlette import EventSourceResponse
|
||||
|
||||
from dexorder import EventPublisher, start_lifecycle_manager, get_lifecycle_manager
|
||||
from dexorder.events import EventType, UserEvent, DeliverySpec
|
||||
from dexorder.api.workspace_tools import get_workspace_store
|
||||
from dexorder.api.category_tools import get_category_manager
|
||||
|
||||
|
||||
# =============================================================================
|
||||
@@ -68,6 +76,9 @@ class Config:
|
||||
|
||||
# MCP server settings
|
||||
self.mcp_server_name: str = os.getenv("MCP_SERVER_NAME", "dexorder-user")
|
||||
self.mcp_transport: str = os.getenv("MCP_TRANSPORT", "sse") # "stdio" or "sse"
|
||||
self.mcp_http_port: int = int(os.getenv("MCP_HTTP_PORT", "3000"))
|
||||
self.mcp_http_host: str = os.getenv("MCP_HTTP_HOST", "0.0.0.0")
|
||||
|
||||
# Lifecycle settings
|
||||
self.idle_timeout_minutes: int = int(os.getenv("IDLE_TIMEOUT_MINUTES", "15"))
|
||||
@@ -134,6 +145,10 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
workspace_store = get_workspace_store(config.workspace_dir)
|
||||
logging.info(f"Workspace store initialized at {config.workspace_dir}")
|
||||
|
||||
# Initialize category file manager
|
||||
category_manager = get_category_manager(config.data_dir)
|
||||
logging.info(f"Category manager initialized at {config.data_dir}")
|
||||
|
||||
@server.list_resources()
|
||||
async def list_resources():
|
||||
"""List available resources"""
|
||||
@@ -143,6 +158,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
"name": "Hello World",
|
||||
"description": "A simple hello world resource",
|
||||
"mimeType": "text/plain",
|
||||
"annotations": {
|
||||
"agent_accessible": True, # Available to agent for ad-hoc queries
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -170,7 +188,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
|
||||
@server.list_tools()
|
||||
async def list_tools():
|
||||
"""List available tools including workspace tools"""
|
||||
"""List available tools including workspace and category tools"""
|
||||
return [
|
||||
{
|
||||
"name": "workspace_read",
|
||||
@@ -184,6 +202,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
}
|
||||
},
|
||||
"required": ["store_name"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True, # Agent can read workspace stores
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -201,6 +222,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "data"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True, # Agent can write workspace stores
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -228,13 +252,124 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
}
|
||||
},
|
||||
"required": ["store_name", "patch"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True, # Agent can patch workspace stores
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "category_write",
|
||||
"description": "Write a new strategy, indicator, or research script with validation",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"category": {
|
||||
"type": "string",
|
||||
"enum": ["strategy", "indicator", "research"],
|
||||
"description": "Category of the script"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Display name (can contain special characters)"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "LLM-generated description of what this does (required)"
|
||||
},
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "Python implementation code"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"description": "Optional category-specific metadata (e.g., default_length for indicators, data_feeds for strategies)"
|
||||
}
|
||||
},
|
||||
"required": ["category", "name", "description", "code"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True,
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "category_edit",
|
||||
"description": "Edit an existing category script (updates code, description, or metadata)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"category": {
|
||||
"type": "string",
|
||||
"enum": ["strategy", "indicator", "research"],
|
||||
"description": "Category of the script"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Display name of the existing item"
|
||||
},
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "Updated Python code (optional, omit to keep existing)"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Updated description (optional, omit to keep existing)"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"description": "Updated metadata fields (optional)"
|
||||
}
|
||||
},
|
||||
"required": ["category", "name"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True,
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "category_read",
|
||||
"description": "Read a category script and its metadata",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"category": {
|
||||
"type": "string",
|
||||
"enum": ["strategy", "indicator", "research"],
|
||||
"description": "Category of the script"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Display name of the item"
|
||||
}
|
||||
},
|
||||
"required": ["category", "name"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True,
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "category_list",
|
||||
"description": "List all items in a category with names and descriptions",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"category": {
|
||||
"type": "string",
|
||||
"enum": ["strategy", "indicator", "research"],
|
||||
"description": "Category to list"
|
||||
}
|
||||
},
|
||||
"required": ["category"]
|
||||
},
|
||||
"annotations": {
|
||||
"agent_accessible": True,
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@server.call_tool()
|
||||
async def handle_tool_call(name: str, arguments: dict):
|
||||
"""Handle tool calls including workspace tools"""
|
||||
"""Handle tool calls including workspace and category tools"""
|
||||
if name == "workspace_read":
|
||||
return workspace_store.read(arguments.get("store_name", ""))
|
||||
elif name == "workspace_write":
|
||||
@@ -247,13 +382,78 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
arguments.get("store_name", ""),
|
||||
arguments.get("patch", [])
|
||||
)
|
||||
elif name == "category_write":
|
||||
return category_manager.write(
|
||||
category=arguments.get("category", ""),
|
||||
name=arguments.get("name", ""),
|
||||
description=arguments.get("description", ""),
|
||||
code=arguments.get("code", ""),
|
||||
metadata=arguments.get("metadata")
|
||||
)
|
||||
elif name == "category_edit":
|
||||
return category_manager.edit(
|
||||
category=arguments.get("category", ""),
|
||||
name=arguments.get("name", ""),
|
||||
code=arguments.get("code"),
|
||||
description=arguments.get("description"),
|
||||
metadata=arguments.get("metadata")
|
||||
)
|
||||
elif name == "category_read":
|
||||
return category_manager.read(
|
||||
category=arguments.get("category", ""),
|
||||
name=arguments.get("name", "")
|
||||
)
|
||||
elif name == "category_list":
|
||||
return category_manager.list_items(
|
||||
category=arguments.get("category", "")
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
logging.info(f"MCP server '{config.mcp_server_name}' created with workspace tools")
|
||||
logging.info(f"MCP server '{config.mcp_server_name}' created with workspace and category tools")
|
||||
return server
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# SSE Transport Setup
|
||||
# =============================================================================
|
||||
|
||||
def create_sse_app(mcp_server: Server) -> Starlette:
|
||||
"""Create Starlette app with SSE endpoint for MCP"""
|
||||
|
||||
# Create SSE transport instance
|
||||
sse = SseServerTransport("/messages/")
|
||||
|
||||
async def handle_sse(request: Request) -> Response:
|
||||
"""Handle SSE connections for MCP"""
|
||||
async with sse.connect_sse(
|
||||
request.scope, request.receive, request._send
|
||||
) as streams:
|
||||
await mcp_server.run(
|
||||
streams[0],
|
||||
streams[1],
|
||||
mcp_server.create_initialization_options()
|
||||
)
|
||||
return Response()
|
||||
|
||||
async def handle_health(request: Request) -> Response:
|
||||
"""Health check endpoint for k8s probes and gateway readiness checks"""
|
||||
return Response(
|
||||
content='{"status":"ok"}',
|
||||
media_type="application/json"
|
||||
)
|
||||
|
||||
app = Starlette(
|
||||
routes=[
|
||||
Route("/sse", handle_sse),
|
||||
Mount("/messages/", app=sse.handle_post_message),
|
||||
Route("/health", handle_health),
|
||||
]
|
||||
)
|
||||
|
||||
return app
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Main Application
|
||||
# =============================================================================
|
||||
@@ -347,17 +547,34 @@ class UserContainer:
|
||||
logging.info("User container stopped")
|
||||
|
||||
async def run(self) -> None:
|
||||
"""Run the MCP server via stdio"""
|
||||
"""Run the MCP server with configured transport"""
|
||||
await self.start()
|
||||
|
||||
try:
|
||||
# Run MCP server on stdio
|
||||
async with stdio_server() as (read_stream, write_stream):
|
||||
await self.mcp_server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
self.mcp_server.create_initialization_options()
|
||||
if self.config.mcp_transport == "stdio":
|
||||
# Run MCP server on stdio (for dev/testing)
|
||||
logging.info("Starting MCP server with stdio transport")
|
||||
async with stdio_server() as (read_stream, write_stream):
|
||||
await self.mcp_server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
self.mcp_server.create_initialization_options()
|
||||
)
|
||||
elif self.config.mcp_transport == "sse":
|
||||
# Run MCP server via HTTP/SSE (for production)
|
||||
logging.info(f"Starting MCP server with SSE transport on {self.config.mcp_http_host}:{self.config.mcp_http_port}")
|
||||
app = create_sse_app(self.mcp_server)
|
||||
config = uvicorn.Config(
|
||||
app,
|
||||
host=self.config.mcp_http_host,
|
||||
port=self.config.mcp_http_port,
|
||||
log_level=os.getenv("LOG_LEVEL", "info").lower(),
|
||||
access_log=True,
|
||||
)
|
||||
server = uvicorn.Server(config)
|
||||
await server.serve()
|
||||
else:
|
||||
raise ValueError(f"Unknown MCP transport: {self.config.mcp_transport}")
|
||||
finally:
|
||||
await self.stop()
|
||||
|
||||
|
||||
@@ -14,8 +14,12 @@ setup(
|
||||
"protobuf>=4.25.0",
|
||||
"pyyaml>=6.0",
|
||||
"aiofiles>=23.0.0",
|
||||
"mcp>=0.9.0",
|
||||
"mcp>=1.0.0",
|
||||
"jsonpatch>=1.33",
|
||||
"starlette>=0.27.0",
|
||||
"uvicorn>=0.27.0",
|
||||
"sse-starlette>=1.6.0",
|
||||
"matplotlib>=3.7.0",
|
||||
],
|
||||
extras_require={
|
||||
"dev": [
|
||||
|
||||
Reference in New Issue
Block a user