data pipeline refactor and fix
@@ -11,7 +11,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    && rm -rf /var/lib/apt/lists/*

# Copy dependency specifications
COPY setup.py .
COPY environment.yml .
COPY dexorder/ dexorder/

@@ -27,9 +26,6 @@ RUN mkdir -p dexorder/generated && \
RUN conda env create -f environment.yml -p /build/env && \
    conda clean -afy

# Install the local package into the conda environment
RUN /build/env/bin/pip install --no-cache-dir .

# =============================================================================
# Runtime stage
# =============================================================================
@@ -75,7 +71,8 @@ RUN chmod 755 /app/entrypoint.sh && chown root:root /app/entrypoint.sh
USER dexorder

# Environment variables (can be overridden in k8s)
-ENV PYTHONUNBUFFERED=1 \
+ENV PYTHONPATH=/app \
+    PYTHONUNBUFFERED=1 \
    MPLCONFIGDIR=/tmp \
    NUMBA_CACHE_DIR=/tmp/numba_cache \
    LOG_LEVEL=INFO \
@@ -12,6 +12,7 @@ For research scripts, import and use get_api() to access the API:
"""

import logging
+import threading
from typing import Optional

from dexorder.api.api import API

@@ -23,10 +24,13 @@ log = logging.getLogger(__name__)
# Global API instance - managed by main.py
_global_api: Optional[API] = None

+# Thread-local API — used by harness threads so they don't overwrite the global
+_thread_local = threading.local()
+

def get_api() -> API:
    """
-    Get the global API instance for accessing market data and charts.
+    Get the API instance for accessing market data and charts.

    Use this in research scripts to access the data and charting APIs.

@@ -53,15 +57,27 @@ def get_api() -> API:
        # Create chart
        fig, ax = api.charting.plot_ohlc(df, title="BTC/USDT")
    """
+    # Thread-local takes priority (set by harness threads)
+    api = getattr(_thread_local, 'api', None)
+    if api is not None:
+        return api
    if _global_api is None:
        raise RuntimeError("API not initialized")
    return _global_api


def set_api(api: API) -> None:
-    """Set the global API instance. Internal use only."""
-    global _global_api
-    _global_api = api
+    """Set the API instance.
+
+    When called from the main thread, sets the global API used by all threads.
+    When called from a non-main thread (e.g. harness threads), sets a thread-local
+    API so the global is not overwritten.
+    """
+    if threading.current_thread() is threading.main_thread():
+        global _global_api
+        _global_api = api
+    else:
+        _thread_local.api = api


__all__ = ['API', 'ChartingAPI', 'DataAPI', 'get_api', 'set_api']
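For context, a minimal sketch (not part of this diff) of the resulting behavior; main_api and worker_api stand in for real API instances, and dexorder.api re-exporting get_api/set_api is assumed from the __all__ above:

import threading
from dexorder.api import set_api, get_api

def harness_worker(worker_api):
    set_api(worker_api)              # non-main thread: stored thread-locally
    assert get_api() is worker_api

set_api(main_api)                    # main thread: sets the process-wide global
t = threading.Thread(target=harness_worker, args=(worker_api,))
t.start(); t.join()
assert get_api() is main_api         # the global was not overwritten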
@@ -3,6 +3,12 @@ Conda Package Manager

Manages dynamic installation and cleanup of conda packages for user components.
Scans metadata files to determine required packages and syncs the conda environment.

+Extra packages (user-installed beyond the base container) are tracked in
+``extra_packages.json`` under ``data_dir`` so they can be removed when no
+script references them. Packages that are later promoted into the base image
+(i.e. appear in ``environment.yml``) are silently evicted from tracking
+rather than uninstalled.
"""

import json
@@ -12,6 +18,10 @@ import sys
from pathlib import Path
from typing import Optional, Set

+# Filename (stored under data_dir, outside the git repo) for tracking
+# user-installed extra packages.
+EXTRA_PACKAGES_FILENAME = "extra_packages.json"
+
log = logging.getLogger(__name__)
@@ -102,12 +112,35 @@ def get_installed_packages() -> Set[str]:
    return set()


-def install_packages(packages: list[str]) -> dict:
+def load_extra_packages(data_dir: Path) -> Set[str]:
+    """Load the set of user-installed extra packages (beyond the base container)."""
+    path = data_dir / EXTRA_PACKAGES_FILENAME
+    if path.exists():
+        try:
+            return set(json.loads(path.read_text()))
+        except Exception as e:
+            log.error(f"Failed to load extra packages: {e}")
+    return set()
+
+
+def save_extra_packages(data_dir: Path, packages: Set[str]) -> None:
+    """Persist the set of user-installed extra packages."""
+    path = data_dir / EXTRA_PACKAGES_FILENAME
+    try:
+        path.write_text(json.dumps(sorted(packages)))
+    except Exception as e:
+        log.error(f"Failed to save extra packages: {e}")
+
+
+def install_packages(packages: list[str], data_dir: Optional[Path] = None) -> dict:
    """
    Install conda packages if not already installed.

    Args:
        packages: List of package names to install
+        data_dir: If provided, newly installed packages are added to the extra
+                  package tracking file (``extra_packages.json``) so they can
+                  be cleaned up when no longer needed.

    Returns:
        dict with:
@@ -154,6 +187,10 @@ def install_packages(packages: list[str]) -> dict:

    if result.returncode == 0:
        log.info(f"Successfully installed packages: {to_install}")
+        if data_dir:
+            extras = load_extra_packages(data_dir)
+            extras.update(to_install)
+            save_extra_packages(data_dir, extras)
        return {
            "success": True,
            "installed": to_install,
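A minimal usage sketch of the new tracking behavior (the package name and data_dir value are hypothetical): installing with data_dir set records the new packages in extra_packages.json as a sorted JSON array.

from pathlib import Path

data_dir = Path("/app/data")
result = install_packages(["hmmlearn"], data_dir=data_dir)
if result["success"]:
    print((data_dir / EXTRA_PACKAGES_FILENAME).read_text())  # e.g. ["hmmlearn"]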
@@ -324,9 +361,59 @@ def get_base_packages(environment_yml: Path) -> Set[str]:


# =============================================================================
-# Sync Operation
+# Cleanup and Sync Operations
# =============================================================================

+def cleanup_extra_packages(data_dir: Path, environment_yml: Optional[Path] = None) -> dict:
+    """
+    Remove tracked extra packages that are no longer needed by any script.
+
+    Only packages previously recorded in ``extra_packages.json`` are ever
+    considered for removal — base container packages are never touched.
+
+    Packages that have since been promoted into the base container image
+    (i.e. now appear in ``environment.yml``) are quietly evicted from the
+    tracking file without being uninstalled.
+
+    Args:
+        data_dir: Base data directory (tracking file lives here)
+        environment_yml: Path to environment.yml for base package reconciliation
+
+    Returns:
+        dict with:
+        - success: bool
+        - to_remove: list[str] - packages identified for removal
+        - removed: list[str] - packages actually removed
+        - error: str (if any)
+    """
+    src_dir = data_dir / "src"
+    required = scan_metadata_packages(src_dir)
+    base = get_base_packages(environment_yml) if environment_yml and environment_yml.exists() else set()
+    extras = load_extra_packages(data_dir)
+
+    # Packages promoted into the base image are no longer "extra" — evict from tracking
+    now_base = extras & base
+    if now_base:
+        log.info(f"Packages promoted to base image, evicting from extra tracking: {now_base}")
+        extras -= now_base
+
+    # Only remove packages that are tracked as extras and no longer referenced by any script
+    to_remove = sorted(extras - required)
+    result: dict = {"success": True, "to_remove": to_remove, "removed": []}
+
+    if to_remove:
+        remove_result = remove_packages(to_remove)
+        result["success"] = remove_result["success"]
+        result["removed"] = remove_result.get("removed", [])
+        if remove_result["success"]:
+            extras -= set(to_remove)
+        else:
+            result["error"] = remove_result.get("error")
+
+    save_extra_packages(data_dir, extras)
+    return result
+
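The reconciliation reduces to plain set arithmetic; a sketch with illustrative package names:

extras   = {"hmmlearn", "ta-lib", "umap-learn"}  # previously user-installed
base     = {"umap-learn"}                        # now shipped in environment.yml
required = {"hmmlearn"}                          # still referenced by script metadata

extras -= extras & base                # "umap-learn" evicted from tracking, not uninstalled
to_remove = sorted(extras - required)  # ["ta-lib"] is what actually gets conda-removed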
def sync_packages(data_dir: Path, environment_yml: Optional[Path] = None) -> dict:
    """
    Sync conda packages with metadata requirements.
@@ -350,8 +437,8 @@ def sync_packages(data_dir: Path, environment_yml: Optional[Path] = None) -> dic
    """
    log.info("Starting conda package sync")

-    # Get required packages from metadata
-    required_packages = scan_metadata_packages(data_dir)
+    # Metadata lives under data_dir/src/category/item/metadata.json
+    required_packages = scan_metadata_packages(data_dir / "src")
    log.info(f"Required packages from metadata: {required_packages}")

    # Get base packages from environment.yml
@@ -42,6 +42,7 @@ class IcebergClient:
        s3_endpoint: Optional[str] = None,
        s3_access_key: Optional[str] = None,
        s3_secret_key: Optional[str] = None,
+       s3_region: Optional[str] = None,
    ):
        """
        Initialize Iceberg client.
@@ -52,6 +53,7 @@ class IcebergClient:
            s3_endpoint: S3/MinIO endpoint URL (e.g., "http://localhost:9000")
            s3_access_key: S3/MinIO access key
            s3_secret_key: S3/MinIO secret key
+           s3_region: S3/MinIO region (e.g., "us-east-1")
        """
        self.catalog_uri = catalog_uri
        self.namespace = namespace
@@ -64,6 +66,8 @@ class IcebergClient:
            catalog_props["s3.access-key-id"] = s3_access_key
        if s3_secret_key:
            catalog_props["s3.secret-access-key"] = s3_secret_key
+       if s3_region:
+           catalog_props["s3.region"] = s3_region

        self.catalog = load_catalog("trading", **catalog_props)
        self.table = self.catalog.load_table(f"{namespace}.ohlc")
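A hedged construction sketch (endpoint, credentials, and catalog URI are placeholders); the new s3_region value lands in the PyIceberg catalog properties as "s3.region":

client = IcebergClient(
    "http://iceberg-rest:8181",     # catalog_uri (placeholder)
    "trading",                      # namespace
    s3_endpoint="http://minio:9000",
    s3_access_key="minio",
    s3_secret_key="minio123",
    s3_region="us-east-1",
)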
@@ -15,6 +15,13 @@ log = logging.getLogger(__name__)
# Standard OHLC columns always returned
STANDARD_COLUMNS = ["timestamp", "open", "high", "low", "close"]

+# All optional columns from the OHLC proto spec, returned by default when extra_columns=None
+OHLC_OPTIONAL_COLUMNS = [
+    "volume", "buy_vol", "sell_vol",
+    "open_time", "high_time", "low_time", "close_time",
+    "open_interest",
+]
+
# All valid extra columns available in the Iceberg schema
VALID_EXTRA_COLUMNS = {
    "volume", "buy_vol", "sell_vol",
@@ -43,6 +50,7 @@ class DataAPIImpl(DataAPI):
        s3_endpoint: Optional[str] = None,
        s3_access_key: Optional[str] = None,
        s3_secret_key: Optional[str] = None,
+       s3_region: Optional[str] = None,
        request_timeout: float = 30.0,
    ):
        """
@@ -56,6 +64,7 @@ class DataAPIImpl(DataAPI):
            s3_endpoint: S3/MinIO endpoint URL (e.g., "http://minio:9000")
            s3_access_key: S3/MinIO access key
            s3_secret_key: S3/MinIO secret key
+           s3_region: S3/MinIO region (e.g., "us-east-1")
            request_timeout: Default timeout for historical data requests in seconds (default: 30)
        """
        self.ohlc_client = OHLCClient(
@@ -66,6 +75,7 @@ class DataAPIImpl(DataAPI):
            s3_endpoint=s3_endpoint,
            s3_access_key=s3_access_key,
            s3_secret_key=s3_secret_key,
+           s3_region=s3_region,
        )
        self.request_timeout = request_timeout
        self._started = False
@@ -120,7 +130,9 @@ class DataAPIImpl(DataAPI):

        # Determine which columns to fetch
        columns_to_fetch = STANDARD_COLUMNS.copy()
-       if extra_columns:
+       if extra_columns is None:
+           columns_to_fetch.extend(OHLC_OPTIONAL_COLUMNS)
+       elif extra_columns:
            columns_to_fetch.extend(extra_columns)

        # Use OHLCClient which handles smart caching:
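The three extra_columns cases, spelled out as a self-contained sketch of the selection logic above:

# extra_columns=None        -> STANDARD_COLUMNS + OHLC_OPTIONAL_COLUMNS (everything)
# extra_columns=[]          -> STANDARD_COLUMNS only
# extra_columns=["volume"]  -> STANDARD_COLUMNS + ["volume"]
columns = STANDARD_COLUMNS.copy()
extra_columns = []
if extra_columns is None:
    columns.extend(OHLC_OPTIONAL_COLUMNS)
elif extra_columns:
    columns.extend(extra_columns)
assert columns == ["timestamp", "open", "high", "low", "close"]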
@@ -93,6 +93,82 @@ def _load_strategy_class(impl_path: Path) -> type:
# Metrics extraction
# ---------------------------------------------------------------------------

+def _money_to_float(val) -> float | None:
+    """Convert a Nautilus Money object or string like '15.32 USDT' to float."""
+    if val is None:
+        return None
+    try:
+        if hasattr(val, "as_decimal"):
+            return float(val.as_decimal())
+        s = str(val).strip()
+        if s and s.lower() not in ("none", "nan"):
+            return float(s.split()[0])
+    except (ValueError, TypeError, IndexError):
+        pass
+    return None
+
+
+def _ts_to_s(raw) -> int | None:
+    """Convert a Nautilus nanosecond timestamp to Unix seconds."""
+    try:
+        return int(raw) // 1_000_000_000
+    except (TypeError, ValueError):
+        return None
+
+
+def _extract_fills(engine) -> pd.DataFrame:
+    """Return a sorted fills DataFrame from BacktestEngine, or empty DataFrame."""
+    try:
+        df = engine.trader.generate_order_fills_report()
+        if df is not None and len(df) > 0:
+            if "ts_event" in df.columns:
+                df = df.sort_values("ts_event")
+            return df
+    except Exception as exc:
+        log.debug("generate_order_fills_report() failed: %s", exc)
+    return pd.DataFrame()
+
+
+def _extract_trades(fills_df: pd.DataFrame, initial_capital: float) -> list[dict]:
+    """
+    Pair fills into round-trip trades: buy → sell or sell → buy.
+    Returns a list of trade dicts (capped at 500 for large backtests).
+    """
+    if fills_df.empty:
+        return []
+
+    trades: list[dict] = []
+    open_positions: dict[str, dict] = {}  # instrument_id -> pending entry
+
+    for _, fill in fills_df.iterrows():
+        instrument = str(fill.get("instrument_id", ""))
+        side = str(fill.get("order_side", "")).upper()
+        qty = _money_to_float(fill.get("last_qty")) or 0.0
+        price = _money_to_float(fill.get("last_px")) or 0.0
+        ts_s = _ts_to_s(fill.get("ts_event"))
+        rpnl = _money_to_float(fill.get("realized_pnl"))
+
+        if rpnl is not None and rpnl != 0.0:
+            # This fill closes a position — record as a completed trade
+            entry = open_positions.pop(instrument, None)
+            trade = {
+                "instrument": instrument,
+                "side": side,
+                "quantity": round(qty, 8),
+                "entry_price": round(entry["price"], 8) if entry else None,
+                "exit_price": round(price, 8),
+                "entry_time": entry["ts_s"] if entry else None,
+                "exit_time": ts_s,
+                "pnl": round(rpnl, 6),
+            }
+            trades.append(trade)
+        else:
+            # Opening fill — store for pairing
+            open_positions[instrument] = {"price": price, "ts_s": ts_s, "side": side}
+
+    return trades[:500]  # cap for large backtests
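A tiny sanity check of the two conversion helpers on representative inputs (values made up):

assert _money_to_float("15.32 USDT") == 15.32                 # Money string form
assert _money_to_float(None) is None
assert _ts_to_s(1_700_000_000_123_456_789) == 1_700_000_000   # ns -> Unix seconds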
def _compute_metrics(
    engine,
    venue_strs: list[str],
@@ -100,17 +176,18 @@ def _compute_metrics(
    all_bars: list,
) -> dict[str, Any]:
    """
-    Extract performance metrics from a completed BacktestEngine.
+    Extract structured performance metrics from a completed BacktestEngine.

    Returns dict with:
        total_return    float — fractional (0.15 = +15%)
        sharpe_ratio    float — annualized; 0.0 if no trades or constant equity
        max_drawdown    float — max peak-to-trough as fraction (0.10 = 10% drawdown)
        win_rate        float — fraction of trades with positive realized PnL
        trade_count     int
-       equity_curve    list[{timestamp: int_unix_s, equity: float}]
+       summary         dict — core metrics (total_return, sharpe, drawdown, etc.)
+       statistics      dict — extended stats (sortino, calmar, profit_factor, etc.)
+       trades          list — individual round-trip trades (capped at 500)
+       equity_curve    list[{timestamp: int_unix_s, equity: float}]
    """
-    # Reconstruct equity curve from fills
+    fills_df = _extract_fills(engine)
+    trades = _extract_trades(fills_df, initial_capital)
+
+    # --- Equity curve reconstruction ---
    equity_points: list[dict] = []
    if all_bars:
        equity_points.append({
|
||||
running_equity = initial_capital
|
||||
trade_count = 0
|
||||
winning_trades = 0
|
||||
total_profit = 0.0
|
||||
total_loss = 0.0
|
||||
|
||||
try:
|
||||
fills_df = engine.trader.generate_order_fills_report()
|
||||
except Exception as exc:
|
||||
log.debug("generate_order_fills_report() failed: %s", exc)
|
||||
fills_df = None
|
||||
|
||||
if fills_df is not None and len(fills_df) > 0:
|
||||
# Sort by event time
|
||||
if "ts_event" in fills_df.columns:
|
||||
fills_df = fills_df.sort_values("ts_event")
|
||||
|
||||
if not fills_df.empty:
|
||||
for _, fill in fills_df.iterrows():
|
||||
rpnl = fill.get("realized_pnl") if hasattr(fill, "get") else None
|
||||
if rpnl is None:
|
||||
rpnl = _money_to_float(fill.get("realized_pnl"))
|
||||
if rpnl is None or rpnl == 0.0:
|
||||
continue
|
||||
|
||||
# Nautilus Money objects: str form is "15.32 USDT"
|
||||
rpnl_float: float | None = None
|
||||
try:
|
||||
if hasattr(rpnl, "as_decimal"):
|
||||
rpnl_float = float(rpnl.as_decimal())
|
||||
elif rpnl is not None:
|
||||
rpnl_str = str(rpnl).strip()
|
||||
if rpnl_str and rpnl_str.lower() not in ("none", "nan"):
|
||||
rpnl_float = float(rpnl_str.split()[0])
|
||||
except (ValueError, TypeError, IndexError):
|
||||
pass
|
||||
|
||||
if rpnl_float is not None and rpnl_float != 0.0:
|
||||
ts_s: int | None = None
|
||||
raw_ts = fill.get("ts_event") if hasattr(fill, "get") else None
|
||||
if raw_ts is not None:
|
||||
try:
|
||||
ts_s = int(raw_ts) // 1_000_000_000
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
running_equity += rpnl_float
|
||||
trade_count += 1
|
||||
if rpnl_float > 0:
|
||||
winning_trades += 1
|
||||
|
||||
if ts_s is not None:
|
||||
equity_points.append({"timestamp": ts_s, "equity": running_equity})
|
||||
ts_s = _ts_to_s(fill.get("ts_event"))
|
||||
running_equity += rpnl
|
||||
trade_count += 1
|
||||
if rpnl > 0:
|
||||
winning_trades += 1
|
||||
total_profit += rpnl
|
||||
else:
|
||||
total_loss += abs(rpnl)
|
||||
if ts_s is not None:
|
||||
equity_points.append({"timestamp": ts_s, "equity": running_equity})
|
||||
|
||||
if all_bars:
|
||||
equity_points.append({
|
||||
@@ -173,19 +223,16 @@ def _compute_metrics(
|
||||
"equity": running_equity,
|
||||
})
|
||||
|
||||
# Try to get actual final balance from the account (more accurate than fill reconstruction)
|
||||
# Prefer definitive final balance from account cache
|
||||
try:
|
||||
from nautilus_trader.model.identifiers import Venue
|
||||
for venue_str in venue_strs:
|
||||
account = engine.cache.account_for_venue(Venue(venue_str))
|
||||
if account is None:
|
||||
continue
|
||||
# Sum all balances (quote currency is what we started with)
|
||||
for bal in account.balances().values():
|
||||
total = getattr(bal, "total", None)
|
||||
if total is not None:
|
||||
final_val = float(str(total).split()[0]) if not hasattr(total, "as_decimal") else float(total.as_decimal())
|
||||
# Use the account balance as the definitive final equity
|
||||
final_val = _money_to_float(getattr(bal, "total", None))
|
||||
if final_val is not None:
|
||||
running_equity = final_val
|
||||
if equity_points:
|
||||
equity_points[-1]["equity"] = running_equity
|
||||
@@ -193,36 +240,71 @@ def _compute_metrics(
|
||||
except Exception as exc:
|
||||
log.debug("Account balance extraction failed: %s", exc)
|
||||
|
||||
# Core metrics
|
||||
# --- Core metrics ---
|
||||
total_return = (running_equity - initial_capital) / initial_capital if initial_capital else 0.0
|
||||
win_rate = winning_trades / trade_count if trade_count > 0 else 0.0
|
||||
profit_factor = (total_profit / total_loss) if total_loss > 0 else (float("inf") if total_profit > 0 else 0.0)
|
||||
|
||||
# Determine bar duration for annualisation
|
||||
bar_duration_ns = 0.0
|
||||
if all_bars and len(all_bars) > 1:
|
||||
bar_duration_ns = (all_bars[-1].ts_event - all_bars[0].ts_event) / max(len(all_bars) - 1, 1)
|
||||
bars_per_year = (365 * 24 * 3600 * 1e9) / bar_duration_ns if bar_duration_ns > 0 else 0.0
|
||||
|
||||
equity_series = pd.Series([p["equity"] for p in equity_points]) if len(equity_points) > 2 else pd.Series([initial_capital, running_equity])
|
||||
returns = equity_series.pct_change().dropna()
|
||||
|
||||
# Sharpe ratio (annualized) from equity curve returns
|
||||
sharpe = 0.0
|
||||
if len(equity_points) > 2 and all_bars and len(all_bars) > 1:
|
||||
equity_series = pd.Series([p["equity"] for p in equity_points])
|
||||
returns = equity_series.pct_change().dropna()
|
||||
if len(returns) > 1 and returns.std() > 0:
|
||||
bar_duration_ns = (all_bars[-1].ts_event - all_bars[0].ts_event) / max(len(all_bars) - 1, 1)
|
||||
if bar_duration_ns > 0:
|
||||
bars_per_year = (365 * 24 * 3600 * 1e9) / bar_duration_ns
|
||||
sharpe = float((returns.mean() / returns.std()) * (bars_per_year ** 0.5))
|
||||
sortino = 0.0
|
||||
if len(returns) > 1 and bars_per_year > 0:
|
||||
mean_r = returns.mean()
|
||||
std_r = returns.std()
|
||||
if std_r > 0:
|
||||
sharpe = float((mean_r / std_r) * (bars_per_year ** 0.5))
|
||||
downside = returns[returns < 0]
|
||||
downside_std = downside.std() if len(downside) > 1 else 0.0
|
||||
if downside_std > 0:
|
||||
sortino = float((mean_r / downside_std) * (bars_per_year ** 0.5))
|
||||
|
||||
# Max drawdown
|
||||
max_drawdown = 0.0
|
||||
if len(equity_points) > 1:
|
||||
equity_arr = pd.Series([p["equity"] for p in equity_points])
|
||||
rolling_max = equity_arr.cummax()
|
||||
drawdowns = (equity_arr - rolling_max) / rolling_max.replace(0, float("nan"))
|
||||
if len(equity_series) > 1:
|
||||
rolling_max = equity_series.cummax()
|
||||
drawdowns = (equity_series - rolling_max) / rolling_max.replace(0, float("nan"))
|
||||
max_drawdown = float(abs(drawdowns.min())) if len(drawdowns) > 0 else 0.0
|
||||
|
||||
# Calmar ratio
|
||||
annualized_return = 0.0
|
||||
if bars_per_year > 0 and len(all_bars) > 1:
|
||||
years = (all_bars[-1].ts_event - all_bars[0].ts_event) / (365 * 24 * 3600 * 1e9)
|
||||
if years > 0:
|
||||
annualized_return = (running_equity / initial_capital) ** (1.0 / years) - 1 if initial_capital else 0.0
|
||||
calmar = annualized_return / max_drawdown if max_drawdown > 0 else 0.0
|
||||
|
||||
# Average win / average loss
|
||||
avg_win = total_profit / winning_trades if winning_trades > 0 else 0.0
|
||||
avg_loss = total_loss / (trade_count - winning_trades) if (trade_count - winning_trades) > 0 else 0.0
|
||||
|
||||
return {
|
||||
"total_return": round(total_return, 6),
|
||||
"sharpe_ratio": round(sharpe, 4),
|
||||
"max_drawdown": round(max_drawdown, 6),
|
||||
"win_rate": round(win_rate, 4),
|
||||
"trade_count": trade_count,
|
||||
"equity_curve": equity_points,
|
||||
"summary": {
|
||||
"total_return": round(total_return, 6),
|
||||
"sharpe_ratio": round(sharpe, 4),
|
||||
"max_drawdown": round(max_drawdown, 6),
|
||||
"win_rate": round(win_rate, 4),
|
||||
"trade_count": trade_count,
|
||||
"total_trades": len(trades),
|
||||
},
|
||||
"statistics": {
|
||||
"sortino_ratio": round(sortino, 4),
|
||||
"calmar_ratio": round(calmar, 4),
|
||||
"profit_factor": round(profit_factor, 4) if profit_factor != float("inf") else None,
|
||||
"avg_win": round(avg_win, 4),
|
||||
"avg_loss": round(avg_loss, 4),
|
||||
"total_profit": round(total_profit, 4),
|
||||
"total_loss": round(total_loss, 4),
|
||||
},
|
||||
"trades": trades,
|
||||
"equity_curve": equity_points,
|
||||
}
|
||||
|
||||
|
||||
|
||||
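As a concrete check of the annualisation above (pure arithmetic, not from the diff), for hourly bars:

bar_duration_ns = 3600 * 1e9                                 # one 1h bar, in nanoseconds
bars_per_year = (365 * 24 * 3600 * 1e9) / bar_duration_ns    # = 8760.0
# so the per-bar Sharpe is scaled by sqrt(8760) ≈ 93.6 to annualize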
@@ -13,6 +13,7 @@ make_instrument_from_metadata — instrument with best-effort precision
from __future__ import annotations

import logging
+from decimal import Decimal
from typing import Optional

import pandas as pd
@@ -71,8 +72,8 @@ def make_instrument(
    size_precision: int = 8,
    tick_size: Optional[float] = None,
    lot_size: Optional[float] = None,
-   maker_fee: float = 0.001,
-   taker_fee: float = 0.001,
+   maker_fee: float = 0.0,
+   taker_fee: float = 0.0,
    margin_init: float = 0.0,
    margin_maint: float = 0.0,
) -> CurrencyPair:
@@ -118,8 +119,8 @@ def make_instrument(
        min_price=None,
        margin_init=margin_init,
        margin_maint=margin_maint,
-       maker_fee=maker_fee,
-       taker_fee=taker_fee,
+       maker_fee=Decimal(str(maker_fee)),
+       taker_fee=Decimal(str(taker_fee)),
        ts_event=ts_now,
        ts_init=ts_now,
    )
@@ -154,8 +155,8 @@ def make_instrument_from_metadata(ticker: str) -> tuple[CurrencyPair, int, int]:
        size_precision=sp,
        tick_size=meta.tick_size,
        lot_size=meta.lot_size,
-       maker_fee=meta.maker_fee or 0.001,
-       taker_fee=meta.taker_fee or 0.001,
+       maker_fee=meta.maker_fee or 0.0,
+       taker_fee=meta.taker_fee or 0.0,
        margin_init=meta.margin_init or 0.0,
        margin_maint=meta.margin_maint or 0.0,
    )
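Why Decimal(str(...)) rather than Decimal(...) here: constructing directly from a float carries its binary representation error into the fee, while round-tripping through str keeps the intended value:

from decimal import Decimal

Decimal(0.001)       # Decimal('0.001000000000000000020816681711721685...')
Decimal(str(0.001))  # Decimal('0.001')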
@@ -39,6 +39,7 @@ class OHLCClient:
        s3_endpoint: str = None,
        s3_access_key: str = None,
        s3_secret_key: str = None,
+       s3_region: str = None,
    ):
        """
        Initialize OHLC client.
@@ -51,12 +52,14 @@ class OHLCClient:
            s3_endpoint: S3/MinIO endpoint URL (e.g., "http://localhost:9000")
            s3_access_key: S3/MinIO access key
            s3_secret_key: S3/MinIO secret key
+           s3_region: S3/MinIO region (e.g., "us-east-1")
        """
        self.iceberg = IcebergClient(
            iceberg_catalog_uri, namespace,
            s3_endpoint=s3_endpoint,
            s3_access_key=s3_access_key,
            s3_secret_key=s3_secret_key,
+           s3_region=s3_region,
        )
        self.history = HistoryClient(relay_endpoint, notification_endpoint)
        log.info("OHLCClient initialized")
@@ -122,7 +125,7 @@ class OHLCClient:

        if not missing_ranges:
            # All data exists in Iceberg
-           return self._forward_fill_gaps(df, period_seconds)
+           return df

        # Step 3: Request missing data for each range
        # For simplicity, request entire range (relay can merge adjacent requests)
sandbox/dexorder/strategy/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Strategy runtime package
sandbox/dexorder/strategy/db.py (new file, 361 lines)
@@ -0,0 +1,361 @@
"""
SQLite database for strategy execution state, trade logs, and backtest history.

All data is stored under DATA_DIR/dexorder.db.
Uses aiosqlite for async compatibility with the MCP server's event loop.
"""
from __future__ import annotations

import json
import logging
import time
from pathlib import Path
from typing import Any, Optional

log = logging.getLogger(__name__)

_SCHEMA = """
CREATE TABLE IF NOT EXISTS strategies (
    name TEXT PRIMARY KEY,
    status TEXT NOT NULL DEFAULT 'stopped',
    git_rev TEXT,
    worktree_path TEXT,
    started_at REAL,
    stopped_at REAL,
    allocation REAL NOT NULL DEFAULT 0,
    paper INTEGER NOT NULL DEFAULT 1,
    feeds_json TEXT,
    config_json TEXT
);

CREATE TABLE IF NOT EXISTS strategy_state (
    name TEXT PRIMARY KEY,
    realized_pnl REAL NOT NULL DEFAULT 0,
    unrealized_pnl REAL NOT NULL DEFAULT 0,
    trade_count INTEGER NOT NULL DEFAULT 0,
    positions_json TEXT,
    updated_at REAL NOT NULL DEFAULT 0
);

CREATE TABLE IF NOT EXISTS trades (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    strategy_name TEXT NOT NULL,
    instrument TEXT NOT NULL,
    side TEXT NOT NULL,
    quantity REAL NOT NULL,
    entry_price REAL,
    exit_price REAL NOT NULL,
    entry_time REAL,
    exit_time REAL NOT NULL,
    pnl REAL NOT NULL,
    recorded_at REAL NOT NULL DEFAULT (unixepoch())
);

CREATE TABLE IF NOT EXISTS backtest_runs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    strategy_name TEXT NOT NULL,
    ran_at REAL NOT NULL DEFAULT (unixepoch()),
    from_time REAL,
    to_time REAL,
    initial_capital REAL,
    feeds_json TEXT,
    summary_json TEXT,
    statistics_json TEXT,
    trades_json TEXT,
    equity_curve_json TEXT
);

CREATE TABLE IF NOT EXISTS strategy_events (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    strategy_name TEXT NOT NULL,
    event_type TEXT NOT NULL,
    payload_json TEXT,
    recorded_at REAL NOT NULL DEFAULT (unixepoch())
);

CREATE INDEX IF NOT EXISTS idx_trades_strategy ON trades(strategy_name);
CREATE INDEX IF NOT EXISTS idx_backtest_strategy ON backtest_runs(strategy_name);
CREATE INDEX IF NOT EXISTS idx_events_strategy ON strategy_events(strategy_name);
"""


class StrategyDB:
    """Async SQLite interface for strategy persistence."""

    def __init__(self, db_path: Path):
        self.db_path = db_path

    async def initialize(self) -> None:
        """Create tables if they don't exist."""
        import aiosqlite
        self.db_path.parent.mkdir(parents=True, exist_ok=True)
        async with aiosqlite.connect(self.db_path) as db:
            await db.executescript(_SCHEMA)
            await db.commit()
        log.info("StrategyDB initialized at %s", self.db_path)

    # ------------------------------------------------------------------
    # Strategy lifecycle
    # ------------------------------------------------------------------

    async def upsert_strategy(
        self,
        name: str,
        status: str,
        allocation: float,
        paper: bool,
        feeds: list[dict],
        git_rev: Optional[str] = None,
        worktree_path: Optional[str] = None,
        config: Optional[dict] = None,
    ) -> None:
        import aiosqlite
        now = time.time()
        async with aiosqlite.connect(self.db_path) as db:
            await db.execute("""
                INSERT INTO strategies
                    (name, status, git_rev, worktree_path, started_at, allocation, paper, feeds_json, config_json)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(name) DO UPDATE SET
                    status=excluded.status,
                    git_rev=excluded.git_rev,
                    worktree_path=excluded.worktree_path,
                    started_at=excluded.started_at,
                    allocation=excluded.allocation,
                    paper=excluded.paper,
                    feeds_json=excluded.feeds_json,
                    config_json=excluded.config_json
            """, (
                name, status, git_rev, worktree_path, now,
                allocation, int(paper),
                json.dumps(feeds),
                json.dumps(config or {}),
            ))
            await db.commit()

    async def update_strategy_status(self, name: str, status: str) -> None:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            if status == "stopped":
                await db.execute(
                    "UPDATE strategies SET status=?, stopped_at=? WHERE name=?",
                    (status, time.time(), name)
                )
            else:
                await db.execute("UPDATE strategies SET status=? WHERE name=?", (status, name))
            await db.commit()

    async def get_strategy(self, name: str) -> Optional[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute("SELECT * FROM strategies WHERE name=?", (name,)) as cur:
                row = await cur.fetchone()
                return dict(row) if row else None

    async def get_all_strategies(self) -> list[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute("SELECT * FROM strategies ORDER BY started_at DESC") as cur:
                rows = await cur.fetchall()
                return [dict(r) for r in rows]

    async def get_running_strategies(self) -> list[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(
                "SELECT * FROM strategies WHERE status='running' OR status='starting'",
            ) as cur:
                rows = await cur.fetchall()
                return [dict(r) for r in rows]

    # ------------------------------------------------------------------
    # PnL state
    # ------------------------------------------------------------------

    async def update_pnl_state(
        self,
        name: str,
        realized_pnl: float,
        unrealized_pnl: float,
        trade_count: int,
        positions: Optional[dict] = None,
    ) -> None:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            await db.execute("""
                INSERT INTO strategy_state
                    (name, realized_pnl, unrealized_pnl, trade_count, positions_json, updated_at)
                VALUES (?, ?, ?, ?, ?, ?)
                ON CONFLICT(name) DO UPDATE SET
                    realized_pnl=excluded.realized_pnl,
                    unrealized_pnl=excluded.unrealized_pnl,
                    trade_count=excluded.trade_count,
                    positions_json=excluded.positions_json,
                    updated_at=excluded.updated_at
            """, (name, realized_pnl, unrealized_pnl, trade_count,
                  json.dumps(positions or {}), time.time()))
            await db.commit()

    async def get_pnl_state(self, name: str) -> Optional[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute("SELECT * FROM strategy_state WHERE name=?", (name,)) as cur:
                row = await cur.fetchone()
                return dict(row) if row else None

    # ------------------------------------------------------------------
    # Trades
    # ------------------------------------------------------------------

    async def insert_trade(self, strategy_name: str, trade: dict) -> None:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            await db.execute("""
                INSERT INTO trades
                    (strategy_name, instrument, side, quantity, entry_price,
                     exit_price, entry_time, exit_time, pnl)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                strategy_name,
                trade.get("instrument", ""),
                trade.get("side", ""),
                trade.get("quantity", 0),
                trade.get("entry_price"),
                trade.get("exit_price", 0),
                trade.get("entry_time"),
                trade.get("exit_time", time.time()),
                trade.get("pnl", 0),
            ))
            await db.commit()

    async def get_trades(self, strategy_name: str, limit: int = 200) -> list[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(
                "SELECT * FROM trades WHERE strategy_name=? ORDER BY exit_time DESC LIMIT ?",
                (strategy_name, limit),
            ) as cur:
                rows = await cur.fetchall()
                return [dict(r) for r in rows]

    # ------------------------------------------------------------------
    # Backtest runs
    # ------------------------------------------------------------------

    async def insert_backtest(
        self,
        strategy_name: str,
        from_time: Any,
        to_time: Any,
        initial_capital: float,
        feeds: list[dict],
        summary: dict,
        statistics: dict,
        trades: list[dict],
        equity_curve: list[dict],
    ) -> int:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            cur = await db.execute("""
                INSERT INTO backtest_runs
                    (strategy_name, from_time, to_time, initial_capital, feeds_json,
                     summary_json, statistics_json, trades_json, equity_curve_json)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                strategy_name,
                float(from_time) if from_time else None,
                float(to_time) if to_time else None,
                initial_capital,
                json.dumps(feeds),
                json.dumps(summary),
                json.dumps(statistics),
                json.dumps(trades[:500]),  # cap
                json.dumps(equity_curve),
            ))
            await db.commit()
            return cur.lastrowid

    async def get_backtests(self, strategy_name: str, limit: int = 10) -> list[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(
                "SELECT * FROM backtest_runs WHERE strategy_name=? ORDER BY ran_at DESC LIMIT ?",
                (strategy_name, limit),
            ) as cur:
                rows = await cur.fetchall()
            result = []
            for r in rows:
                d = dict(r)
                for key in ("feeds_json", "summary_json", "statistics_json",
                            "trades_json", "equity_curve_json"):
                    if d.get(key):
                        plain = key.replace("_json", "")
                        d[plain] = json.loads(d.pop(key))
                    else:
                        d.pop(key, None)
                result.append(d)
            return result

    # ------------------------------------------------------------------
    # Events
    # ------------------------------------------------------------------

    async def insert_event(self, strategy_name: str, event_type: str, payload: dict) -> None:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                "INSERT INTO strategy_events (strategy_name, event_type, payload_json) VALUES (?, ?, ?)",
                (strategy_name, event_type, json.dumps(payload)),
            )
            await db.commit()

    async def get_events(
        self,
        strategy_name: str,
        event_type: Optional[str] = None,
        limit: int = 100,
    ) -> list[dict]:
        import aiosqlite
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            if event_type:
                async with db.execute(
                    "SELECT * FROM strategy_events WHERE strategy_name=? AND event_type=? "
                    "ORDER BY recorded_at DESC LIMIT ?",
                    (strategy_name, event_type, limit),
                ) as cur:
                    rows = await cur.fetchall()
            else:
                async with db.execute(
                    "SELECT * FROM strategy_events WHERE strategy_name=? "
                    "ORDER BY recorded_at DESC LIMIT ?",
                    (strategy_name, limit),
                ) as cur:
                    rows = await cur.fetchall()
            result = []
            for r in rows:
                d = dict(r)
                if d.get("payload_json"):
                    d["payload"] = json.loads(d.pop("payload_json"))
                result.append(d)
            return result


# Singleton
_db: Optional[StrategyDB] = None


def get_strategy_db(data_dir: Optional[Path] = None) -> StrategyDB:
    global _db
    if _db is None:
        if data_dir is None:
            import os
            data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
        _db = StrategyDB(data_dir / "dexorder.db")
    return _db
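A short async usage sketch of the DB layer (the path, strategy name, and feed values are illustrative):

import asyncio
from pathlib import Path

async def demo():
    db = get_strategy_db(Path("/tmp/demo-data"))
    await db.initialize()
    await db.upsert_strategy(
        name="mean_revert", status="running",
        allocation=10_000.0, paper=True,
        feeds=[{"symbol": "BTC/USDT", "period_seconds": 3600}],
    )
    print(await db.get_running_strategies())

asyncio.run(demo())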
sandbox/dexorder/strategy/event_bridge.py (new file, 152 lines)
@@ -0,0 +1,152 @@
"""
StrategyEventBridge — receives internal strategy events from subprocesses
and forwards them to the user-facing EventPublisher.

Architecture:
    Strategy subprocess ──PUSH──> [IPC socket] ──PULL──> StrategyEventBridge
                                                             └─> EventPublisher
                                                                   ├── XPUB (informational)
                                                                   └── DEALER (critical)
"""
from __future__ import annotations

import asyncio
import logging
import time
from typing import Optional

import zmq
import zmq.asyncio

from .events import StrategyEvent, StrategyEventType, IPC_ENDPOINT

log = logging.getLogger(__name__)

# How long without a heartbeat before a strategy is considered dead (seconds)
HEARTBEAT_TIMEOUT = 60.0


class StrategyEventBridge:
    """
    Binds a ZMQ PULL socket and relays strategy events to EventPublisher.

    Also monitors heartbeats to detect crashed strategy subprocesses.
    """

    def __init__(self, event_publisher, strategy_lifecycle=None):
        """
        Args:
            event_publisher: dexorder.events.publisher.EventPublisher instance
            strategy_lifecycle: StrategyLifecycleManager (optional) for marking crashed strategies
        """
        self._publisher = event_publisher
        self._lifecycle = strategy_lifecycle
        self._ctx: Optional[zmq.asyncio.Context] = None
        self._socket: Optional[zmq.asyncio.Socket] = None
        self._task: Optional[asyncio.Task] = None
        self._heartbeat_task: Optional[asyncio.Task] = None
        self._last_heartbeat: dict[str, float] = {}  # strategy_name -> timestamp
        self._running = False

    async def start(self) -> None:
        """Bind PULL socket and start receive loop."""
        self._ctx = zmq.asyncio.Context.instance()
        self._socket = self._ctx.socket(zmq.PULL)
        self._socket.bind(IPC_ENDPOINT)
        self._running = True
        self._task = asyncio.create_task(self._receive_loop())
        self._heartbeat_task = asyncio.create_task(self._heartbeat_monitor())
        log.info("StrategyEventBridge started on %s", IPC_ENDPOINT)

    async def stop(self) -> None:
        """Stop receive loop and close socket."""
        self._running = False
        for task in [self._task, self._heartbeat_task]:
            if task:
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
        if self._socket:
            self._socket.close()
        log.info("StrategyEventBridge stopped")

    def notify_strategy_started(self, strategy_name: str) -> None:
        """Called by lifecycle manager when a strategy subprocess starts."""
        self._last_heartbeat[strategy_name] = time.time()

    def notify_strategy_stopped(self, strategy_name: str) -> None:
        """Called by lifecycle manager when a strategy is deactivated."""
        self._last_heartbeat.pop(strategy_name, None)

    async def _receive_loop(self) -> None:
        while self._running:
            try:
                raw = await asyncio.wait_for(self._socket.recv(), timeout=1.0)
                event = StrategyEvent.deserialize(raw)
                await self._handle_event(event)
            except asyncio.TimeoutError:
                continue
            except asyncio.CancelledError:
                raise
            except Exception as e:
                log.error("Error receiving strategy event: %s", e)

    async def _handle_event(self, event: StrategyEvent) -> None:
        """Translate internal StrategyEvent to UserEvent and publish."""
        from dexorder.events.types import EventType, Priority
        from dexorder.events.publisher import DeliverySpec

        name = event.strategy_name

        if event.event_type == StrategyEventType.HEARTBEAT:
            self._last_heartbeat[name] = time.time()
            return  # heartbeats are not forwarded to the user

        # Map to UserEvent types
        type_map = {
            StrategyEventType.STARTED: (EventType.STRATEGY_STARTED, Priority.INFORMATIONAL),
            StrategyEventType.STOPPED: (EventType.STRATEGY_STOPPED, Priority.INFORMATIONAL),
            StrategyEventType.ORDER_SUBMITTED: (EventType.ORDER_PLACED, Priority.NORMAL),
            StrategyEventType.ORDER_FILLED: (EventType.ORDER_FILLED, Priority.CRITICAL),
            StrategyEventType.POSITION_UPDATE: (EventType.POSITION_UPDATED, Priority.INFORMATIONAL),
            StrategyEventType.PNL_UPDATE: (EventType.STRATEGY_LOG, Priority.INFORMATIONAL),
            StrategyEventType.ERROR: (EventType.STRATEGY_ERROR, Priority.CRITICAL),
            StrategyEventType.LOG: (EventType.STRATEGY_LOG, Priority.INFORMATIONAL),
        }
        et, priority = type_map.get(event.event_type, (EventType.STRATEGY_LOG, Priority.INFORMATIONAL))

        payload = {"strategy_name": name, **event.payload}

        delivery = (
            DeliverySpec.critical() if priority == Priority.CRITICAL
            else DeliverySpec.informational()
        )

        try:
            from dexorder.events.types import UserEvent as UE
            await self._publisher.publish(UE(
                event_type=et,
                payload=payload,
                delivery=delivery,
            ))
        except Exception as e:
            log.error("Failed to publish strategy event %s: %s", event.event_type, e)

    async def _heartbeat_monitor(self) -> None:
        """Periodically check for strategies that stopped sending heartbeats."""
        while self._running:
            try:
                await asyncio.sleep(30)
                now = time.time()
                for name, last_seen in list(self._last_heartbeat.items()):
                    if now - last_seen > HEARTBEAT_TIMEOUT:
                        log.warning("Strategy '%s' missed heartbeat, marking as crashed", name)
                        self._last_heartbeat.pop(name, None)
                        if self._lifecycle:
                            await self._lifecycle.mark_crashed(name)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                log.error("Heartbeat monitor error: %s", e)
sandbox/dexorder/strategy/events.py (new file, 61 lines)
@@ -0,0 +1,61 @@
"""
Internal strategy event types for subprocess → main-process communication.

Strategy subprocesses push StrategyEvents via ZMQ PUSH socket.
The main process's StrategyEventBridge receives them via PULL and forwards
them to the user-facing EventPublisher (dexorder/events/publisher.py).
"""
from __future__ import annotations

import json
import time
import uuid
from dataclasses import dataclass, field
from enum import IntEnum


class StrategyEventType(IntEnum):
    """Internal event types produced by strategy subprocesses."""
    STARTED = 1
    STOPPED = 2
    HEARTBEAT = 3
    ORDER_SUBMITTED = 10
    ORDER_FILLED = 11
    POSITION_UPDATE = 20
    PNL_UPDATE = 21
    ERROR = 30
    LOG = 31


@dataclass
class StrategyEvent:
    """Internal event envelope sent from strategy subprocess to main process."""
    event_type: StrategyEventType
    strategy_name: str
    payload: dict
    timestamp: float = field(default_factory=time.time)
    event_id: str = field(default_factory=lambda: str(uuid.uuid4())[:8])

    def serialize(self) -> bytes:
        return json.dumps({
            "event_type": int(self.event_type),
            "strategy_name": self.strategy_name,
            "payload": self.payload,
            "timestamp": self.timestamp,
            "event_id": self.event_id,
        }).encode()

    @classmethod
    def deserialize(cls, data: bytes) -> "StrategyEvent":
        d = json.loads(data.decode())
        return cls(
            event_type=StrategyEventType(d["event_type"]),
            strategy_name=d["strategy_name"],
            payload=d.get("payload", {}),
            timestamp=d.get("timestamp", time.time()),
            event_id=d.get("event_id", ""),
        )


# IPC endpoint used for strategy subprocess → main process communication
IPC_ENDPOINT = "ipc:///tmp/dexorder-strategy-events.sock"
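A hedged sketch of the sending side that the bridge's PULL socket expects (strategy runners connect() to the endpoint the bridge bind()s; the blocking zmq API is used for brevity, and the strategy name is illustrative):

import zmq
from dexorder.strategy.events import StrategyEvent, StrategyEventType, IPC_ENDPOINT

ctx = zmq.Context.instance()
push = ctx.socket(zmq.PUSH)
push.connect(IPC_ENDPOINT)

push.send(StrategyEvent(
    event_type=StrategyEventType.HEARTBEAT,
    strategy_name="mean_revert",
    payload={},
).serialize())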
322
sandbox/dexorder/strategy/lifecycle.py
Normal file
322
sandbox/dexorder/strategy/lifecycle.py
Normal file
@@ -0,0 +1,322 @@
|
||||
"""
|
||||
StrategyLifecycleManager — manages running strategy subprocesses.
|
||||
|
||||
Responsibilities:
|
||||
- Starting strategy subprocesses from git worktrees
|
||||
- Stopping subprocesses on deactivation
|
||||
- Persisting state to SQLite for crash recovery
|
||||
- Registering strategies as LifecycleManager triggers (prevents idle shutdown)
|
||||
- Enforcing max concurrent strategy limit
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
MAX_CONCURRENT_STRATEGIES = 5
|
||||
DEFAULT_POLL_INTERVAL = 60 # seconds between bar checks
|
||||
|
||||
|
||||
class StrategyLifecycleManager:
|
||||
|
||||
def __init__(self, data_dir: Path, event_bridge=None, lifecycle_manager=None):
|
||||
self.data_dir = data_dir
|
||||
self.worktrees_dir = data_dir / "worktrees"
|
||||
self.configs_dir = data_dir / "strategy_configs"
|
||||
self._bridge = event_bridge
|
||||
self._lifecycle = lifecycle_manager # dexorder LifecycleManager
|
||||
self._runners: dict[str, tuple[threading.Thread, threading.Event]] = {} # name -> (thread, stop_event)
|
||||
self._db: Optional["StrategyDB"] = None
|
||||
|
||||
async def initialize(self) -> None:
|
||||
"""Initialize DB and prune stale worktrees."""
|
||||
from dexorder.strategy.db import get_strategy_db
|
||||
from dexorder.tools.python_tools import get_category_manager
|
||||
|
||||
self._db = get_strategy_db(self.data_dir)
|
||||
await self._db.initialize()
|
||||
self.worktrees_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.configs_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Prune any git worktrees that are no longer registered
|
||||
try:
|
||||
mgr = get_category_manager(self.data_dir)
|
||||
mgr.git.prune_worktrees()
|
||||
except Exception as e:
|
||||
log.warning("git worktree prune failed: %s", e)
|
||||
|
||||
async def resume_running(self) -> None:
|
||||
"""On container restart, re-launch strategies that were 'running' at shutdown."""
|
||||
if self._db is None:
|
||||
return
|
||||
try:
|
||||
running = await self._db.get_running_strategies()
|
||||
for row in running:
|
||||
name = row["name"]
|
||||
log.info("Resuming strategy '%s' after container restart", name)
|
||||
feeds = json.loads(row.get("feeds_json") or "[]")
|
||||
await self.activate(
|
||||
strategy_name=name,
|
||||
feeds=feeds,
|
||||
allocation=row.get("allocation", 10_000.0),
|
||||
paper=bool(row.get("paper", 1)),
|
||||
_resume=True,
|
||||
)
|
||||
except Exception as e:
|
||||
log.error("Failed to resume strategies: %s", e)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Activate / Deactivate
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def activate(
|
||||
self,
|
||||
strategy_name: str,
|
||||
feeds: list[dict],
|
||||
allocation: float,
|
||||
paper: bool = True,
|
||||
git_revision: str = "HEAD",
|
||||
_resume: bool = False,
|
||||
) -> dict:
|
||||
"""
|
||||
Activate a strategy.
|
||||
|
||||
Creates a git worktree at the given revision, writes a config file,
|
||||
and spawns a subprocess running runner.py.
|
||||
|
||||
Returns a dict with status and details.
|
||||
"""
|
||||
if strategy_name in self._runners:
|
||||
return {"error": f"Strategy '{strategy_name}' is already running"}
|
||||
|
||||
if len(self._runners) >= MAX_CONCURRENT_STRATEGIES:
|
||||
return {
|
||||
"error": f"Maximum concurrent strategies ({MAX_CONCURRENT_STRATEGIES}) reached. "
|
||||
"Deactivate a running strategy first."
|
||||
}
|
||||
|
||||
# Build worktree
|
||||
from dexorder.tools.python_tools import get_category_manager, sanitize_name
|
||||
mgr = get_category_manager(self.data_dir)
|
||||
|
||||
safe_name = sanitize_name(strategy_name)
|
||||
impl_path = self.data_dir / "src" / "strategy" / safe_name / "implementation.py"
|
||||
if not impl_path.exists():
|
||||
return {"error": f"Strategy '{strategy_name}' not found at {impl_path}"}
|
||||
|
||||
try:
|
||||
short_hash = mgr.git.head_short_hash() if git_revision == "HEAD" else git_revision[:7]
|
||||
worktree_name = f"{safe_name}_{short_hash}"
|
||||
worktree_path = self.worktrees_dir / worktree_name
|
||||
|
||||
if not worktree_path.exists():
|
||||
actual_hash = mgr.git.create_worktree(worktree_path, git_revision)
|
||||
else:
|
||||
actual_hash = short_hash
|
||||
except Exception as e:
|
||||
return {"error": f"Failed to create git worktree: {e}"}
|
||||
|
||||
worktree_impl = worktree_path / "src" / "strategy" / safe_name / "implementation.py"
|
||||
if not worktree_impl.exists():
|
||||
# Fall back to live impl (worktree may not include subdirs on first use)
|
||||
worktree_impl = impl_path
|
||||
|
||||
# Feed configs as list of [ticker, period_seconds]
|
||||
feed_configs = [[f.get("symbol", ""), int(f.get("period_seconds", 3600))] for f in feeds]
|
||||
|
||||
# Write runner config to a temp file under DATA_DIR
|
||||
runner_config = {
|
||||
"strategy_name": strategy_name,
|
||||
"impl_path": str(worktree_impl),
|
||||
"feed_configs": feed_configs,
|
||||
"allocation": allocation,
|
||||
"ipc_endpoint": "ipc:///tmp/dexorder-strategy-events.sock",
|
||||
"data_dir": str(self.data_dir),
|
||||
"poll_interval": DEFAULT_POLL_INTERVAL,
|
||||
}
|
||||
config_file = self.configs_dir / f"{safe_name}.json"
|
||||
config_file.write_text(json.dumps(runner_config, indent=2))
|
||||
|
||||
# Launch strategy in a daemon thread
|
||||
try:
|
||||
from dexorder.strategy.runner import run_thread
|
||||
stop_event = threading.Event()
|
||||
thread = threading.Thread(
|
||||
                target=run_thread,
                args=(runner_config, stop_event),
                daemon=True,
                name=f"strategy-{safe_name}",
            )
            thread.start()
        except Exception as e:
            return {"error": f"Failed to start strategy thread: {e}"}

        self._runners[strategy_name] = (thread, stop_event)

        # Register as lifecycle trigger
        if self._lifecycle:
            self._lifecycle.add_trigger(f"strategy:{strategy_name}")

        # Notify event bridge
        if self._bridge:
            self._bridge.notify_strategy_started(strategy_name)

        # Persist to DB
        if self._db:
            await self._db.upsert_strategy(
                name=strategy_name,
                status="running",
                allocation=allocation,
                paper=paper,
                feeds=feeds,
                git_rev=actual_hash,
                worktree_path=str(worktree_path),
                config=runner_config,
            )

        log.info("Strategy '%s' activated (thread=%d, rev=%s)", strategy_name, thread.ident, actual_hash)
        return {
            "status": "activated",
            "strategy_name": strategy_name,
            "paper": paper,
            "allocation": allocation,
            "git_revision": actual_hash,
            "thread_id": thread.ident,
        }

    async def deactivate(self, strategy_name: str) -> dict:
        """Stop a running strategy and clean up its worktree."""
        entry = self._runners.pop(strategy_name, None)
        if entry is None:
            return {"error": f"Strategy '{strategy_name}' is not running"}

        thread, stop_event = entry

        # Signal the runner to stop and wait for the thread to exit
        stop_event.set()
        await asyncio.get_event_loop().run_in_executor(
            None, lambda: thread.join(timeout=15)
        )
        if thread.is_alive():
            log.warning("Strategy '%s' thread did not exit within timeout", strategy_name)

        # Remove lifecycle trigger
        if self._lifecycle:
            self._lifecycle.remove_trigger(f"strategy:{strategy_name}")

        # Notify bridge
        if self._bridge:
            self._bridge.notify_strategy_stopped(strategy_name)

        # Get final PnL from DB
        final_pnl = 0.0
        if self._db:
            state = await self._db.get_pnl_state(strategy_name)
            if state:
                final_pnl = state.get("realized_pnl", 0.0)
            await self._db.update_strategy_status(strategy_name, "stopped")

        # Clean up worktree
        await self._cleanup_worktree(strategy_name)

        log.info("Strategy '%s' deactivated, final_pnl=%.4f", strategy_name, final_pnl)
        return {
            "status": "deactivated",
            "strategy_name": strategy_name,
            "final_pnl": final_pnl,
        }

    async def mark_crashed(self, strategy_name: str) -> None:
        """Mark a strategy as crashed (called by heartbeat monitor)."""
        self._runners.pop(strategy_name, None)
        if self._lifecycle:
            self._lifecycle.remove_trigger(f"strategy:{strategy_name}")
        if self._db:
            await self._db.update_strategy_status(strategy_name, "error")
        log.error("Strategy '%s' marked as crashed (heartbeat timeout)", strategy_name)

    async def update_pnl(self, strategy_name: str, payload: dict) -> None:
        """Called by event bridge when a PNL_UPDATE event arrives."""
        if self._db:
            await self._db.update_pnl_state(
                name=strategy_name,
                realized_pnl=payload.get("realized_pnl", 0.0),
                unrealized_pnl=payload.get("unrealized_pnl", 0.0),
                trade_count=payload.get("trade_count", 0),
            )

    # ------------------------------------------------------------------
    # Listing
    # ------------------------------------------------------------------

    async def list_active(self) -> list[dict]:
        """Return currently running strategies with PnL state."""
        if self._db is None:
            return []
        strategies = await self._db.get_running_strategies()
        result = []
        for s in strategies:
            name = s["name"]
            state = await self._db.get_pnl_state(name)
            entry = {
                "strategy_name": name,
                "status": s.get("status", "unknown"),
                "paper": bool(s.get("paper", 1)),
                "allocation": s.get("allocation", 0),
                "git_revision": s.get("git_rev"),
                "started_at": s.get("started_at"),
                "feeds": json.loads(s.get("feeds_json") or "[]"),
                "realized_pnl": state.get("realized_pnl", 0.0) if state else 0.0,
                "unrealized_pnl": state.get("unrealized_pnl", 0.0) if state else 0.0,
                "trade_count": state.get("trade_count", 0) if state else 0,
            }
            result.append(entry)
        return result

    # ------------------------------------------------------------------
    # Shutdown
    # ------------------------------------------------------------------

    async def shutdown(self) -> None:
        """Stop all running strategies on container shutdown."""
        names = list(self._runners.keys())
        for name in names:
            await self.deactivate(name)

    # ------------------------------------------------------------------
    # Internal
    # ------------------------------------------------------------------

    async def _cleanup_worktree(self, strategy_name: str) -> None:
        if self._db is None:
            return
        try:
            row = await self._db.get_strategy(strategy_name)
            wt = row.get("worktree_path") if row else None
            if wt:
                from dexorder.tools.python_tools import get_category_manager
                mgr = get_category_manager(self.data_dir)
                mgr.git.remove_worktree(Path(wt))
        except Exception as e:
            log.warning("Worktree cleanup failed for '%s': %s", strategy_name, e)


# Singleton
_lifecycle_manager: Optional[StrategyLifecycleManager] = None


def get_strategy_lifecycle(data_dir: Optional[Path] = None) -> StrategyLifecycleManager:
    global _lifecycle_manager
    if _lifecycle_manager is None:
        if data_dir is None:
            import os
            data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
        _lifecycle_manager = StrategyLifecycleManager(data_dir)
    return _lifecycle_manager
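For reference, a minimal sketch (not part of the commit) of driving the lifecycle manager from an async context. get_strategy_lifecycle(), the feed-dict shape, and the returned status keys come from the code above; the strategy name, symbol, and allocation are illustrative:

    import asyncio
    from dexorder.strategy.lifecycle import get_strategy_lifecycle

    async def demo():
        lifecycle = get_strategy_lifecycle()
        # Activate a paper-trading run of a previously saved strategy
        result = await lifecycle.activate(
            strategy_name="My RSI Strategy",
            feeds=[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}],
            allocation=5000.0,
            paper=True,
        )
        print(result.get("status"), result.get("git_revision"))
        print(await lifecycle.list_active())                  # running strategies + PnL state
        print(await lifecycle.deactivate("My RSI Strategy"))  # {"status": "deactivated", ...}

    asyncio.run(demo())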
196 sandbox/dexorder/strategy/paper_account.py Normal file
@@ -0,0 +1,196 @@
"""
Lightweight paper trading account for strategy subprocesses.

Simulates order execution at bar-close prices without requiring Nautilus TradingNode.
Tracks positions, PnL, and trade history. All amounts are in the quote currency.
"""
from __future__ import annotations

import logging
import time
from dataclasses import dataclass, field
from typing import Optional

log = logging.getLogger(__name__)


@dataclass
class Position:
    """An open position."""
    instrument: str
    side: str  # "long" or "short"
    quantity: float
    entry_price: float
    entry_time: float  # Unix timestamp


@dataclass
class Trade:
    """A completed round-trip trade."""
    instrument: str
    side: str  # direction of the entry
    quantity: float
    entry_price: float
    exit_price: float
    entry_time: float
    exit_time: float
    pnl: float


class PaperAccount:
    """
    Simulates a cash paper account for a single strategy.

    Positions are opened/closed by calling buy(), sell(), and flatten().
    Fills execute at the provided price (e.g. bar close).
    """

    def __init__(self, initial_capital: float, feed_key: Optional[str] = None):
        self.initial_capital = initial_capital
        self.balance = initial_capital
        self._positions: dict[str, Position] = {}  # feed_key → Position
        self._trades: list[Trade] = []
        self._default_feed_key = feed_key

    # ------------------------------------------------------------------
    # Order API (mirrors PandasStrategy's order API)
    # ------------------------------------------------------------------

    def buy(self, quantity: float, price: float, feed_key: Optional[str] = None) -> None:
        """Open a long or close a short at price."""
        fk = feed_key or self._default_feed_key or "default"
        existing = self._positions.get(fk)

        if existing and existing.side == "short":
            # Close short
            pnl = (existing.entry_price - price) * existing.quantity
            self._close_position(fk, price, pnl)
        elif not existing:
            # Open long
            cost = price * quantity
            if cost > self.balance:
                quantity = self.balance / price  # size down to available capital
            if quantity > 0:
                self._positions[fk] = Position(
                    instrument=fk, side="long", quantity=quantity,
                    entry_price=price, entry_time=time.time(),
                )
                log.debug("Paper BUY %.6f @ %.2f (%s)", quantity, price, fk)

    def sell(self, quantity: float, price: float, feed_key: Optional[str] = None) -> None:
        """Open a short or close a long at price."""
        fk = feed_key or self._default_feed_key or "default"
        existing = self._positions.get(fk)

        if existing and existing.side == "long":
            # Close long
            pnl = (price - existing.entry_price) * existing.quantity
            self._close_position(fk, price, pnl)
        elif not existing:
            # Open short (using margin — simplified: require 2x capital)
            cost = price * quantity * 2
            if cost > self.balance:
                quantity = self.balance / (price * 2)
            if quantity > 0:
                self._positions[fk] = Position(
                    instrument=fk, side="short", quantity=quantity,
                    entry_price=price, entry_time=time.time(),
                )
                log.debug("Paper SELL %.6f @ %.2f (%s)", quantity, price, fk)

    def flatten(self, price: float, feed_key: Optional[str] = None) -> None:
        """Close any open position at price."""
        if feed_key:
            keys = [feed_key]
        else:
            keys = list(self._positions.keys())

        for fk in keys:
            pos = self._positions.get(fk)
            if pos is None:
                continue
            if pos.side == "long":
                pnl = (price - pos.entry_price) * pos.quantity
            else:
                pnl = (pos.entry_price - price) * pos.quantity
            self._close_position(fk, price, pnl)

    # ------------------------------------------------------------------
    # Reporting
    # ------------------------------------------------------------------

    def unrealized_pnl(self, current_prices: dict[str, float]) -> float:
        """Compute unrealized PnL using current prices."""
        total = 0.0
        for fk, pos in self._positions.items():
            price = current_prices.get(fk)
            if price is None:
                continue
            if pos.side == "long":
                total += (price - pos.entry_price) * pos.quantity
            else:
                total += (pos.entry_price - price) * pos.quantity
        return total

    def realized_pnl(self) -> float:
        return sum(t.pnl for t in self._trades)

    def total_pnl(self, current_prices: dict[str, float] | None = None) -> float:
        rpnl = self.realized_pnl()
        upnl = self.unrealized_pnl(current_prices) if current_prices else 0.0
        return rpnl + upnl

    def trade_count(self) -> int:
        return len(self._trades)

    def win_rate(self) -> float:
        if not self._trades:
            return 0.0
        wins = sum(1 for t in self._trades if t.pnl > 0)
        return wins / len(self._trades)

    def positions(self) -> dict[str, dict]:
        return {
            fk: {
                "side": p.side,
                "quantity": p.quantity,
                "entry_price": p.entry_price,
            }
            for fk, p in self._positions.items()
        }

    def recent_trades(self, n: int = 50) -> list[dict]:
        return [
            {
                "instrument": t.instrument,
                "side": t.side,
                "quantity": round(t.quantity, 8),
                "entry_price": round(t.entry_price, 8),
                "exit_price": round(t.exit_price, 8),
                "entry_time": t.entry_time,
                "exit_time": t.exit_time,
                "pnl": round(t.pnl, 6),
            }
            for t in self._trades[-n:]
        ]

    # ------------------------------------------------------------------
    # Internal
    # ------------------------------------------------------------------

    def _close_position(self, fk: str, price: float, pnl: float) -> None:
        pos = self._positions.pop(fk, None)
        if pos is None:
            return
        self.balance += pnl
        self._trades.append(Trade(
            instrument=fk,
            side=pos.side,
            quantity=pos.quantity,
            entry_price=pos.entry_price,
            exit_price=price,
            entry_time=pos.entry_time,
            exit_time=time.time(),
            pnl=pnl,
        ))
        log.debug("Paper trade closed: pnl=%.4f balance=%.2f (%s)", pnl, self.balance, fk)
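A quick usage sketch (not part of the commit) showing the account's round-trip semantics; prices, quantities, and the feed key are illustrative:

    from dexorder.strategy.paper_account import PaperAccount

    acct = PaperAccount(initial_capital=10_000.0, feed_key="BTC-1h")
    acct.buy(quantity=0.1, price=50_000.0)    # opens a long (cost 5,000 <= balance)
    acct.sell(quantity=0.1, price=51_000.0)   # closes it: pnl = (51000 - 50000) * 0.1 = 100.0
    assert acct.realized_pnl() == 100.0
    assert acct.trade_count() == 1 and acct.win_rate() == 1.0
    acct.buy(quantity=0.05, price=50_000.0)   # opens a new long
    # Unrealized PnL is marked against caller-supplied prices:
    print(acct.unrealized_pnl({"BTC-1h": 51_000.0}))  # (51000 - 50000) * 0.05 = 50.0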
395 sandbox/dexorder/strategy/runner.py Normal file
@@ -0,0 +1,395 @@
#!/usr/bin/env python3
"""
Strategy subprocess runner.

Loads a PandasStrategy from a git worktree path, subscribes to live bar data
(polling DataAPI), runs the paper trading loop, and pushes events to the main
MCP process via ZMQ PUSH.

Usage:
    python -m dexorder.strategy.runner --config <json_config_path>

Config JSON:
    {
      "strategy_name": "My RSI Strategy",
      "impl_path": "/app/data/worktrees/my_rsi_strategy_abc1234/strategy/my_rsi_strategy/implementation.py",
      "feed_configs": [["BTC/USDT.BINANCE", 3600]],
      "allocation": 5000.0,
      "ipc_endpoint": "ipc:///tmp/dexorder-strategy-events.sock",
      "data_dir": "/app/data",
      "poll_interval": 60
    }
"""
from __future__ import annotations

import argparse
import asyncio
import json
import logging
import os
import sys
import threading
import time
from pathlib import Path

# Ensure the worktree's parent (which contains dexorder package) is on the path.
# Also ensure the original dexorder package is importable.
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))

log = logging.getLogger(__name__)


class StrategyRunner:
    """Runs a PandasStrategy in paper trading mode using DataAPI polling."""

    def __init__(self, config: dict, stop_event: threading.Event | None = None):
        self.strategy_name = config["strategy_name"]
        self.impl_path = Path(config["impl_path"])
        self.feed_configs: list[tuple[str, int]] = [
            (f[0], int(f[1])) for f in config["feed_configs"]
        ]
        self.allocation = float(config.get("allocation", 10_000.0))
        self.ipc_endpoint = config.get("ipc_endpoint", "ipc:///tmp/dexorder-strategy-events.sock")
        self.data_dir = Path(config.get("data_dir", "/app/data"))
        self.poll_interval = int(config.get("poll_interval", 60))  # seconds

        self._stop_event = stop_event or threading.Event()
        self._running = False
        self._push_socket = None
        self._strategy = None
        self._paper: "PaperAccount | None" = None
        self._last_timestamps: dict[str, int] = {}  # feed_key -> last seen timestamp_ns

    async def run(self) -> None:
        """Main async entry point."""
        self._setup_zmq()
        await self._push_event("STARTED", {})

        try:
            await self._setup_strategy()
            await self._trading_loop()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            log.exception("Strategy runner fatal error")
            await self._push_event("ERROR", {"message": str(e)})
        finally:
            await self._push_event("STOPPED", {
                "pnl": self._paper.realized_pnl() if self._paper else 0.0,
                "trade_count": self._paper.trade_count() if self._paper else 0,
            })
            self._cleanup_zmq()

    # ------------------------------------------------------------------
    # Setup
    # ------------------------------------------------------------------

    def _setup_zmq(self) -> None:
        import zmq
        ctx = zmq.Context.instance()
        self._push_socket = ctx.socket(zmq.PUSH)
        self._push_socket.connect(self.ipc_endpoint)
        log.info("Connected PUSH socket to %s", self.ipc_endpoint)

    def _cleanup_zmq(self) -> None:
        if self._push_socket:
            self._push_socket.close()

    async def _setup_strategy(self) -> None:
        from dexorder.nautilus.backtest_runner import _load_strategy_class, _setup_custom_indicators
        from dexorder.nautilus.pandas_strategy import PandasStrategyConfig, make_feed_key
        from dexorder.strategy.paper_account import PaperAccount

        # Register custom indicators
        try:
            _setup_custom_indicators(self.data_dir)
        except Exception as e:
            log.warning("Custom indicator setup failed: %s", e)

        # Load strategy class from worktree impl path
        strategy_class = _load_strategy_class(self.impl_path)
        log.info("Loaded strategy class: %s", strategy_class.__name__)

        feed_keys = tuple(make_feed_key(t, p) for t, p in self.feed_configs)
        config = PandasStrategyConfig(
            strategy_id=f"{strategy_class.__name__}-PAPER",
            feed_keys=feed_keys,
            initial_capital=self.allocation,
        )
        self._strategy = strategy_class(config=config)
        self._paper = PaperAccount(self.allocation, feed_keys[0] if feed_keys else None)

        # Wire paper account into strategy's order methods
        self._wire_paper_account(feed_keys)
        log.info("Strategy '%s' initialized with %d feed(s)", self.strategy_name, len(feed_keys))

    def _wire_paper_account(self, feed_keys: tuple) -> None:
        """Replace strategy's order methods with paper account calls."""
        paper = self._paper
        from dexorder.nautilus.pandas_strategy import make_feed_key

        def paper_buy(quantity, feed_key=None):
            fk = feed_key or (feed_keys[0] if feed_keys else "default")
            # Get current close price from last seen bars
            price = self._current_price(fk)
            if price:
                paper.buy(quantity, price, fk)
                asyncio.create_task(self._push_event("ORDER_FILLED", {
                    "side": "buy", "quantity": quantity,
                    "price": price, "feed_key": fk,
                    "pnl": paper.realized_pnl(),
                }))

        def paper_sell(quantity, feed_key=None):
            fk = feed_key or (feed_keys[0] if feed_keys else "default")
            price = self._current_price(fk)
            if price:
                paper.sell(quantity, price, fk)
                asyncio.create_task(self._push_event("ORDER_FILLED", {
                    "side": "sell", "quantity": quantity,
                    "price": price, "feed_key": fk,
                    "pnl": paper.realized_pnl(),
                }))

        def paper_flatten(feed_key=None):
            if feed_key:
                fk_list = [feed_key]
            else:
                fk_list = list(feed_keys)
            for fk in fk_list:
                price = self._current_price(fk)
                if price:
                    paper.flatten(price, fk)

        self._strategy.buy = paper_buy
        self._strategy.sell = paper_sell
        self._strategy.flatten = paper_flatten

    # ------------------------------------------------------------------
    # Trading loop
    # ------------------------------------------------------------------

    async def _trading_loop(self) -> None:
        """Poll DataAPI for new bars and call strategy.evaluate() on each update."""
        import pandas as pd
        from dexorder.api import get_api
        from dexorder.nautilus.pandas_strategy import make_feed_key

        api = get_api()
        accumulated: dict[str, list[dict]] = {
            make_feed_key(t, p): [] for t, p in self.feed_configs
        }
        self._current_prices: dict[str, float] = {}

        heartbeat_task = asyncio.create_task(self._heartbeat_loop())
        self._running = True

        try:
            while self._running and not self._stop_event.is_set():
                now = int(time.time())
                updated_any = False

                for ticker, period_seconds in self.feed_configs:
                    fk = make_feed_key(ticker, period_seconds)
                    last_ts_ns = self._last_timestamps.get(fk, 0)
                    # Catch up on bars since the last poll; the first poll
                    # bootstraps with the last 7 days of history.
                    if last_ts_ns == 0:
                        from_time = now - 7 * 24 * 3600
                    else:
                        from_time = last_ts_ns // 1_000_000_000

                    try:
                        df = await api.data.historical_ohlc(
                            ticker=ticker,
                            period_seconds=period_seconds,
                            start_time=from_time,
                            end_time=now,
                            extra_columns=["volume", "buy_vol", "sell_vol",
                                           "open_time", "high_time", "low_time", "close_time",
                                           "open_interest"],
                        )
                    except Exception as e:
                        log.warning("OHLC fetch failed for %s: %s", fk, e)
                        continue

                    if df.empty:
                        continue

                    # Find new bars
                    ts_col = "timestamp" if "timestamp" in df.columns else df.columns[0]
                    new_bars = df[df[ts_col] > last_ts_ns] if last_ts_ns else df

                    for _, row in new_bars.iterrows():
                        ts_ns = int(row.get(ts_col, 0))
                        entry = {
                            "timestamp": ts_ns,
                            "open": float(row.get("open", 0)),
                            "high": float(row.get("high", 0)),
                            "low": float(row.get("low", 0)),
                            "close": float(row.get("close", 0)),
                            "volume": float(row.get("volume", 0)),
                            "buy_vol": float(row.get("buy_vol", 0)) if "buy_vol" in row else None,
                            "sell_vol": float(row.get("sell_vol", 0)) if "sell_vol" in row else None,
                            "open_interest": float(row.get("open_interest", 0)) if "open_interest" in row else None,
                        }
                        accumulated[fk].append(entry)
                        self._last_timestamps[fk] = max(self._last_timestamps.get(fk, 0), ts_ns)
                        self._current_prices[fk] = entry["close"]
                        updated_any = True

                if updated_any:
                    # Build DataFrames and call evaluate
                    dfs = {fk: pd.DataFrame(rows) for fk, rows in accumulated.items() if rows}
                    try:
                        self._strategy.evaluate(dfs)
                    except Exception as e:
                        log.error("evaluate() error: %s", e)
                        await self._push_event("ERROR", {"message": f"evaluate() error: {e}"})

                    # Push PnL update
                    rpnl = self._paper.realized_pnl() if self._paper else 0.0
                    upnl = self._paper.unrealized_pnl(self._current_prices) if self._paper else 0.0
                    await self._push_event("PNL_UPDATE", {
                        "realized_pnl": rpnl,
                        "unrealized_pnl": upnl,
                        "total_pnl": rpnl + upnl,
                        "trade_count": self._paper.trade_count() if self._paper else 0,
                    })

                # Sleep in 1s increments so stop_event is checked promptly
                for _ in range(self.poll_interval):
                    if self._stop_event.is_set():
                        self._running = False
                        break
                    await asyncio.sleep(1)
        finally:
            heartbeat_task.cancel()
            try:
                await heartbeat_task
            except asyncio.CancelledError:
                pass

    async def _heartbeat_loop(self) -> None:
        while True:
            await asyncio.sleep(10)
            await self._push_event("HEARTBEAT", {})

    def _current_price(self, feed_key: str) -> float | None:
        return getattr(self, "_current_prices", {}).get(feed_key)

    # ------------------------------------------------------------------
    # Event publishing
    # ------------------------------------------------------------------

    async def _push_event(self, event_type: str, payload: dict) -> None:
        from dexorder.strategy.events import StrategyEvent, StrategyEventType
        type_map = {
            "STARTED": StrategyEventType.STARTED,
            "STOPPED": StrategyEventType.STOPPED,
            "HEARTBEAT": StrategyEventType.HEARTBEAT,
            "ORDER_FILLED": StrategyEventType.ORDER_FILLED,
            "POSITION_UPDATE": StrategyEventType.POSITION_UPDATE,
            "PNL_UPDATE": StrategyEventType.PNL_UPDATE,
            "ERROR": StrategyEventType.ERROR,
            "LOG": StrategyEventType.LOG,
        }
        et = type_map.get(event_type, StrategyEventType.LOG)
        event = StrategyEvent(
            event_type=et,
            strategy_name=self.strategy_name,
            payload=payload,
        )
        try:
            if self._push_socket:
                self._push_socket.send(event.serialize(), flags=1)  # NOBLOCK
        except Exception as e:
            log.debug("Failed to push event %s: %s", event_type, e)


def _init_api() -> None:
    """Initialize thread-local API from environment config. Non-fatal on error."""
    try:
        import yaml
        config_path = os.environ.get("CONFIG_PATH", "/app/config/config.yaml")
        secrets_path = os.environ.get("SECRETS_PATH", "/app/config/secrets.yaml")
        config_data, secrets_data = {}, {}
        if Path(config_path).exists():
            with open(config_path) as f:
                config_data = yaml.safe_load(f) or {}
        if Path(secrets_path).exists():
            with open(secrets_path) as f:
                secrets_data = yaml.safe_load(f) or {}

        data_cfg = config_data.get("data", {})
        iceberg_cfg = data_cfg.get("iceberg", {})
        relay_cfg = data_cfg.get("relay", {})

        from dexorder.api import set_api, API
        from dexorder.impl.charting_api_impl import ChartingAPIImpl
        from dexorder.impl.data_api_impl import DataAPIImpl

        data_api = DataAPIImpl(
            iceberg_catalog_uri=iceberg_cfg.get("catalog_uri", "http://iceberg-catalog:8181"),
            relay_endpoint=relay_cfg.get("endpoint", "tcp://relay:5559"),
            notification_endpoint=relay_cfg.get("notification_endpoint", "tcp://relay:5558"),
            namespace=iceberg_cfg.get("namespace", "trading"),
            s3_endpoint=iceberg_cfg.get("s3_endpoint") or secrets_data.get("s3_endpoint"),
            s3_access_key=iceberg_cfg.get("s3_access_key") or secrets_data.get("s3_access_key"),
            s3_secret_key=iceberg_cfg.get("s3_secret_key") or secrets_data.get("s3_secret_key"),
        )
        set_api(API(charting=ChartingAPIImpl(), data=data_api))
    except Exception as e:
        log.warning("API initialization failed: %s", e)


def run_thread(config: dict, stop_event: threading.Event) -> None:
    """
    Entry point for running a strategy in a daemon thread.

    Initializes a thread-local API, creates a StrategyRunner with the given
    stop_event, and runs the async trading loop until stop_event is set.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s [%(name)s] %(message)s",
    )
    _init_api()

    runner = StrategyRunner(config, stop_event=stop_event)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(runner.run())
    finally:
        loop.close()


def main():
    """Subprocess entry point (backward compatibility)."""
    import signal

    parser = argparse.ArgumentParser(description="Dexorder strategy subprocess runner")
    parser.add_argument("--config", required=True, help="Path to JSON config file")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s [%(name)s] %(message)s",
    )

    with open(args.config) as f:
        config = json.load(f)

    stop_event = threading.Event()

    def _shutdown(signum, frame):
        log.info("Received signal %d, stopping runner", signum)
        stop_event.set()

    signal.signal(signal.SIGTERM, _shutdown)
    signal.signal(signal.SIGINT, _shutdown)

    run_thread(config, stop_event)


if __name__ == "__main__":
    main()
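On the other side of the PUSH socket, the main process binds a PULL socket on the same endpoint. A minimal consumer sketch (not part of the commit); StrategyEvent.deserialize is assumed here as the counterpart of the serialize() call used in _push_event() above:

    import zmq
    from dexorder.strategy.events import StrategyEvent

    def consume_events(endpoint: str = "ipc:///tmp/dexorder-strategy-events.sock") -> None:
        ctx = zmq.Context.instance()
        pull = ctx.socket(zmq.PULL)
        pull.bind(endpoint)      # runners connect(); the single consumer binds
        while True:
            raw = pull.recv()    # blocks until some runner pushes an event
            event = StrategyEvent.deserialize(raw)  # assumed counterpart of serialize()
            print(event.event_type, event.strategy_name, event.payload)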
@@ -1,15 +1,14 @@
"""
activate_strategy / deactivate_strategy — start and stop live or paper trading.
activate_strategy / deactivate_strategy / list_active_strategies

paper=True (default): forward paper trading — strategy runs on live data with
simulated fills. No API keys required.
simulated fills via PaperAccount.

paper=False: live trading — real order execution via user's exchange API keys,
             retrieved from the user secrets vault. Currently raises
             NotImplementedError until the vault is implemented.
paper=False: live trading — not yet implemented (requires secrets vault).

Full live-data feed streaming for forward testing is TBD (requires a live bar
source). This module establishes the interface and stubs the runtime loop.
Each activated strategy runs in its own subprocess from a git worktree,
ensuring the production version is isolated from edits in the working tree.
Events (fills, PnL updates, errors) flow via ZMQ PUSH/PULL to EventPublisher.
"""

import json
@@ -18,10 +17,6 @@ from typing import Any

log = logging.getLogger(__name__)

# Registry of active strategies: {strategy_name → runtime state dict}
# In a future implementation this will hold live strategy runners.
_active_strategies: dict[str, dict] = {}


async def activate_strategy(
    strategy_name: str,
@@ -34,16 +29,14 @@ async def activate_strategy(

    Args:
        strategy_name: Display name as saved via python_write("strategy", ...)
        feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        feeds: List of feed dicts: [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        allocation: Capital allocated in quote currency (e.g. 5000.0 USDT)
        paper: True = paper/simulated fills (default); False = live execution
        paper: True = paper/simulated fills (default); False = live (not yet implemented)

    Returns:
        list[TextContent] with JSON:
            {"status": "activated", "strategy_name": str, "paper": bool, "allocation": float}

        On error:
            {"error": str}
            {"status": "activated", "strategy_name": str, "paper": bool, "allocation": float,
             "git_revision": str, "pid": int}
    """
    from mcp.types import TextContent

@@ -51,87 +44,45 @@ async def activate_strategy(
        log.error("activate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    if strategy_name in _active_strategies:
    if not paper:
        return _err(
            f"Strategy '{strategy_name}' is already active. "
            "Call deactivate_strategy first."
            "Live trading (paper=False) requires the user secrets vault, "
            "which is not yet implemented. Use paper=True for paper forward testing."
        )

    if not paper:
        # Live execution requires the user secrets vault for API keys.
        # The vault is not yet implemented.
        try:
            from dexorder.secrets_vault import SecretsVault
            _vault = SecretsVault()
            _vault.get_secret("__probe__")  # will raise NotImplementedError
        except NotImplementedError:
            return _err(
                "Live trading (paper=False) requires the user secrets vault, "
                "which is not yet implemented. Use paper=True for paper forward testing."
            )

    # Validate feeds
    if not feeds:
        return _err("feeds list is empty")

    parsed_feeds: list[tuple[str, int]] = []
    for f in feeds:
        sym = f.get("symbol", "")
        ps = f.get("period_seconds", 3600)
        if not sym:
        if not f.get("symbol"):
            return _err(f"Feed entry missing 'symbol': {f}")
        parsed_feeds.append((sym, int(ps)))

    # TODO: Full implementation — start a live/paper trading loop:
    # 1. Load strategy class from category files
    # 2. Set up custom indicators via _setup_custom_indicators()
    # 3. Subscribe to live bar stream for each feed
    # 4. Initialize paper account (Nautilus SimulatedExchange) or live account
    # 5. Run strategy event loop (on_bar → evaluate → submit orders)
    # This requires a live data feed adapter (TBD).
    try:
        from dexorder.strategy.lifecycle import get_strategy_lifecycle
        lifecycle = get_strategy_lifecycle()
        result = await lifecycle.activate(
            strategy_name=strategy_name,
            feeds=feeds,
            allocation=allocation,
            paper=paper,
        )
    except Exception as exc:
        log.exception("activate_strategy: lifecycle activation failed")
        return _err(f"Activation failed: {exc}")

    log.info(
        "activate_strategy: registering '%s' (paper=%s, allocation=%.2f) — "
        "live feed loop is TBD",
        strategy_name, paper, allocation,
    )
    if "error" in result:
        return _err(result["error"])

    _active_strategies[strategy_name] = {
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "allocation": allocation,
        "paper": paper,
        "status": "registered",
        "pnl": 0.0,
    }

    payload = {
        "status": "activated",
        "strategy_name": strategy_name,
        "paper": paper,
        "allocation": allocation,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "note": (
            "Strategy registered. Live data feed streaming is not yet implemented — "
            "forward trading will begin when the live feed adapter is available."
        ),
    }
    return [TextContent(type="text", text=json.dumps(payload))]
    return [TextContent(type="text", text=json.dumps(result))]


async def deactivate_strategy(strategy_name: str) -> list:
    """
    Deactivate a running strategy and return its final P&L summary.

    Args:
        strategy_name: Display name of the active strategy

    Returns:
        list[TextContent] with JSON:
            {"status": "deactivated", "strategy_name": str, "final_pnl": float}

        On error:
            {"error": str}
    """
    from mcp.types import TextContent

@@ -139,35 +90,36 @@ async def deactivate_strategy(strategy_name: str) -> list:
        log.error("deactivate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    if strategy_name not in _active_strategies:
        return _err(f"Strategy '{strategy_name}' is not active")
    try:
        from dexorder.strategy.lifecycle import get_strategy_lifecycle
        lifecycle = get_strategy_lifecycle()
        result = await lifecycle.deactivate(strategy_name)
    except Exception as exc:
        log.exception("deactivate_strategy: failed")
        return _err(f"Deactivation failed: {exc}")

    state = _active_strategies.pop(strategy_name)
    if "error" in result:
        return _err(result["error"])

    # TODO: Stop the live feed loop and collect final P&L from the running engine.
    final_pnl = state.get("pnl", 0.0)

    log.info("deactivate_strategy: stopped '%s', final_pnl=%.4f", strategy_name, final_pnl)

    payload = {
        "status": "deactivated",
        "strategy_name": strategy_name,
        "final_pnl": final_pnl,
    }
    return [TextContent(type="text", text=json.dumps(payload))]
    return [TextContent(type="text", text=json.dumps(result))]


async def list_active_strategies() -> list:
    """
    Return a list of currently active strategies and their status.
    Return a list of currently active strategies with PnL state.

    Returns:
        list[TextContent] with JSON:
            {"active_strategies": [{strategy_name, paper, allocation, feeds, pnl}, ...]}
            {"active_strategies": [{strategy_name, paper, allocation, feeds, realized_pnl, ...}]}
    """
    from mcp.types import TextContent

    payload = {
        "active_strategies": list(_active_strategies.values()),
    }
    return [TextContent(type="text", text=json.dumps(payload))]
    try:
        from dexorder.strategy.lifecycle import get_strategy_lifecycle
        lifecycle = get_strategy_lifecycle()
        active = await lifecycle.list_active()
    except Exception as exc:
        log.exception("list_active_strategies: failed")
        active = []

    return [TextContent(type="text", text=json.dumps({"active_strategies": active}))]
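For orientation, a sketch (not part of the commit) of exercising the rewritten tools from an async test; the module path dexorder.tools.strategy_tools is assumed for illustration — the diff does not show the file name — and the feed values are examples:

    import asyncio, json
    from dexorder.tools.strategy_tools import (   # module path assumed
        activate_strategy, list_active_strategies, deactivate_strategy,
    )

    async def smoke_test():
        out = await activate_strategy(
            strategy_name="My RSI Strategy",
            feeds=[{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}],
            allocation=5000.0,
            paper=True,
        )
        print(json.loads(out[0].text))   # {"status": "activated", ...} or {"error": ...}
        print(json.loads((await list_active_strategies())[0].text))
        print(json.loads((await deactivate_strategy("My RSI Strategy"))[0].text))

    asyncio.run(smoke_test())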
@@ -15,7 +15,11 @@ from typing import Any
log = logging.getLogger(__name__)

# All OHLC+ columns to request from the DataAPI
_OHLC_EXTRA_COLUMNS = ["volume", "buy_vol", "sell_vol", "open_interest"]
_OHLC_EXTRA_COLUMNS = [
    "volume", "buy_vol", "sell_vol",
    "open_time", "high_time", "low_time", "close_time",
    "open_interest",
]


async def backtest_strategy(
@@ -153,11 +157,11 @@ async def backtest_strategy(

    # --- 7. Return results ---
    payload = {
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "initial_capital": initial_capital,
        "paper": paper,
        "total_candles": total_candles,
        **metrics,
        **metrics,  # keys: summary, statistics, trades, equity_curve
    }
    return [TextContent(type="text", text=json.dumps(payload))]

@@ -2,9 +2,10 @@
"""
Indicator harness — tests a custom indicator against synthetic OHLC data.

Runs in a subprocess so the indicator code is isolated from the MCP server process.
Can be called in-process (preferred) via run() or as a subprocess for backward
compatibility.

Usage: python indicator_harness.py <impl_path> <metadata_path>
Usage (subprocess): python indicator_harness.py <impl_path> <metadata_path>

Outputs JSON to stdout:
    {
@@ -21,7 +22,7 @@ import traceback
import types
from pathlib import Path

# Ensure dexorder package is importable (same as research_harness.py)
# Ensure dexorder package is importable when run as a subprocess
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))


@@ -84,13 +85,15 @@ def summarize(result, n: int) -> str:
    return f"Unexpected return type: {type(result).__name__}"


def main():
    if len(sys.argv) < 3:
        print(json.dumps({"success": False, "error": "Usage: indicator_harness.py <impl_path> <metadata_path>"}))
        sys.exit(1)
def run(impl_path: Path, metadata_path: Path) -> dict:
    """
    Run an indicator against synthetic OHLC data and return results.

    impl_path = sys.argv[1]
    metadata_path = sys.argv[2]
    Returns:
        dict with success, output, error fields
    """
    impl_path = Path(impl_path)
    metadata_path = Path(metadata_path)

    # --- Load metadata ---
    input_series = ["close"]
@@ -107,34 +110,32 @@ def main():
            # bare value (legacy)
            parameters[pname] = pinfo
    except Exception as e:
        print(json.dumps({"success": False, "error": f"Failed to read metadata: {e}"}))
        sys.exit(0)
        return {"success": False, "error": f"Failed to read metadata: {e}"}

    # --- Generate synthetic data ---
    try:
        import numpy  # noqa: F401 — verify numpy available
        import pandas as pd
    except ImportError as e:
        print(json.dumps({"success": False, "error": f"Missing required package: {e}"}))
        sys.exit(0)
        return {"success": False, "error": f"Missing required package: {e}"}

    df = make_synthetic_ohlcv(n=200)
    n = len(df)

    # --- Load implementation ---
    # Clear from sys.modules first so edits are picked up
    module_name = f"_dexorder_indicator_{impl_path.parent.name}"
    sys.modules.pop(module_name, None)
    try:
        spec = importlib.util.spec_from_file_location("_indicator_impl", impl_path)
        spec = importlib.util.spec_from_file_location(module_name, impl_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore[union-attr]
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Import failed:\n{tb}"}))
        sys.exit(0)
        return {"success": False, "error": f"Import failed:\n{tb}"}

    # --- Find the indicator function ---
    # Prefer a function whose name matches the sanitized directory name,
    # fall back to the first public function in the module.
    fn_name = os.path.basename(os.path.dirname(impl_path)).lower()
    fn_name = impl_path.parent.name.lower()
    fn = getattr(module, fn_name, None)
    if fn is None:
        candidates = [
@@ -144,15 +145,13 @@ def main():
        fn = candidates[0] if candidates else None

    if fn is None:
        print(json.dumps({"success": False, "error": "No callable function found in implementation.py"}))
        sys.exit(0)
        return {"success": False, "error": "No callable function found in implementation.py"}

    # --- Build positional args from input_series ---
    args = []
    for col in input_series:
        if col not in df.columns:
            print(json.dumps({"success": False, "error": f"input_series '{col}' not in synthetic df columns {list(df.columns)}"}))
            sys.exit(0)
            return {"success": False, "error": f"input_series '{col}' not in synthetic df columns {list(df.columns)}"}
        args.append(df[col])

    # --- Execute ---
@@ -160,22 +159,29 @@ def main():
        result = fn(*args, **parameters)
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Execution failed:\n{tb}"}))
        sys.exit(0)
        return {"success": False, "error": f"Execution failed:\n{tb}"}

    # --- Validate output type ---
    if not isinstance(result, (pd.Series, pd.DataFrame)):
        print(json.dumps({
        return {
            "success": False,
            "error": (
                f"Indicator must return pd.Series or pd.DataFrame, "
                f"got {type(result).__name__}. "
                "Wrap the output if using pandas-ta internally."
            ),
        }))
        sys.exit(0)
        }

    print(json.dumps({"success": True, "output": summarize(result, n)}))
    return {"success": True, "output": summarize(result, n)}


def main():
    if len(sys.argv) < 3:
        print(json.dumps({"success": False, "error": "Usage: indicator_harness.py <impl_path> <metadata_path>"}))
        sys.exit(1)

    result = run(Path(sys.argv[1]), Path(sys.argv[2]))
    print(json.dumps(result))


if __name__ == "__main__":

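To make the harness contract concrete, here is a minimal indicator (not part of the commit) that run() would accept — the function name must match the item directory name, the return type must be a pd.Series or pd.DataFrame, and metadata.json supplies input_series (default ["close"]) plus keyword parameters; the length value is illustrative:

    # .../indicator/my_sma/implementation.py
    import pandas as pd

    def my_sma(close: pd.Series, length: int = 14) -> pd.Series:
        """Simple moving average; returns a pd.Series, as the harness requires."""
        return close.rolling(length).mean()

    # .../indicator/my_sma/metadata.json (keys as read by the harness above):
    # {"input_series": ["close"], "parameters": {"length": 20}}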
@@ -18,11 +18,13 @@ After write/edit operations, a category-specific test harness runs to validate
the code and capture errors/output for agent feedback.
"""

import concurrent.futures
import json
import logging
import re
import subprocess
import sys
import traceback
from dataclasses import dataclass, asdict
from enum import Enum
from pathlib import Path
@@ -30,16 +32,37 @@ from typing import Any, Optional

log = logging.getLogger(__name__)

# Path to the harness scripts (written to disk, not inline)
_RESEARCH_HARNESS = Path(__file__).parent / "research_harness.py"
_INDICATOR_HARNESS = Path(__file__).parent / "indicator_harness.py"

# Import conda manager for package installation
def _run_inprocess(fn, *args, timeout: int) -> dict:
    """
    Run fn(*args) in a one-shot thread and return its result dict.

    Uses a thread so the calling coroutine is not blocked and the calling
    process does not fork a new Python interpreter. All already-loaded
    libraries (numpy, pandas, matplotlib, etc.) are shared with the thread.

    On timeout returns a dict with _timeout=True. On unexpected exception
    returns a dict with error=True and the traceback in stderr.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(fn, *args)
        try:
            return future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            return {"_timeout": True, "error": True,
                    "stdout": "", "stderr": "", "images": []}
        except Exception:
            return {"error": True, "stdout": "",
                    "stderr": traceback.format_exc(), "images": []}


# Import conda manager for package installation and tracking
try:
    from dexorder.conda_manager import install_packages
    from dexorder.conda_manager import install_packages, cleanup_extra_packages
except ImportError:
    log.warning("conda_manager not available - package installation disabled")
    install_packages = None
    cleanup_extra_packages = None


# =============================================================================
@@ -289,6 +312,49 @@ class GitManager:
        except subprocess.CalledProcessError as e:
            raise RuntimeError(e.stderr.strip()) from e

    def head_short_hash(self) -> str:
        """Return the short hash of HEAD, or 'unknown' on error."""
        try:
            result = self._run("rev-parse", "--short", "HEAD")
            return result.stdout.strip()
        except Exception:
            return "unknown"

    def create_worktree(self, worktree_path: Path, revision: str = "HEAD") -> str:
        """
        Create a git worktree at worktree_path pinned to revision.

        Returns the short hash of the checked-out commit.
        """
        worktree_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            self._run("worktree", "add", "--detach", str(worktree_path), revision)
            # Get short hash of the worktree's HEAD
            result = subprocess.run(
                ["git", "rev-parse", "--short", "HEAD"],
                cwd=str(worktree_path),
                capture_output=True,
                text=True,
                check=True,
            )
            return result.stdout.strip()
        except subprocess.CalledProcessError as e:
            raise RuntimeError(f"git worktree add failed: {e.stderr.strip()}") from e

    def remove_worktree(self, worktree_path: Path) -> None:
        """Remove a git worktree, silently ignoring errors if it no longer exists."""
        try:
            self._run("worktree", "remove", "--force", str(worktree_path), check=False)
        except Exception as e:
            log.warning("git worktree remove failed (non-fatal): %s", e)

    def prune_worktrees(self) -> None:
        """Prune stale worktree references."""
        try:
            self._run("worktree", "prune", check=False)
        except Exception:
            pass


# =============================================================================
# Custom Indicator Setup
@@ -733,7 +799,7 @@ class CategoryFileManager:
        conda_packages = metadata.get("conda_packages", [])
        if conda_packages:
            log.info(f"Installing packages for validation: {conda_packages}")
            install_result = install_packages(conda_packages)
            install_result = install_packages(conda_packages, data_dir=self.data_dir)
            if install_result.get("success"):
                packages_installed = install_result.get("installed", [])
                if packages_installed:
@@ -761,48 +827,49 @@ class CategoryFileManager:

    def _validate_strategy(self, impl_path: Path) -> dict[str, Any]:
        """
        Validate a strategy implementation.
        Validate a strategy by running it against synthetic OHLC data.

        Runs basic syntax check and imports.
        Runs strategy_harness.py in-process via a thread. Catches import errors,
        runtime errors in evaluate(), and wrong class hierarchy — not just syntax.
        """
        try:
            result = subprocess.run(
                [sys.executable, "-m", "py_compile", str(impl_path)],
                capture_output=True,
                text=True,
                timeout=10,
            )
        meta_path = impl_path.parent / "metadata.json"
        return self._execute_strategy(impl_path.parent, timeout=45)

            if result.returncode == 0:
                return {
                    "success": True,
                    "output": "Strategy syntax valid",
                }
            else:
                return {
                    "success": False,
                    "output": result.stderr,
                    "error": "Syntax error in strategy",
                }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": "Validation timeout"}
        except Exception as e:
            return {"success": False, "error": f"Validation failed: {e}"}
    def _execute_strategy(self, item_dir: Path, timeout: int = 45) -> dict[str, Any]:
        """
        Run a strategy against synthetic OHLC data in-process via a thread.

        Returns:
            dict with success, output (human-readable summary), trade_count, error
        """
        impl_path = item_dir / "implementation.py"
        meta_path = item_dir / "metadata.json"

        if not impl_path.exists():
            return {"success": False, "error": "implementation.py not found"}
        if not meta_path.exists():
            return {"success": False, "error": "metadata.json not found"}

        from dexorder.tools.strategy_harness import run as _strategy_run
        result = _run_inprocess(_strategy_run, impl_path, meta_path, timeout=timeout)

        if result.get("_timeout"):
            return {"success": False, "error": f"Strategy test timed out after {timeout}s"}
        return result

    def _validate_indicator(self, impl_path: Path) -> dict[str, Any]:
        """
        Validate an indicator by running it against synthetic OHLC data.

        Uses indicator_harness.py in a subprocess so the indicator code is
        isolated from the MCP server process. Catches import errors, runtime
        errors, and wrong return types — not just syntax.
        Runs indicator_harness.py in-process via a thread. Catches import errors,
        runtime errors, and wrong return types — not just syntax.
        """
        meta_path = impl_path.parent / "metadata.json"
        return self._execute_indicator(impl_path.parent, timeout=30)

    def _execute_indicator(self, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
        """
        Run an indicator against synthetic OHLC data via indicator_harness.py.
        Run an indicator against synthetic OHLC data in-process via a thread.

        Returns:
            dict with success, output (human-readable summary), error
@@ -815,77 +882,22 @@ class CategoryFileManager:
        if not meta_path.exists():
            return {"success": False, "error": "metadata.json not found"}

        try:
            result = subprocess.run(
                [sys.executable, str(_INDICATOR_HARNESS), str(impl_path), str(meta_path)],
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=str(item_dir),
            )
        except subprocess.TimeoutExpired:
        from dexorder.tools.indicator_harness import run as _indicator_run
        result = _run_inprocess(_indicator_run, impl_path, meta_path, timeout=timeout)

        if result.get("_timeout"):
            return {"success": False, "error": f"Indicator test timed out after {timeout}s"}
        except Exception as e:
            return {"success": False, "error": f"Harness launch failed: {e}"}
        return result

        if result.returncode != 0:
            return {
                "success": False,
                "error": f"Harness process failed:\n{result.stderr}",
            }

        try:
            data = json.loads(result.stdout)
        except json.JSONDecodeError:
            return {
                "success": False,
                "error": f"Harness produced invalid JSON:\n{result.stdout[:500]}",
            }

        return data

    def _run_research_harness(self, impl_path: Path, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
    def _run_research_harness(self, impl_path: Path, item_dir: Path, timeout: int = 300) -> dict[str, Any]:
        """
        Run a research script via the on-disk harness and return parsed results.

        The harness (research_harness.py) handles API initialization, stdout/stderr
        capture, matplotlib figure capture, and outputs JSON to stdout.
        Run a research script in-process via a thread and return captured results.

        Returns:
            dict with stdout, stderr, images, error fields — or an error dict.
        """
        try:
            result = subprocess.run(
                [sys.executable, str(_RESEARCH_HARNESS), str(impl_path)],
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=str(item_dir),
            )

            if result.returncode == 0:
                try:
                    return json.loads(result.stdout)
                except json.JSONDecodeError:
                    return {
                        "stdout": result.stdout,
                        "stderr": result.stderr,
                        "images": [],
                        "error": True,
                    }
            else:
                # Harness itself failed (import error, bad args, etc.)
                return {
                    "stdout": "",
                    "stderr": result.stderr,
                    "images": [],
                    "error": True,
                }
        except subprocess.TimeoutExpired:
            return {"stdout": "", "stderr": "", "images": [], "error": True,
                    "_timeout": True}
        except Exception as e:
            return {"stdout": "", "stderr": str(e), "images": [], "error": True}
        from dexorder.tools.research_harness import run as _research_run
        return _run_inprocess(_research_run, impl_path, item_dir, timeout=timeout)

    def _validate_research(self, impl_path: Path, item_dir: Path) -> dict[str, Any]:
        """
@@ -893,7 +905,7 @@ class CategoryFileManager:

        Runs the script via the harness and captures output + pyplot images.
        """
        data = self._run_research_harness(impl_path, item_dir, timeout=30)
        data = self._run_research_harness(impl_path, item_dir, timeout=300)

        if data.get("_timeout"):
            return {"success": False, "error": "Research script timeout"}
@@ -983,6 +995,48 @@ class CategoryFileManager:
        return {"content": content}


    def delete(self, category: str, name: str) -> dict[str, Any]:
        """
        Delete a category script directory and commit the removal to git.

        Args:
            category: Category name (strategy, indicator, research)
            name: Display name of the item to delete

        Returns:
            dict with:
                - success: bool
                - category: str
                - name: str
                - revision: str - git commit hash of the deletion commit
                - error: str (if any)
        """
        import shutil

        try:
            cat = Category(category)
        except ValueError:
            return {
                "success": False,
                "error": f"Invalid category '{category}'. Must be one of: {', '.join(c.value for c in Category)}"
            }

        item_dir = get_category_path(self.src_dir, cat, name)
        if not item_dir.exists():
            return {"success": False, "error": f"{category} '{name}' not found"}

        try:
            shutil.rmtree(item_dir)
            log.info(f"Deleted {cat.value}: {item_dir}")
        except Exception as e:
            return {"success": False, "error": f"Failed to delete: {e}"}

        commit_hash = self.git.commit(f"delete({category}): {name}")
        result: dict[str, Any] = {"success": True, "category": category, "name": name}
        if commit_hash:
            result["revision"] = commit_hash
        return result

    def git_log(
        self,
        category: Optional[str] = None,

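One caveat about _run_inprocess() worth keeping in mind: a thread that exceeds the timeout is not killed. future.result(timeout=...) raises and the _timeout dict is prepared, but ThreadPoolExecutor's context manager joins its worker on exit, so the call does not actually unblock until the runaway function finishes on its own. A small sketch of that standard-library behavior (not part of the commit):

    import concurrent.futures, time

    def slow():
        time.sleep(5)            # simulates a runaway harness
        return {"success": True}

    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as ex:
        f = ex.submit(slow)
        try:
            f.result(timeout=1)
        except concurrent.futures.TimeoutError:
            print("timed out")   # raised after ~1s...
    # ...but the with-block's __exit__ calls shutdown(wait=True), which joins
    # the worker thread, so this point is reached only after slow() completes.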
@@ -1,13 +1,12 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Research script harness - runs implementation.py in a subprocess with API
|
||||
initialization, stdout/stderr capture, and matplotlib figure capture.
|
||||
Research script harness - runs implementation.py with API initialization,
|
||||
stdout/stderr capture, and matplotlib figure capture.
|
||||
|
||||
This file is written to disk and invoked by python_tools.py rather than
|
||||
being passed inline via `python -c`, so the harness code is inspectable and
|
||||
not regenerated on every call.
|
||||
Can be called in-process (preferred) via run() or as a subprocess for backward
|
||||
compatibility.
|
||||
|
||||
Usage:
|
||||
Usage (subprocess):
|
||||
python -m dexorder.tools.research_harness <implementation_path>
|
||||
|
||||
Output (JSON to stdout):
|
||||
@@ -19,73 +18,148 @@ Output (JSON to stdout):
|
||||
}
|
||||
"""
|
||||
|
||||
import sys
|
||||
import io
|
||||
import os
|
||||
import base64
|
||||
import json
|
||||
import sys
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
# Non-interactive matplotlib backend (must be set before importing pyplot)
|
||||
# Non-interactive matplotlib backend (must be set before importing pyplot).
|
||||
# Idempotent — safe to call multiple times.
|
||||
import matplotlib
|
||||
matplotlib.use('Agg')
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Ensure dexorder package is importable
|
||||
# Ensure dexorder package is importable when run as a subprocess
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
||||
|
||||
# ---------------------------------------------------------------------------
# Initialize API from config files so research scripts can call get_api()
# ---------------------------------------------------------------------------
try:
    import yaml

    config_path = os.environ.get("CONFIG_PATH", "/app/config/config.yaml")
    secrets_path = os.environ.get("SECRETS_PATH", "/app/config/secrets.yaml")
def run(impl_path: Path, item_dir: Path) -> dict:
    """
    Run a research script in-process and return captured results.

    config_data = {}
    secrets_data = {}
    if Path(config_path).exists():
        with open(config_path) as f:
            config_data = yaml.safe_load(f) or {}
    if Path(secrets_path).exists():
        with open(secrets_path) as f:
            secrets_data = yaml.safe_load(f) or {}
    Creates a fresh DataAPIImpl per call (thread-safe: API stored in thread-local
    via set_api() so the global API is not overwritten).

    data_cfg = config_data.get("data", {})
    iceberg_cfg = data_cfg.get("iceberg", {})
    relay_cfg = data_cfg.get("relay", {})
    Returns:
        dict with stdout, stderr, images, error fields
    """
    impl_path = Path(impl_path)

    from dexorder.api import set_api, API
    from dexorder.impl.charting_api_impl import ChartingAPIImpl
    from dexorder.impl.data_api_impl import DataAPIImpl
    if not impl_path.exists():
        return {
            "stdout": "",
            "stderr": f"Implementation file not found: {impl_path}",
            "images": [],
            "error": True,
        }

    _data_api = DataAPIImpl(
        iceberg_catalog_uri=iceberg_cfg.get("catalog_uri", "http://iceberg-catalog:8181"),
        relay_endpoint=relay_cfg.get("endpoint", "tcp://relay:5559"),
        notification_endpoint=relay_cfg.get("notification_endpoint", "tcp://relay:5558"),
        namespace=iceberg_cfg.get("namespace", "trading"),
        s3_endpoint=iceberg_cfg.get("s3_endpoint") or secrets_data.get("s3_endpoint"),
        s3_access_key=iceberg_cfg.get("s3_access_key") or secrets_data.get("s3_access_key"),
        s3_secret_key=iceberg_cfg.get("s3_secret_key") or secrets_data.get("s3_secret_key"),
    )
    # NOTE: We intentionally do NOT call asyncio.run(_data_api.start()) here.
    # DataAPIImpl.historical_ohlc() auto-starts on first use, which ensures the
    # ZMQ context and notification listener are created inside the user's own
    # asyncio.run() event loop — avoiding cross-loop lifecycle issues.
    set_api(API(charting=ChartingAPIImpl(), data=_data_api))
except Exception as e:
    print(f"WARNING: API initialization failed: {e}", file=sys.stderr)
    # ---------------------------------------------------------------------------
    # Initialize a fresh API instance for this execution (thread-local)
    # ---------------------------------------------------------------------------
    try:
        import yaml

# ---------------------------------------------------------------------------
# Register custom indicators so research scripts can use df.ta.my_indicator()
# ---------------------------------------------------------------------------
try:
    from dexorder.tools.python_tools import setup_custom_indicators
    _data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
    setup_custom_indicators(_data_dir)
except Exception as e:
    print(f"WARNING: Custom indicator registration failed: {e}", file=sys.stderr)
        config_path = os.environ.get("CONFIG_PATH", "/app/config/config.yaml")
        secrets_path = os.environ.get("SECRETS_PATH", "/app/config/secrets.yaml")

        config_data = {}
        secrets_data = {}
        if Path(config_path).exists():
            with open(config_path) as f:
                config_data = yaml.safe_load(f) or {}
        if Path(secrets_path).exists():
            with open(secrets_path) as f:
                secrets_data = yaml.safe_load(f) or {}

        data_cfg = config_data.get("data", {})
        iceberg_cfg = data_cfg.get("iceberg", {})
        relay_cfg = data_cfg.get("relay", {})

        from dexorder.api import set_api, API
        from dexorder.impl.charting_api_impl import ChartingAPIImpl
        from dexorder.impl.data_api_impl import DataAPIImpl

        _data_api = DataAPIImpl(
            iceberg_catalog_uri=iceberg_cfg.get("catalog_uri", "http://iceberg-catalog:8181"),
            relay_endpoint=relay_cfg.get("endpoint", "tcp://relay:5559"),
            notification_endpoint=relay_cfg.get("notification_endpoint", "tcp://relay:5558"),
            namespace=iceberg_cfg.get("namespace", "trading"),
            s3_endpoint=iceberg_cfg.get("s3_endpoint") or secrets_data.get("s3_endpoint"),
            s3_access_key=iceberg_cfg.get("s3_access_key") or secrets_data.get("s3_access_key"),
            s3_secret_key=iceberg_cfg.get("s3_secret_key") or secrets_data.get("s3_secret_key"),
            s3_region=iceberg_cfg.get("s3_region") or secrets_data.get("s3_region"),
            request_timeout=240.0,
        )
        # NOTE: We intentionally do NOT call asyncio.run(_data_api.start()) here.
        # DataAPIImpl.historical_ohlc() auto-starts on first use, which ensures the
        # ZMQ context and notification listener are created inside the user's own
        # asyncio.run() event loop — avoiding cross-loop lifecycle issues.
        # In a harness thread, set_api() stores to thread-local (not the global).
        set_api(API(charting=ChartingAPIImpl(), data=_data_api))
    except Exception as e:
        # Non-fatal — script may not use the API
        sys.stderr.write(f"WARNING: API initialization failed: {e}\n")

    # ---------------------------------------------------------------------------
    # Register custom indicators
    # ---------------------------------------------------------------------------
    try:
        from dexorder.tools.python_tools import setup_custom_indicators
        _data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
        setup_custom_indicators(_data_dir)
    except Exception as e:
        sys.stderr.write(f"WARNING: Custom indicator registration failed: {e}\n")

    # ---------------------------------------------------------------------------
    # Execute user script with captured stdout/stderr
    # ---------------------------------------------------------------------------
    stdout_buf = io.StringIO()
    stderr_buf = io.StringIO()

    error_occurred = False
    old_stdout, old_stderr = sys.stdout, sys.stderr
    old_cwd = os.getcwd()
    sys.stdout = stdout_buf
    sys.stderr = stderr_buf

    try:
        os.chdir(impl_path.parent)
        exec(compile(impl_path.read_text(), str(impl_path), 'exec'), {})
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        error_occurred = True
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        os.chdir(old_cwd)

    stdout_output = stdout_buf.getvalue()
    stderr_output = stderr_buf.getvalue()

    # ---------------------------------------------------------------------------
    # Capture matplotlib figures
    # ---------------------------------------------------------------------------
    images = []
    if not error_occurred:
        for fig_num in plt.get_fignums():
            fig = plt.figure(fig_num)
            buf = io.BytesIO()
            fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
            buf.seek(0)
            images.append({"format": "png", "data": base64.b64encode(buf.read()).decode('utf-8')})
            buf.close()
    plt.close('all')

    return {
        "stdout": stdout_output,
        "stderr": stderr_output,
        "images": images,
        "error": error_occurred,
    }

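
# Illustrative only (not part of this commit): the kind of script run()
# executes. Only get_api() and the auto-starting historical_ohlc() are named
# in this module; the exact call signature below is assumed.
#
#     import asyncio
#     from dexorder.api import get_api
#
#     async def research():
#         api = get_api()
#         df = await api.data.historical_ohlc("BTC/USDT", 3600)  # signature assumed
#         df["close"].plot(title="BTC/USDT close")  # figure captured by the harness
#
#     asyncio.run(research())
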
def main():
@@ -94,55 +168,8 @@ def main():
        sys.exit(2)

    impl_path = Path(sys.argv[1])
    if not impl_path.exists():
        print(json.dumps({
            "stdout": "",
            "stderr": f"Implementation file not found: {impl_path}",
            "images": [],
            "error": True,
        }))
        sys.exit(0)

    # Capture stdout and stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = io.StringIO()
    sys.stderr = io.StringIO()

    error_occurred = False
    try:
        exec(compile(impl_path.read_text(), str(impl_path), 'exec'), {})
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        import traceback
        traceback.print_exc(file=sys.stderr)
        error_occurred = True

    # Restore stdout/stderr
    stdout_output = sys.stdout.getvalue()
    stderr_output = sys.stderr.getvalue()
    sys.stdout = old_stdout
    sys.stderr = old_stderr

    # Capture all matplotlib figures as base64 PNGs
    images = []
    for fig_num in plt.get_fignums():
        fig = plt.figure(fig_num)
        buf = io.BytesIO()
        fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
        buf.seek(0)
        img_b64 = base64.b64encode(buf.read()).decode('utf-8')
        images.append({"format": "png", "data": img_b64})
        buf.close()
    plt.close('all')

    # Output results as JSON to real stdout
    result = {
        "stdout": stdout_output,
        "stderr": stderr_output,
        "images": images,
        "error": error_occurred,
    }
    item_dir = impl_path.parent
    result = run(impl_path, item_dir)
    print(json.dumps(result))

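A minimal sketch of the in-process path (the wrapper name execute_research and the executor shape are assumptions; python_tools.py's actual caller is not shown in this commit). Running run() on a worker thread means the harness's set_api() call lands in thread-local storage, so the container's global API is untouched:

    from concurrent.futures import ThreadPoolExecutor
    from pathlib import Path

    from dexorder.tools import research_harness

    def execute_research(impl_path: Path) -> dict:
        # One worker thread per call; set_api() inside the harness then
        # stores thread-locally rather than overwriting the global API.
        with ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(research_harness.run, impl_path, impl_path.parent).result()
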
sandbox/dexorder/tools/strategy_harness.py (new file)
@@ -0,0 +1,228 @@
#!/usr/bin/env python3
"""
Strategy harness — validates a PandasStrategy against synthetic OHLC data.

Can be called in-process (preferred) via run() or as a subprocess for backward
compatibility.

Usage (subprocess): python strategy_harness.py <impl_path> <metadata_path>

Outputs JSON to stdout:
    {
        "success": bool,
        "output": str,        # human-readable summary on success
        "trade_count": int,   # number of trades executed in the mini-backtest
        "error": str | null   # error message / traceback if failed
    }
"""
import json
import os
import sys
import traceback
from pathlib import Path

# Ensure dexorder package is importable when run as a subprocess
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))


# ---------------------------------------------------------------------------
# Synthetic OHLCV data — 100 deterministic bars, no network required
# ---------------------------------------------------------------------------

def make_synthetic_ohlcv(n: int = 100):
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(42)
    returns = rng.normal(0, 0.015, n)
    closes = 40_000.0 * np.cumprod(1.0 + returns)

    opens = np.empty(n)
    opens[0] = closes[0]
    opens[1:] = closes[:-1]

    noise = np.abs(rng.normal(0, 0.005, n))
    highs = np.maximum(opens, closes) * (1.0 + noise)
    lows = np.minimum(opens, closes) * (1.0 - noise)
    volumes = rng.uniform(1e6, 1e8, n)
    buy_vols = volumes * rng.uniform(0.4, 0.6, n)

    now_ns = 1_700_000_000_000_000_000  # arbitrary epoch in nanoseconds
    step_ns = 3_600_000_000_000         # 1 hour in nanoseconds
    timestamps = [now_ns + i * step_ns for i in range(n)]

    return pd.DataFrame({
        "timestamp": timestamps,
        "open": opens,
        "high": highs,
        "low": lows,
        "close": closes,
        "volume": volumes,
        "buy_vol": buy_vols,
        "sell_vol": volumes - buy_vols,
        "open_interest": rng.uniform(1e8, 1e9, n),
    })


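# Illustrative only (not part of this file): the generator is seeded with
# default_rng(42), so repeated calls yield identical frames and harness runs
# are reproducible across processes, e.g.
#
#     assert make_synthetic_ohlcv(10).equals(make_synthetic_ohlcv(10))

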
def run(impl_path: Path, metadata_path: Path) -> dict:
    """
    Validate a strategy against synthetic OHLC data and return results.

    Returns:
        dict with success, output, trade_count, error fields
    """
    impl_path = Path(impl_path)
    metadata_path = Path(metadata_path)

    # --- Load metadata (feeds, parameters) ---
    data_feeds: list[dict] = []
    parameters: dict = {}
    try:
        with open(metadata_path) as f:
            meta = json.load(f)
        data_feeds = meta.get("data_feeds") or []
        param_schema = meta.get("parameters") or {}
        for pname, pinfo in param_schema.items():
            if isinstance(pinfo, dict) and "default" in pinfo:
                parameters[pname] = pinfo["default"]
            elif not isinstance(pinfo, dict):
                parameters[pname] = pinfo
    except Exception as e:
        return {"success": False, "output": "", "trade_count": 0, "error": f"Failed to read metadata: {e}"}

    # --- Build synthetic feed keys ---
    if data_feeds:
        feed_configs = [(f.get("symbol", "BTC/USDT.SYNTH"), int(f.get("period_seconds", 3600)))
                        for f in data_feeds]
    else:
        feed_configs = [("BTC/USDT.SYNTH", 3600)]

    # --- Register custom indicators ---
    try:
        from dexorder.tools.python_tools import setup_custom_indicators
        data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
        setup_custom_indicators(data_dir)
    except Exception:
        pass

    # --- Load strategy class ---
    try:
        from dexorder.nautilus.backtest_runner import _load_strategy_class
        strategy_class = _load_strategy_class(impl_path)
    except Exception:
        tb = traceback.format_exc()
        return {"success": False, "output": "", "trade_count": 0, "error": f"Strategy load failed:\n{tb}"}

    # --- Run a minimal backtest with synthetic data ---
    try:
        import pandas as pd
        from dexorder.nautilus.pandas_strategy import PandasStrategyConfig, make_feed_key
        from dexorder.nautilus.backtest_runner import _setup_custom_indicators

        try:
            data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
            _setup_custom_indicators(data_dir)
        except Exception:
            pass

        # Build one synthetic DataFrame per feed
        feed_dfs: dict[str, pd.DataFrame] = {}
        for ticker, period_seconds in feed_configs:
            fk = make_feed_key(ticker, period_seconds)
            feed_dfs[fk] = make_synthetic_ohlcv(100)

        feed_keys = tuple(make_feed_key(t, p) for t, p in feed_configs)
        config = PandasStrategyConfig(
            strategy_id=f"{strategy_class.__name__}-HARNESS",
            feed_keys=feed_keys,
            initial_capital=10_000.0,
        )

        strat = strategy_class(config=config)

        for pname, pval in parameters.items():
            if hasattr(strat, pname):
                setattr(strat, pname, pval)

        # Replay bars: accumulate rows and call evaluate()
        buy_count = 0
        sell_count = 0
        evaluate_errors: list[str] = []
        rows_by_feed: dict[str, list] = {fk: [] for fk in feed_keys}

        for i in range(len(next(iter(feed_dfs.values())))):
            for fk, df in feed_dfs.items():
                row = df.iloc[i].to_dict()
                rows_by_feed[fk].append(row)
            current_dfs = {k: pd.DataFrame(v) for k, v in rows_by_feed.items()}

            _orig_buy = strat.buy
            _orig_sell = strat.sell
            _orig_flatten = strat.flatten

            class _BuyCounter:
                def __call__(inner_self, *a, **kw):
                    nonlocal buy_count
                    buy_count += 1

            class _SellCounter:
                def __call__(inner_self, *a, **kw):
                    nonlocal sell_count
                    sell_count += 1

            strat.buy = _BuyCounter()
            strat.sell = _SellCounter()
            strat.flatten = lambda *a, **kw: None

            try:
                strat.evaluate(current_dfs)
            except Exception as e:
                evaluate_errors.append(f"Bar {i}: {e}")
                if len(evaluate_errors) > 3:
                    break
            finally:
                strat.buy = _orig_buy
                strat.sell = _orig_sell
                strat.flatten = _orig_flatten

            if evaluate_errors and len(evaluate_errors) > 3:
                break

        if evaluate_errors:
            return {
                "success": False,
                "output": "",
                "trade_count": 0,
                "error": "evaluate() raised errors:\n" + "\n".join(evaluate_errors[:3]),
            }

        trade_count = buy_count + sell_count
        n_bars = len(next(iter(feed_dfs.values())))
        n_feeds = len(feed_dfs)
        output = (
            f"Strategy validated OK: {n_bars} bars × {n_feeds} feed(s), "
            f"buy_signals={buy_count}, sell_signals={sell_count}"
        )
        return {"success": True, "output": output, "trade_count": trade_count, "error": None}

    except Exception:
        tb = traceback.format_exc()
        return {"success": False, "output": "", "trade_count": 0, "error": f"Harness execution failed:\n{tb}"}


def main():
    if len(sys.argv) < 3:
        print(json.dumps({
            "success": False,
            "output": "",
            "trade_count": 0,
            "error": "Usage: strategy_harness.py <impl_path> <metadata_path>",
        }))
        sys.exit(1)

    result = run(Path(sys.argv[1]), Path(sys.argv[2]))
    print(json.dumps(result))


if __name__ == "__main__":
    main()
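
A minimal usage sketch for the subprocess path (the file paths below are hypothetical): invoke the harness with a strategy implementation and its metadata file, then parse the JSON report from stdout:

    import json
    import subprocess
    import sys

    proc = subprocess.run(
        [sys.executable, "sandbox/dexorder/tools/strategy_harness.py",
         "my_strategy.py", "my_strategy_metadata.json"],  # hypothetical paths
        capture_output=True, text=True,
    )
    report = json.loads(proc.stdout)
    if not report["success"]:
        print(report["error"], file=sys.stderr)
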
@@ -51,3 +51,4 @@ dependencies:
  - uvicorn>=0.27.0
  - sse-starlette>=1.6.0
  - nautilus_trader>=1.200.0
  - aiosqlite>=0.19.0

sandbox/main.py
@@ -11,6 +11,7 @@ Brings together:

import asyncio
import contextlib
import json
import logging
import os
import signal
@@ -32,7 +33,7 @@ from starlette.routing import Route, Mount

from dexorder import EventPublisher, start_lifecycle_manager, get_lifecycle_manager
from dexorder.api import set_api, API
from dexorder.conda_manager import sync_packages, install_packages
from dexorder.conda_manager import sync_packages, install_packages, cleanup_extra_packages
from dexorder.events import EventType, UserEvent, DeliverySpec
from dexorder.impl.charting_api_impl import ChartingAPIImpl
from dexorder.impl.data_api_impl import DataAPIImpl
@@ -41,6 +42,8 @@ from dexorder.tools.workspace_tools import get_workspace_store
from dexorder.tools.evaluate_indicator import evaluate_indicator
from dexorder.tools.backtest_strategy import backtest_strategy
from dexorder.tools.activate_strategy import activate_strategy, deactivate_strategy, list_active_strategies
from dexorder.strategy.event_bridge import StrategyEventBridge
from dexorder.strategy.lifecycle import get_strategy_lifecycle

# =============================================================================
# Global Data Directory
@@ -59,19 +62,34 @@ def get_data_dir() -> Path:


# =============================================================================
# Indicator Types Helpers
# Category Types Helpers
# =============================================================================

def _build_indicator_type_entry(meta: dict) -> dict:
    """Build an indicator_types workspace entry from indicator metadata dict."""
def _type_store_name(category: str) -> str:
    return f"{category}_types"


def _type_store_key(category: str, name: str) -> str:
    sanitized = sanitize_name(name).lower()
    return f"custom_{sanitized}" if category == "indicator" else sanitized


def _build_type_entry(category: str, meta: dict) -> dict:
    """Build a {category}_types workspace entry from a metadata dict."""
    name = meta.get('name', '')
    pandas_ta_name = f"custom_{sanitize_name(name).lower()}"
    key = _type_store_key(category, name)
    now = int(time.time())
    return {
        'pandas_ta_name': pandas_ta_name,
    entry = {
        'key': key,
        'display_name': name,
        'description': meta.get('description', ''),
        'metadata': {
        'metadata': {},
        'created_at': now,
        'modified_at': now,
    }
    if category == "indicator":
        entry['pandas_ta_name'] = key
        entry['metadata'] = {
            'display_name': name,
            'parameters': meta.get('parameters') or {},
            'input_series': meta.get('input_series') or ['close'],
@@ -79,31 +97,89 @@ def _build_indicator_type_entry(meta: dict) -> dict:
            'pane': meta.get('pane', 'separate'),
            'filled_areas': meta.get('filled_areas') or [],
            'bands': meta.get('bands') or [],
        },
        'created_at': now,
        'modified_at': now,
    }
        }
    elif category == "strategy":
        entry['metadata'] = {
            'data_feeds': meta.get('data_feeds') or [],
            'parameters': meta.get('parameters') or {},
        }
    # research: metadata stays empty (no fields beyond base)
    return entry


def _upsert_indicator_type(workspace_store, category_manager, name: str) -> None:
    """Read indicator metadata from disk and upsert into indicator_types workspace store."""
    read_result = category_manager.read('indicator', name)
def _upsert_type(workspace_store, category_manager, category: str, name: str) -> None:
    """Read category metadata from disk and upsert into the {category}_types workspace store."""
    read_result = category_manager.read(category, name)
    if not read_result.get('exists') or not read_result.get('metadata'):
        return
    meta = read_result['metadata']
    entry = _build_indicator_type_entry(meta)
    pandas_ta_name = entry['pandas_ta_name']
    entry = _build_type_entry(category, read_result['metadata'])
    key = entry['key']
    store = _type_store_name(category)

    # Preserve original created_at if already present
    existing = workspace_store.read('indicator_types')
    existing = workspace_store.read(store)
    existing_types = (existing.get('data') or {}).get('types') or {}
    if pandas_ta_name in existing_types:
        entry['created_at'] = existing_types[pandas_ta_name].get('created_at', entry['created_at'])
    if key in existing_types:
        entry['created_at'] = existing_types[key].get('created_at', entry['created_at'])

    workspace_store.patch('indicator_types', [
        {'op': 'add', 'path': f'/types/{pandas_ta_name}', 'value': entry}
    ])
    logging.info(f"Upserted indicator_types/{pandas_ta_name} for '{name}'")
    workspace_store.patch(store, [{'op': 'add', 'path': f'/types/{key}', 'value': entry}])
    logging.info(f"Upserted {store}/{key} for '{name}'")


def _remove_type(workspace_store, category: str, name: str) -> None:
    """Remove a category item from the {category}_types workspace store."""
    key = _type_store_key(category, name)
    store = _type_store_name(category)
    try:
        workspace_store.patch(store, [{'op': 'remove', 'path': f'/types/{key}'}])
        logging.info(f"Removed {store}/{key} for '{name}'")
    except Exception:
        pass  # entry may not exist; that's fine
    if category == "indicator":
        _remove_indicator_instances(workspace_store, key)


def _remove_indicator_instances(workspace_store, pandas_ta_name: str) -> None:
    """Remove all instances of a custom indicator from the indicators workspace store."""
    existing = workspace_store.read('indicators')
    instances = (existing.get('data') or {}).get('indicators') or {}
    to_remove = [inst_id for inst_id, inst in instances.items()
                 if inst.get('pandas_ta_name') == pandas_ta_name]
    if not to_remove:
        return
    patches = [{'op': 'remove', 'path': f'/indicators/{inst_id}'} for inst_id in to_remove]
    try:
        workspace_store.patch('indicators', patches)
        logging.info(f"Removed {len(to_remove)} instance(s) of {pandas_ta_name} from indicators store")
    except Exception:
        logging.warning(f"Failed to remove indicator instances for {pandas_ta_name}", exc_info=True)


def _populate_types_from_disk(workspace_store, category_manager, category: str) -> None:
    """Scan existing category items and add any missing entries to the {category}_types store."""
    store = _type_store_name(category)
    existing = workspace_store.read(store)
    existing_types = (existing.get('data') or {}).get('types') or {}

    items = category_manager.list_items(category).get('items', [])
    added = 0
    for item in items:
        item_name = item.get('name', '')
        if not item_name:
            continue
        key = _type_store_key(category, item_name)
        if key not in existing_types:
            _upsert_type(workspace_store, category_manager, category, item_name)
            added += 1

    if added > 0:
        logging.info(f"Populated {added} {category} type(s) from disk into {store}")


def _get_env_yml() -> Optional[Path]:
    """Return the path to environment.yml if it exists alongside main.py."""
    p = Path(__file__).parent / "environment.yml"
    return p if p.exists() else None


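# Illustrative only (not part of this commit): the entry shape _build_type_entry
# produces for an indicator, assuming sanitize_name("My RSI") yields "My_RSI"
# (so the key becomes "custom_my_rsi"). Timestamps are elided, and the hunk
# above elides some metadata lines, so only the fields visible in this diff
# are shown.
#
#     _build_type_entry("indicator", {"name": "My RSI", "description": "..."})
#     # -> {'key': 'custom_my_rsi', 'display_name': 'My RSI', 'description': '...',
#     #     'pandas_ta_name': 'custom_my_rsi',
#     #     'metadata': {'display_name': 'My RSI', 'parameters': {},
#     #                  'input_series': ['close'], 'pane': 'separate',
#     #                  'filled_areas': [], 'bands': [], ...},
#     #     'created_at': ..., 'modified_at': ...}

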
def _populate_indicator_types_from_disk(workspace_store, category_manager) -> None:
@@ -226,8 +302,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
    category_manager = get_category_manager(config.data_dir)
    logging.info(f"Category manager initialized at {config.data_dir}")

    # Populate indicator_types store from existing indicators on disk (migration/startup sync)
    _populate_indicator_types_from_disk(workspace_store, category_manager)
    # Populate {category}_types stores from existing items on disk (migration/startup sync)
    for _cat in ("indicator", "strategy", "research"):
        _populate_types_from_disk(workspace_store, category_manager, _cat)

    @server.list_resources()
    async def list_resources():
@@ -503,6 +580,25 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                    "required": ["revision", "category", "name"]
                }
            ),
            Tool(
                name="python_delete",
                description="Delete a category script permanently. Commits removal to git history and removes any conda packages that are no longer needed.",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "category": {
                            "type": "string",
                            "enum": ["strategy", "indicator", "research"],
                            "description": "Category of the script"
                        },
                        "name": {
                            "type": "string",
                            "description": "Display name of the item to delete"
                        }
                    },
                    "required": ["category", "name"]
                }
            ),
            Tool(
                name="conda_sync",
                description="Sync conda packages: scan all metadata, remove unused packages (excluding base environment)",
@@ -699,6 +795,77 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                    "required": []
                }
            ),
            Tool(
                name="get_backtest_results",
                description=(
                    "Retrieve stored backtest results for a strategy. "
                    "Returns the most recent backtest runs with summary stats, "
                    "extended statistics, trade list, and equity curve."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "strategy_name": {
                            "type": "string",
                            "description": "Display name of the strategy"
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Maximum number of backtest runs to return (default 5)",
                            "default": 5
                        }
                    },
                    "required": ["strategy_name"]
                }
            ),
            Tool(
                name="get_strategy_trades",
                description=(
                    "Retrieve the trade log for a strategy (live/paper or backtest). "
                    "Returns individual round-trip trades with entry/exit prices and PnL."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "strategy_name": {
                            "type": "string",
                            "description": "Display name of the strategy"
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Maximum number of trades to return (default 100)",
                            "default": 100
                        }
                    },
                    "required": ["strategy_name"]
                }
            ),
            Tool(
                name="get_strategy_events",
                description=(
                    "Retrieve the event log for a strategy "
                    "(PnL updates, fills, errors, status changes)."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "strategy_name": {
                            "type": "string",
                            "description": "Display name of the strategy"
                        },
                        "event_type": {
                            "type": "string",
                            "description": "Filter by event type (optional): PNL_UPDATE, ORDER_FILLED, ERROR, etc."
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Maximum number of events to return (default 50)",
                            "default": 50
                        }
                    },
                    "required": ["strategy_name"]
                }
            ),
        ]

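
    # Illustrative only (not part of this commit): calling one of the new tools
    # from an MCP client; `session` and its setup are assumptions here, while
    # the tool name and arguments follow the inputSchema definitions above.
    #
    #     result = await session.call_tool(
    #         "get_backtest_results",
    #         {"strategy_name": "MyStrategy", "limit": 3},
    #     )
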
@@ -734,7 +901,11 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                metadata=arguments.get("metadata")
            )
            content = []
            meta_parts = [f"success: {result['success']}", f"path: {result['path']}"]
            meta_parts = [f"success: {result['success']}"]
            if result.get('path'):
                meta_parts.append(f"path: {result['path']}")
            if result.get('error'):
                meta_parts.append(f"error: {result['error']}")
            if result.get("revision"):
                meta_parts.append(f"revision: {result['revision']}")
            if result.get("validation") and not result["validation"].get("success"):
@@ -747,8 +918,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                logging.info(f"python_write '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
            else:
                logging.info(f"python_write '{arguments.get('name')}': no execution result (category={arguments.get('category')})")
            if result.get("success") and arguments.get("category") == "indicator":
                _upsert_indicator_type(workspace_store, category_manager, arguments.get("name", ""))
            if result.get("success"):
                _upsert_type(workspace_store, category_manager, arguments.get("category", ""), arguments.get("name", ""))
                cleanup_extra_packages(get_data_dir(), _get_env_yml())
            return content
        elif name == "python_edit":
            result = category_manager.edit(
@@ -760,7 +932,11 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                metadata=arguments.get("metadata")
            )
            content = []
            meta_parts = [f"success: {result['success']}", f"path: {result['path']}"]
            meta_parts = [f"success: {result['success']}"]
            if result.get('path'):
                meta_parts.append(f"path: {result['path']}")
            if result.get('error'):
                meta_parts.append(f"error: {result['error']}")
            if result.get("revision"):
                meta_parts.append(f"revision: {result['revision']}")
            if result.get("validation") and not result["validation"].get("success"):
@@ -773,8 +949,9 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                logging.info(f"python_edit '{arguments.get('name')}': returning {len(content)} items, {image_count} images")
            else:
                logging.info(f"python_edit '{arguments.get('name')}': no execution result")
            if result.get("success") and arguments.get("category") == "indicator":
                _upsert_indicator_type(workspace_store, category_manager, arguments.get("name", ""))
            if result.get("success"):
                _upsert_type(workspace_store, category_manager, arguments.get("category", ""), arguments.get("name", ""))
                cleanup_extra_packages(get_data_dir(), _get_env_yml())
            return content
        elif name == "python_read":
            return category_manager.read(
@@ -808,13 +985,28 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                meta_parts.append(f"error: {result['error']}")
            if result.get("validation") and not result["validation"].get("success"):
                meta_parts.append(f"validation errors: {result['validation'].get('errors', [])}")
            if result.get("success"):
                _upsert_type(workspace_store, category_manager, arguments.get("category", ""), arguments.get("name", ""))
            return [TextContent(type="text", text="\n".join(meta_parts))]
        elif name == "python_delete":
            result = category_manager.delete(
                category=arguments.get("category", ""),
                name=arguments.get("name", "")
            )
            if result.get("success"):
                _remove_type(workspace_store, arguments.get("category", ""), arguments.get("name", ""))
                cleanup_result = cleanup_extra_packages(get_data_dir(), _get_env_yml())
                if cleanup_result.get("removed"):
                    result["packages_removed"] = cleanup_result["removed"]
            parts = [f"success: {result['success']}"]
            for k in ("category", "name", "revision", "packages_removed", "error"):
                if result.get(k):
                    parts.append(f"{k}: {result[k]}")
            return [TextContent(type="text", text="\n".join(parts))]
        elif name == "conda_sync":
            # Get environment.yml path relative to main.py
            env_yml = Path(__file__).parent / "environment.yml"
            return sync_packages(
                data_dir=get_data_dir(),
                environment_yml=env_yml if env_yml.exists() else None
                environment_yml=_get_env_yml()
            )
        elif name == "conda_install":
            return install_packages(arguments.get("packages", []))
@@ -837,7 +1029,7 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                parameters=arguments.get("parameters") or {},
            )
        elif name == "backtest_strategy":
            return await backtest_strategy(
            result = await backtest_strategy(
                strategy_name=arguments.get("strategy_name", ""),
                feeds=arguments.get("feeds", []),
                from_time=arguments.get("from_time"),
@@ -845,6 +1037,26 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
                initial_capital=float(arguments.get("initial_capital", 10_000.0)),
                paper=bool(arguments.get("paper", True)),
            )
            # Persist backtest to DB (non-fatal)
            try:
                payload = json.loads(result[0].text) if result and isinstance(result[0], TextContent) else {}
                if payload and "summary" in payload:
                    from dexorder.strategy.db import get_strategy_db
                    db = get_strategy_db(get_data_dir())
                    await db.insert_backtest(
                        strategy_name=arguments.get("strategy_name", ""),
                        from_time=arguments.get("from_time"),
                        to_time=arguments.get("to_time"),
                        initial_capital=float(arguments.get("initial_capital", 10_000.0)),
                        feeds=arguments.get("feeds", []),
                        summary=payload.get("summary", {}),
                        statistics=payload.get("statistics", {}),
                        trades=payload.get("trades", []),
                        equity_curve=payload.get("equity_curve", []),
                    )
            except Exception as _e:
                logging.debug("Failed to persist backtest results: %s", _e)
            return result
elif name == "activate_strategy":
|
||||
return await activate_strategy(
|
||||
strategy_name=arguments.get("strategy_name", ""),
|
||||
@@ -858,6 +1070,31 @@ def create_mcp_server(config: Config, event_publisher: EventPublisher) -> Server
|
||||
)
|
||||
elif name == "list_active_strategies":
|
||||
return await list_active_strategies()
|
||||
elif name == "get_backtest_results":
|
||||
from dexorder.strategy.db import get_strategy_db
|
||||
db = get_strategy_db(get_data_dir())
|
||||
results = await db.get_backtests(
|
||||
strategy_name=arguments.get("strategy_name", ""),
|
||||
limit=int(arguments.get("limit", 5)),
|
||||
)
|
||||
return [TextContent(type="text", text=json.dumps({"backtest_runs": results}))]
|
||||
elif name == "get_strategy_trades":
|
||||
from dexorder.strategy.db import get_strategy_db
|
||||
db = get_strategy_db(get_data_dir())
|
||||
trades = await db.get_trades(
|
||||
strategy_name=arguments.get("strategy_name", ""),
|
||||
limit=int(arguments.get("limit", 100)),
|
||||
)
|
||||
return [TextContent(type="text", text=json.dumps({"trades": trades}))]
|
||||
elif name == "get_strategy_events":
|
||||
from dexorder.strategy.db import get_strategy_db
|
||||
db = get_strategy_db(get_data_dir())
|
||||
events = await db.get_events(
|
||||
strategy_name=arguments.get("strategy_name", ""),
|
||||
event_type=arguments.get("event_type"),
|
||||
limit=int(arguments.get("limit", 50)),
|
||||
)
|
||||
return [TextContent(type="text", text=json.dumps({"events": events}))]
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
@@ -909,6 +1146,7 @@ class UserContainer:
|
||||
self.event_publisher: Optional[EventPublisher] = None
|
||||
self.mcp_server: Optional[Server] = None
|
||||
self.data_api: Optional[DataAPIImpl] = None
|
||||
self.event_bridge: Optional[StrategyEventBridge] = None
|
||||
self.running = False
|
||||
|
||||
async def start(self) -> None:
|
||||
@@ -933,6 +1171,7 @@ class UserContainer:
|
||||
s3_endpoint=s3_cfg.get("s3_endpoint") or secrets.get("s3_endpoint"),
|
||||
s3_access_key=s3_cfg.get("s3_access_key") or secrets.get("s3_access_key"),
|
||||
s3_secret_key=s3_cfg.get("s3_secret_key") or secrets.get("s3_secret_key"),
|
||||
s3_region=s3_cfg.get("s3_region") or secrets.get("s3_region"),
|
||||
)
|
||||
await self.data_api.start()
|
||||
set_api(API(charting=ChartingAPIImpl(), data=self.data_api))
|
||||
@@ -965,6 +1204,23 @@ class UserContainer:
|
||||
delivery=DeliverySpec.active_or_telegram(),
|
||||
))
|
||||
|
||||
# Initialize strategy lifecycle manager (sets up DB + worktrees dir)
|
||||
strategy_lifecycle = get_strategy_lifecycle(self.config.data_dir)
|
||||
await strategy_lifecycle.initialize()
|
||||
|
||||
# Start strategy event bridge (PULL socket for subprocess events)
|
||||
self.event_bridge = StrategyEventBridge(
|
||||
event_publisher=self.event_publisher,
|
||||
strategy_lifecycle=strategy_lifecycle,
|
||||
)
|
||||
await self.event_bridge.start()
|
||||
strategy_lifecycle._bridge = self.event_bridge
|
||||
strategy_lifecycle._lifecycle = get_lifecycle_manager()
|
||||
logging.info("Strategy event bridge started")
|
||||
|
||||
# Resume any strategies that were running before container restart
|
||||
await strategy_lifecycle.resume_running()
|
||||
|
||||
# Create MCP server
|
||||
self.mcp_server = create_mcp_server(self.config, self.event_publisher)
|
||||
|
||||
@@ -998,6 +1254,20 @@ class UserContainer:
|
||||
delivery=DeliverySpec.active_or_telegram(),
|
||||
))
|
||||
|
||||
# Stop running strategies gracefully
|
||||
try:
|
||||
from dexorder.strategy.lifecycle import get_strategy_lifecycle
|
||||
strategy_lifecycle = get_strategy_lifecycle()
|
||||
await strategy_lifecycle.shutdown()
|
||||
logging.info("Strategy lifecycle manager stopped")
|
||||
except Exception as e:
|
||||
logging.warning("Error stopping strategy lifecycle: %s", e)
|
||||
|
||||
# Stop event bridge
|
||||
if self.event_bridge:
|
||||
await self.event_bridge.stop()
|
||||
logging.info("Strategy event bridge stopped")
|
||||
|
||||
# Stop subsystems
|
||||
if self.data_api:
|
||||
await self.data_api.stop()
|
||||
|
||||
setup.py (deleted file)
@@ -1,30 +0,0 @@
from setuptools import setup, find_packages

setup(
    name="dexorder-sandbox",
    version="0.1.0",
    description="Dexorder Trading Platform Sandbox",
    packages=find_packages(),
    python_requires=">=3.9",
    install_requires=[
        "pyiceberg>=0.6.0",
        "pyarrow>=14.0.0",
        "pandas>=2.0.0",
        "pyzmq>=25.0.0",
        "protobuf>=4.25.0",
        "pyyaml>=6.0",
        "aiofiles>=23.0.0",
        "mcp>=1.0.0",
        "jsonpatch>=1.33",
        "starlette>=0.27.0",
        "uvicorn>=0.27.0",
        "sse-starlette>=1.6.0",
        "matplotlib>=3.7.0",
    ],
    extras_require={
        "dev": [
            "pytest>=7.0.0",
            "pytest-asyncio>=0.21.0",
        ]
    },
)