data fixes, partial custom indicator support
This commit is contained in:
34
sandbox/dexorder/nautilus/__init__.py
Normal file
34
sandbox/dexorder/nautilus/__init__.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""
|
||||
dexorder.nautilus — Nautilus Trader integration for strategy backtesting.
|
||||
|
||||
Quants import PandasStrategy to write strategies:
|
||||
|
||||
from dexorder.nautilus import PandasStrategy
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
class MyStrategy(PandasStrategy):
|
||||
def evaluate(self, dfs):
|
||||
df = dfs.get("BTC/USDT.BINANCE:3600")
|
||||
if df is None or len(df) < 14:
|
||||
return
|
||||
rsi = ta.rsi(df["close"], length=14)
|
||||
if rsi.iloc[-1] < 30:
|
||||
self.buy(0.01)
|
||||
elif rsi.iloc[-1] > 70:
|
||||
self.sell(0.01)
|
||||
|
||||
SecretsVault provides the interface for user-owned exchange API keys
|
||||
(stub until the user-local vault is implemented):
|
||||
|
||||
from dexorder.nautilus import SecretsVault
|
||||
"""
|
||||
|
||||
from dexorder.nautilus.pandas_strategy import PandasStrategy, PandasStrategyConfig
|
||||
from dexorder.secrets_vault import SecretsVault
|
||||
|
||||
__all__ = [
|
||||
"PandasStrategy",
|
||||
"PandasStrategyConfig",
|
||||
"SecretsVault",
|
||||
]
|
||||
358
sandbox/dexorder/nautilus/backtest_runner.py
Normal file
358
sandbox/dexorder/nautilus/backtest_runner.py
Normal file
@@ -0,0 +1,358 @@
|
||||
"""
|
||||
Backtest runner — sets up Nautilus BacktestEngine and runs a PandasStrategy.
|
||||
|
||||
Entry points
|
||||
------------
|
||||
run_backtest() — called from execute_strategy MCP tool (via thread executor)
|
||||
_load_strategy_class() — exec() the user's implementation.py, find PandasStrategy subclass
|
||||
_setup_custom_indicators() — register user indicators with pandas-ta via ta.import_dir()
|
||||
_compute_metrics() — extract P&L metrics from completed BacktestEngine
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pandas as pd
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Custom indicator setup
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _setup_custom_indicators(data_dir: Path) -> None:
    """Register the user's custom indicators with pandas-ta.

    Thin wrapper delegating to dexorder.tools.python_tools so the
    registration logic lives in exactly one place.
    """
    from dexorder.tools import python_tools

    python_tools.setup_custom_indicators(data_dir)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Strategy class loading
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _load_strategy_class(impl_path: Path) -> type:
    """
    Execute implementation.py and return the unique PandasStrategy subclass.

    The exec namespace is pre-seeded with ``PandasStrategy`` (so the subclass
    check works), pandas as ``pd``, and pandas_ta as ``ta`` when installed.

    Raises:
        ValueError: if zero or multiple PandasStrategy subclasses are defined
        SyntaxError / Exception: if the file fails to parse or execute
    """
    from dexorder.nautilus.pandas_strategy import PandasStrategy

    exec_globals: dict[str, Any] = {
        "__builtins__": __builtins__,
        "PandasStrategy": PandasStrategy,
        "pd": pd,
    }
    try:
        import pandas_ta as ta
    except ImportError:
        # pandas_ta is optional — strategies that don't use it still load.
        pass
    else:
        exec_globals["ta"] = ta

    source = impl_path.read_text()
    # Compile with the real path so tracebacks point at the user's file.
    exec(compile(source, str(impl_path), "exec"), exec_globals)  # noqa: S102

    found: list[type] = []
    for candidate in exec_globals.values():
        if not inspect.isclass(candidate):
            continue
        if issubclass(candidate, PandasStrategy) and candidate is not PandasStrategy:
            found.append(candidate)

    if not found:
        raise ValueError(
            f"No PandasStrategy subclass found in {impl_path}. "
            "The strategy file must define exactly one class that inherits from PandasStrategy."
        )
    if len(found) > 1:
        names = [c.__name__ for c in found]
        raise ValueError(
            f"Multiple PandasStrategy subclasses found in {impl_path}: {names}. "
            "Define exactly one concrete strategy class per file."
        )
    return found[0]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Metrics extraction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _compute_metrics(
|
||||
engine,
|
||||
venue_strs: list[str],
|
||||
initial_capital: float,
|
||||
all_bars: list,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Extract performance metrics from a completed BacktestEngine.
|
||||
|
||||
Returns dict with:
|
||||
total_return float — fractional (0.15 = +15%)
|
||||
sharpe_ratio float — annualized; 0.0 if no trades or constant equity
|
||||
max_drawdown float — max peak-to-trough as fraction (0.10 = 10% drawdown)
|
||||
win_rate float — fraction of trades with positive realized PnL
|
||||
trade_count int
|
||||
equity_curve list[{timestamp: int_unix_s, equity: float}]
|
||||
"""
|
||||
# Reconstruct equity curve from fills
|
||||
equity_points: list[dict] = []
|
||||
if all_bars:
|
||||
equity_points.append({
|
||||
"timestamp": all_bars[0].ts_event // 1_000_000_000,
|
||||
"equity": initial_capital,
|
||||
})
|
||||
|
||||
running_equity = initial_capital
|
||||
trade_count = 0
|
||||
winning_trades = 0
|
||||
|
||||
try:
|
||||
fills_df = engine.trader.generate_order_fills_report()
|
||||
except Exception as exc:
|
||||
log.debug("generate_order_fills_report() failed: %s", exc)
|
||||
fills_df = None
|
||||
|
||||
if fills_df is not None and len(fills_df) > 0:
|
||||
# Sort by event time
|
||||
if "ts_event" in fills_df.columns:
|
||||
fills_df = fills_df.sort_values("ts_event")
|
||||
|
||||
for _, fill in fills_df.iterrows():
|
||||
rpnl = fill.get("realized_pnl") if hasattr(fill, "get") else None
|
||||
if rpnl is None:
|
||||
continue
|
||||
|
||||
# Nautilus Money objects: str form is "15.32 USDT"
|
||||
rpnl_float: float | None = None
|
||||
try:
|
||||
if hasattr(rpnl, "as_decimal"):
|
||||
rpnl_float = float(rpnl.as_decimal())
|
||||
elif rpnl is not None:
|
||||
rpnl_str = str(rpnl).strip()
|
||||
if rpnl_str and rpnl_str.lower() not in ("none", "nan"):
|
||||
rpnl_float = float(rpnl_str.split()[0])
|
||||
except (ValueError, TypeError, IndexError):
|
||||
pass
|
||||
|
||||
if rpnl_float is not None and rpnl_float != 0.0:
|
||||
ts_s: int | None = None
|
||||
raw_ts = fill.get("ts_event") if hasattr(fill, "get") else None
|
||||
if raw_ts is not None:
|
||||
try:
|
||||
ts_s = int(raw_ts) // 1_000_000_000
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
running_equity += rpnl_float
|
||||
trade_count += 1
|
||||
if rpnl_float > 0:
|
||||
winning_trades += 1
|
||||
|
||||
if ts_s is not None:
|
||||
equity_points.append({"timestamp": ts_s, "equity": running_equity})
|
||||
|
||||
if all_bars:
|
||||
equity_points.append({
|
||||
"timestamp": all_bars[-1].ts_event // 1_000_000_000,
|
||||
"equity": running_equity,
|
||||
})
|
||||
|
||||
# Try to get actual final balance from the account (more accurate than fill reconstruction)
|
||||
try:
|
||||
from nautilus_trader.model.identifiers import Venue
|
||||
for venue_str in venue_strs:
|
||||
account = engine.cache.account_for_venue(Venue(venue_str))
|
||||
if account is None:
|
||||
continue
|
||||
# Sum all balances (quote currency is what we started with)
|
||||
for bal in account.balances().values():
|
||||
total = getattr(bal, "total", None)
|
||||
if total is not None:
|
||||
final_val = float(str(total).split()[0]) if not hasattr(total, "as_decimal") else float(total.as_decimal())
|
||||
# Use the account balance as the definitive final equity
|
||||
running_equity = final_val
|
||||
if equity_points:
|
||||
equity_points[-1]["equity"] = running_equity
|
||||
break
|
||||
except Exception as exc:
|
||||
log.debug("Account balance extraction failed: %s", exc)
|
||||
|
||||
# Core metrics
|
||||
total_return = (running_equity - initial_capital) / initial_capital if initial_capital else 0.0
|
||||
win_rate = winning_trades / trade_count if trade_count > 0 else 0.0
|
||||
|
||||
# Sharpe ratio (annualized) from equity curve returns
|
||||
sharpe = 0.0
|
||||
if len(equity_points) > 2 and all_bars and len(all_bars) > 1:
|
||||
equity_series = pd.Series([p["equity"] for p in equity_points])
|
||||
returns = equity_series.pct_change().dropna()
|
||||
if len(returns) > 1 and returns.std() > 0:
|
||||
bar_duration_ns = (all_bars[-1].ts_event - all_bars[0].ts_event) / max(len(all_bars) - 1, 1)
|
||||
if bar_duration_ns > 0:
|
||||
bars_per_year = (365 * 24 * 3600 * 1e9) / bar_duration_ns
|
||||
sharpe = float((returns.mean() / returns.std()) * (bars_per_year ** 0.5))
|
||||
|
||||
# Max drawdown
|
||||
max_drawdown = 0.0
|
||||
if len(equity_points) > 1:
|
||||
equity_arr = pd.Series([p["equity"] for p in equity_points])
|
||||
rolling_max = equity_arr.cummax()
|
||||
drawdowns = (equity_arr - rolling_max) / rolling_max.replace(0, float("nan"))
|
||||
max_drawdown = float(abs(drawdowns.min())) if len(drawdowns) > 0 else 0.0
|
||||
|
||||
return {
|
||||
"total_return": round(total_return, 6),
|
||||
"sharpe_ratio": round(sharpe, 4),
|
||||
"max_drawdown": round(max_drawdown, 6),
|
||||
"win_rate": round(win_rate, 4),
|
||||
"trade_count": trade_count,
|
||||
"equity_curve": equity_points,
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_backtest(
    strategy_class: type,
    feeds: list[tuple[str, int]],
    ohlc_dfs: dict[str, pd.DataFrame],
    initial_capital: float = 10_000.0,
    paper: bool = True,
) -> dict[str, Any]:
    """
    Configure and run a BacktestEngine synchronously.

    Designed to be called from asyncio via loop.run_in_executor() since
    BacktestEngine does not support async.

    Args:
        strategy_class: Concrete PandasStrategy subclass to instantiate
        feeds: List of (ticker, period_seconds) pairs, e.g. [("BTC/USDT.BINANCE", 3600)]
        ohlc_dfs: Dict of feed_key → full OHLC+ DataFrame (with buy_vol, sell_vol, etc.)
        initial_capital: Starting account balance in quote currency
        paper: Always True for historical backtest (flag reserved for forward testing)

    Returns:
        Dict of performance metrics (see _compute_metrics)
    """
    # Local imports: nautilus_trader is heavy — only pay for it when a run happens.
    from nautilus_trader.backtest.engine import BacktestEngine, BacktestEngineConfig
    from nautilus_trader.backtest.models import FillModel
    from nautilus_trader.config import LoggingConfig
    from nautilus_trader.model.enums import OmsType, AccountType
    from nautilus_trader.model.identifiers import Venue
    from nautilus_trader.model.objects import Money

    from dexorder.nautilus.pandas_strategy import PandasStrategyConfig, make_feed_key
    from dexorder.nautilus.data_adapter import (
        make_instrument_from_metadata,
        make_bar_type,
        df_to_bars,
        extras_lookup,
    )

    # --- Engine config ---
    engine_config = BacktestEngineConfig(
        trader_id="DEXORDER-BACKTEST-001",
        logging=LoggingConfig(log_level="ERROR"),  # keep the engine quiet; metrics are reported separately
    )
    engine = BacktestEngine(config=engine_config)

    # --- Per-venue setup (unique venues from feeds) ---
    venues_seen: set[str] = set()
    all_bars: list = []
    feed_keys: list[str] = []

    instruments: dict[str, Any] = {}
    price_precisions: dict[str, int] = {}
    size_precisions: dict[str, int] = {}

    for ticker, period_seconds in feeds:
        feed_key = make_feed_key(ticker, period_seconds)
        feed_keys.append(feed_key)

        from dexorder.symbol_metadata_client import parse_ticker
        exchange_id, _ = parse_ticker(ticker)

        # Each venue is registered once, funded with initial_capital in the
        # quote currency of the FIRST feed seen on that venue.
        if exchange_id not in venues_seen:
            venues_seen.add(exchange_id)
            # Determine quote currency from ticker (e.g. USDT from BTC/USDT)
            _, market_id = parse_ticker(ticker)
            quote_str = market_id.split("/")[1] if "/" in market_id else "USDT"
            from nautilus_trader.model.currencies import Currency
            quote_currency = Currency.from_str(quote_str)
            engine.add_venue(
                venue=Venue(exchange_id),
                oms_type=OmsType.NETTING,
                account_type=AccountType.CASH,
                base_currency=None,  # multi-currency CASH account
                starting_balances=[Money(initial_capital, quote_currency)],
                fill_model=FillModel(),  # default probabilistic fill model
            )

        # Instrument and bars
        instrument, pp, sp = make_instrument_from_metadata(ticker)
        instruments[feed_key] = instrument
        price_precisions[feed_key] = pp
        size_precisions[feed_key] = sp

        engine.add_instrument(instrument)

        df = ohlc_dfs.get(feed_key)
        if df is not None and not df.empty:
            bar_type = make_bar_type(ticker, period_seconds)
            bars = df_to_bars(df, bar_type, pp, sp)
            engine.add_data(bars)
            all_bars.extend(bars)
        else:
            log.warning("No OHLC data for feed %s — strategy will receive no bars", feed_key)

    # No data at all → return a zeroed metrics dict rather than running an empty engine.
    if not all_bars:
        return {
            "total_return": 0.0, "sharpe_ratio": 0.0, "max_drawdown": 0.0,
            "win_rate": 0.0, "trade_count": 0, "equity_curve": [],
        }

    # Sort combined bars by timestamp for metrics computation
    all_bars.sort(key=lambda b: b.ts_event)

    # --- Instantiate and configure strategy ---
    strategy_config = PandasStrategyConfig(
        strategy_id=f"{strategy_class.__name__}-001",
        feed_keys=tuple(feed_keys),
        initial_capital=initial_capital,
    )
    strategy = strategy_class(config=strategy_config)

    # Inject OHLC+ extras before run — Nautilus Bar objects cannot carry
    # buy_vol/sell_vol/open_interest, so the strategy merges them by timestamp.
    for feed_key, df in ohlc_dfs.items():
        if df is not None and not df.empty:
            strategy._inject_extras(feed_key, extras_lookup(df))

    engine.add_strategy(strategy)

    # --- Run ---
    engine.run()

    # --- Extract metrics ---
    metrics = _compute_metrics(engine, list(venues_seen), initial_capital, all_bars)
    engine.dispose()  # free engine resources before returning to the executor

    return metrics
|
||||
235
sandbox/dexorder/nautilus/data_adapter.py
Normal file
235
sandbox/dexorder/nautilus/data_adapter.py
Normal file
@@ -0,0 +1,235 @@
|
||||
"""
|
||||
Data adapter — converts our OHLC DataFrames to Nautilus objects.
|
||||
|
||||
Functions
|
||||
---------
|
||||
make_instrument — CurrencyPair from ticker string
|
||||
make_bar_type — BarType from ticker + period_seconds
|
||||
df_to_bars — OHLC DataFrame → list[Bar]
|
||||
extras_lookup — extract OHLC+ extras dict from DataFrame
|
||||
make_instrument_from_metadata — instrument with best-effort precision
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from nautilus_trader.model.currencies import Currency
|
||||
from nautilus_trader.model.data import Bar, BarType, BarSpecification
|
||||
from nautilus_trader.model.enums import BarAggregation, PriceType, AggregationSource
|
||||
from nautilus_trader.model.identifiers import InstrumentId, Symbol, Venue
|
||||
from nautilus_trader.model.instruments import CurrencyPair
|
||||
from nautilus_trader.model.objects import Price, Quantity
|
||||
|
||||
from dexorder.symbol_metadata_client import parse_ticker
|
||||
from dexorder.nautilus.pandas_strategy import (
|
||||
bar_type_from_feed_key,
|
||||
_PERIOD_TO_AGGREGATION,
|
||||
)
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Columns in our OHLC+ DataFrames that are extras (not part of Nautilus Bar).
# Consumed by extras_lookup(); order here defines the key order of each entry.
_EXTRA_COLS = ("buy_vol", "sell_vol", "open_interest")
|
||||
|
||||
|
||||
def make_bar_type(ticker: str, period_seconds: int) -> BarType:
    """
    Build the Nautilus BarType for *ticker* at *period_seconds* granularity.

    The period maps to the finest bucket it fits: SECOND below one minute,
    MINUTE below one hour, HOUR below one day, DAY otherwise; the step is
    clamped to at least 1. Price type is MID (standard for crypto OHLC) and
    the source is EXTERNAL because we supply pre-aggregated data rather than
    letting Nautilus aggregate.
    """
    venue_str, market_str = parse_ticker(ticker)
    iid = InstrumentId(Symbol(market_str), Venue(venue_str))

    # Default to DAY; the ladder below overrides for sub-day periods.
    agg = BarAggregation.DAY
    step = max(1, period_seconds // 86400)
    for limit, aggregation, unit_seconds in _PERIOD_TO_AGGREGATION:
        if period_seconds < limit:
            agg = aggregation
            step = max(1, period_seconds // unit_seconds)
            break

    spec = BarSpecification(step=step, aggregation=agg, price_type=PriceType.MID)
    return BarType(
        instrument_id=iid,
        bar_spec=spec,
        aggregation_source=AggregationSource.EXTERNAL,
    )
|
||||
|
||||
|
||||
def make_instrument(
    ticker: str,
    price_precision: int = 8,
    size_precision: int = 8,
    tick_size: Optional[float] = None,
    lot_size: Optional[float] = None,
    maker_fee: float = 0.001,
    taker_fee: float = 0.001,
    margin_init: float = 0.0,
    margin_maint: float = 0.0,
) -> CurrencyPair:
    """
    Create a minimal CurrencyPair instrument from a Nautilus-format ticker.

    Args:
        ticker: e.g. "BTC/USDT.BINANCE"
        price_precision: decimal places for price (default 8)
        size_precision: decimal places for quantity (default 8)
        tick_size: minimum price increment (defaults to 10^-price_precision)
        lot_size: minimum order size (defaults to 10^-size_precision)
        maker_fee, taker_fee: fee rates as fractions (0.001 = 0.1%)
        margin_init, margin_maint: margin ratios (0.0 = spot/no margin)

    Raises:
        ValueError: from parse_ticker on a malformed ticker, or from
            market_id.split("/") unpacking when no quote currency is present.
    """
    exchange_id, market_id = parse_ticker(ticker)
    # Assumes market_id is "BASE/QUOTE"; unpacking raises ValueError otherwise.
    base_str, quote_str = market_id.split("/")

    instrument_id = InstrumentId(Symbol(market_id), Venue(exchange_id))

    # Defaults: one unit in the last decimal place of each precision.
    if tick_size is None:
        tick_size = 10.0 ** (-price_precision)
    if lot_size is None:
        lot_size = 10.0 ** (-size_precision)

    ts_now = 0  # static instrument — timestamp not relevant for backtesting

    # NOTE(review): fee/margin args are passed as floats — confirm CurrencyPair
    # accepts non-Decimal values in the nautilus version pinned by this project.
    return CurrencyPair(
        instrument_id=instrument_id,
        raw_symbol=Symbol(market_id),
        base_currency=Currency.from_str(base_str),
        quote_currency=Currency.from_str(quote_str),
        price_precision=price_precision,
        size_precision=size_precision,
        price_increment=Price(tick_size, price_precision),
        size_increment=Quantity(lot_size, size_precision),
        lot_size=Quantity(lot_size, size_precision),
        max_quantity=None,
        min_quantity=Quantity(lot_size, size_precision),
        max_notional=None,
        min_notional=None,
        max_price=None,
        min_price=None,
        margin_init=margin_init,
        margin_maint=margin_maint,
        maker_fee=maker_fee,
        taker_fee=taker_fee,
        ts_event=ts_now,
        ts_init=ts_now,
    )
|
||||
|
||||
|
||||
def make_instrument_from_metadata(ticker: str) -> tuple[CurrencyPair, int, int]:
    """
    Create a CurrencyPair using SymbolMetadata when available.

    Returns:
        (instrument, price_precision, size_precision)

    Falls back to (instrument with 8/8 defaults) if metadata is unavailable.
    The broad ``except Exception`` is deliberate: metadata lookup is a
    best-effort enrichment and must never abort instrument creation.
    """
    try:
        from dexorder.symbol_metadata_client import SymbolMetadataClient
        from dexorder.api import get_api
        # DataAPIImpl stores the catalog URI as an attribute on the OHLCClient.
        # Probe both private and public attribute names — presumably the name
        # differs across versions; TODO confirm which one is canonical.
        api = get_api()
        ohlc_client = getattr(api.data, '_ohlc_client', None) or getattr(api.data, 'ohlc_client', None)
        iceberg_client = getattr(ohlc_client, 'iceberg', None) if ohlc_client else None
        catalog_uri = getattr(iceberg_client, 'catalog_uri', None) if iceberg_client else None

        if catalog_uri:
            meta_client = SymbolMetadataClient(catalog_uri=catalog_uri)
            meta = meta_client.get_metadata(ticker)
            # `or` fallbacks: treat 0/None metadata fields as "unknown".
            pp = meta.price_precision or 8
            sp = meta.size_precision or 8
            instrument = make_instrument(
                ticker,
                price_precision=pp,
                size_precision=sp,
                tick_size=meta.tick_size,
                lot_size=meta.lot_size,
                maker_fee=meta.maker_fee or 0.001,
                taker_fee=meta.taker_fee or 0.001,
                margin_init=meta.margin_init or 0.0,
                margin_maint=meta.margin_maint or 0.0,
            )
            return instrument, pp, sp
    except Exception:
        log.debug("make_instrument_from_metadata: metadata unavailable for %s, using defaults", ticker)

    # Fallback path: no catalog URI or any failure above — generic 8/8 instrument.
    instrument = make_instrument(ticker)
    return instrument, 8, 8
|
||||
|
||||
|
||||
def df_to_bars(
    df: pd.DataFrame,
    bar_type: BarType,
    price_precision: int = 8,
    size_precision: int = 8,
) -> list[Bar]:
    """
    Convert an OHLC DataFrame into a list of Nautilus Bar objects.

    Args:
        df: DataFrame with columns [timestamp (ns), open, high, low, close];
            a ``volume`` column is optional and defaults to 0.0 when absent.
        bar_type: BarType to tag each bar with.
        price_precision: decimal precision for Price construction.
        size_precision: decimal precision for Quantity construction.

    Returns:
        list[Bar] in the same row order as *df* (expected ascending by timestamp).
    """
    volume_present = "volume" in df.columns

    def _to_bar(rec) -> Bar:
        # Historical data: ts_event and ts_init are the same instant.
        ts = int(rec.timestamp)
        return Bar(
            bar_type=bar_type,
            open=Price(float(rec.open), price_precision),
            high=Price(float(rec.high), price_precision),
            low=Price(float(rec.low), price_precision),
            close=Price(float(rec.close), price_precision),
            volume=Quantity(float(rec.volume) if volume_present else 0.0, size_precision),
            ts_event=ts,
            ts_init=ts,
        )

    return [_to_bar(rec) for rec in df.itertuples(index=False)]
|
||||
|
||||
|
||||
def extras_lookup(df: pd.DataFrame) -> dict[int, dict]:
    """
    Map each bar timestamp (ns) to its OHLC+ extra fields.

    Returns {ts_event_ns: {"buy_vol": ..., "sell_vol": ..., "open_interest": ...}}
    with None for any column absent from the DataFrame, or NaN in a row.

    Used by PandasStrategy._inject_extras() to enrich each bar with
    OHLC+ fields that Nautilus Bar does not carry natively.
    """
    available = [col for col in _EXTRA_COLS if col in df.columns]

    lookup: dict[int, dict] = {}
    for rec in df.itertuples(index=False):
        # Start with every extra defaulted to None, then fill present columns.
        entry = dict.fromkeys(_EXTRA_COLS)
        for col in available:
            value = getattr(rec, col)
            if value is not None and not (isinstance(value, float) and pd.isna(value)):
                entry[col] = float(value)
        lookup[int(rec.timestamp)] = entry

    return lookup
|
||||
315
sandbox/dexorder/nautilus/pandas_strategy.py
Normal file
315
sandbox/dexorder/nautilus/pandas_strategy.py
Normal file
@@ -0,0 +1,315 @@
|
||||
"""
|
||||
PandasStrategy — Nautilus Strategy base class with a DataFrame-oriented API.
|
||||
|
||||
Quants subclass PandasStrategy and implement evaluate(dfs) — the same function
|
||||
they'd write in a research notebook. No Nautilus objects appear in quant code.
|
||||
|
||||
Features:
|
||||
- Multiple data feeds: subscribe to N (ticker, period_seconds) pairs
|
||||
- evaluate(dfs) receives a dict[feed_key, DataFrame] where feed_key is "TICKER:period_seconds"
|
||||
- Every feed's DataFrame includes OHLC + volume, buy_vol, sell_vol, open_interest
|
||||
- Timer hook (on_timer) reserved as extension point — TBD
|
||||
|
||||
Feed key format: "BTC/USDT.BINANCE:3600"
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import abstractmethod
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from nautilus_trader.config import StrategyConfig
|
||||
from nautilus_trader.model.data import Bar, BarType, BarSpecification
|
||||
from nautilus_trader.model.enums import BarAggregation, PriceType, AggregationSource, OrderSide, TimeInForce
|
||||
from nautilus_trader.model.identifiers import InstrumentId, Symbol, Venue
|
||||
from nautilus_trader.model.objects import Quantity
|
||||
from nautilus_trader.trading.strategy import Strategy
|
||||
|
||||
from dexorder.symbol_metadata_client import parse_ticker
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Feed key helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Ladder translating our integer period_seconds into a Nautilus
# (aggregation, step) pair. Scanned in order; the first threshold larger than
# the period wins. Periods >= 86400s fall through to DAY in the callers.
_PERIOD_TO_AGGREGATION: list[tuple[int, BarAggregation, int]] = [
    # (threshold_exclusive, aggregation, divisor)
    # period_seconds < 60            → SECOND, step = period_seconds
    (60, BarAggregation.SECOND, 1),
    # 60 <= period_seconds < 3600    → MINUTE
    (3600, BarAggregation.MINUTE, 60),
    # 3600 <= period_seconds < 86400 → HOUR
    (86400, BarAggregation.HOUR, 3600),
]

# Inverse mapping: seconds represented by one step of each aggregation.
# Used by feed_key_from_bar_type() to reconstruct period_seconds.
_AGG_TO_SECONDS: dict[BarAggregation, int] = {
    BarAggregation.SECOND: 1,
    BarAggregation.MINUTE: 60,
    BarAggregation.HOUR: 3600,
    BarAggregation.DAY: 86400,
}
|
||||
|
||||
|
||||
def make_feed_key(ticker: str, period_seconds: int) -> str:
    """Build the canonical feed key, e.g. 'BTC/USDT.BINANCE:3600'.

    Inverse of parse_feed_key().
    """
    return "{}:{}".format(ticker, period_seconds)
|
||||
|
||||
|
||||
def parse_feed_key(feed_key: str) -> tuple[str, int]:
    """Split 'BTC/USDT.BINANCE:3600' into ('BTC/USDT.BINANCE', 3600).

    Splits on the LAST ':' so the period is always the trailing integer.
    Raises ValueError when no ':' is present or the period is not numeric.
    """
    sep_at = feed_key.rindex(":")
    return feed_key[:sep_at], int(feed_key[sep_at + 1:])
|
||||
|
||||
|
||||
def bar_type_from_feed_key(feed_key: str) -> BarType:
    """
    Build a Nautilus BarType from a feed key string (e.g. 'BTC/USDT.BINANCE:3600').

    Mirrors data_adapter.make_bar_type(): the period maps to the finest
    aggregation bucket (SECOND/MINUTE/HOUR/DAY). FIX: the step is now clamped
    to at least 1 — consistent with make_bar_type() — so a degenerate period
    (e.g. 0) can never produce an invalid zero-step BarSpecification.
    """
    ticker, period_seconds = parse_feed_key(feed_key)
    exchange_id, market_id = parse_ticker(ticker)
    instrument_id = InstrumentId(Symbol(market_id), Venue(exchange_id))

    for threshold, agg, divisor in _PERIOD_TO_AGGREGATION:
        if period_seconds < threshold:
            step = max(1, period_seconds // divisor)
            break
    else:
        # Periods of a day or longer.
        agg = BarAggregation.DAY
        step = max(1, period_seconds // 86400)

    spec = BarSpecification(step=step, aggregation=agg, price_type=PriceType.MID)
    return BarType(instrument_id=instrument_id, bar_spec=spec,
                   aggregation_source=AggregationSource.EXTERNAL)
|
||||
|
||||
|
||||
def feed_key_from_bar_type(bar_type: BarType) -> str:
    """Reconstruct the 'TICKER:period_seconds' feed key from a BarType."""
    instrument = bar_type.instrument_id
    # One step of the aggregation unit, in seconds; unknown units count as 1s.
    seconds_per_step = _AGG_TO_SECONDS.get(bar_type.spec.aggregation, 1)
    period_seconds = bar_type.spec.step * seconds_per_step
    return f"{instrument.symbol}.{instrument.venue}:{period_seconds}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Config
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class PandasStrategyConfig(StrategyConfig, frozen=True):
    """
    Configuration for PandasStrategy.

    feed_keys: tuple of feed key strings, e.g. ("BTC/USDT.BINANCE:3600",)
        Set by the backtest/activate runner — not by the quant's code.
    initial_capital: informational; actual account balance is set in BacktestEngine.
    """
    # Feeds subscribed in on_start(); "TICKER:period_seconds" format.
    feed_keys: tuple[str, ...] = ()
    # Informational only — the engine's venue starting balance is authoritative.
    initial_capital: float = 10_000.0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Base class
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class PandasStrategy(Strategy):
|
||||
"""
|
||||
Base class for quant strategies.
|
||||
|
||||
Quants implement evaluate(dfs) — the same function they'd write in a research
|
||||
notebook. All bar accumulation, OHLC+ field injection, and DataFrame management
|
||||
is handled internally.
|
||||
|
||||
Example
|
||||
-------
|
||||
::
|
||||
|
||||
from dexorder.nautilus import PandasStrategy
|
||||
import pandas as pd
|
||||
import pandas_ta as ta
|
||||
|
||||
class MyStrategy(PandasStrategy):
|
||||
def evaluate(self, dfs):
|
||||
df = dfs.get("BTC/USDT.BINANCE:3600")
|
||||
if df is None or len(df) < 14:
|
||||
return
|
||||
rsi = ta.rsi(df["close"], length=14)
|
||||
if rsi.iloc[-1] < 30:
|
||||
self.buy(0.01)
|
||||
elif rsi.iloc[-1] > 70:
|
||||
self.sell(0.01)
|
||||
"""
|
||||
|
||||
def __init__(self, config: PandasStrategyConfig) -> None:
|
||||
super().__init__(config)
|
||||
# Per-feed row accumulator
|
||||
self._rows: dict[str, list[dict]] = {}
|
||||
# Per-feed DataFrame (updated after each bar)
|
||||
self._dfs: dict[str, pd.DataFrame] = {}
|
||||
# Per-feed extras lookup: {ts_event_ns: {buy_vol, sell_vol, open_interest}}
|
||||
self._extras: dict[str, dict[int, dict]] = {}
|
||||
# Resolved BarType objects (populated in on_start)
|
||||
self._bar_types: dict[str, BarType] = {}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Nautilus lifecycle
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def on_start(self) -> None:
|
||||
for feed_key in self.config.feed_keys:
|
||||
bar_type = bar_type_from_feed_key(feed_key)
|
||||
self._bar_types[feed_key] = bar_type
|
||||
self.subscribe_bars(bar_type)
|
||||
|
||||
def on_bar(self, bar: Bar) -> None:
|
||||
feed_key = feed_key_from_bar_type(bar.bar_type)
|
||||
ts_ns = bar.ts_event
|
||||
|
||||
# Merge OHLC+ extras (buy_vol, sell_vol, open_interest) by timestamp
|
||||
extras = self._extras.get(feed_key, {}).get(ts_ns, {})
|
||||
|
||||
row = {
|
||||
"timestamp": ts_ns,
|
||||
"open": float(bar.open),
|
||||
"high": float(bar.high),
|
||||
"low": float(bar.low),
|
||||
"close": float(bar.close),
|
||||
"volume": float(bar.volume),
|
||||
"buy_vol": extras.get("buy_vol"),
|
||||
"sell_vol": extras.get("sell_vol"),
|
||||
"open_interest": extras.get("open_interest"),
|
||||
}
|
||||
|
||||
if feed_key not in self._rows:
|
||||
self._rows[feed_key] = []
|
||||
self._rows[feed_key].append(row)
|
||||
self._dfs[feed_key] = pd.DataFrame(self._rows[feed_key])
|
||||
|
||||
self.evaluate(self._dfs)
|
||||
|
||||
    def on_stop(self) -> None:
        # No teardown needed: subscriptions are released by the engine and the
        # accumulated DataFrames are left intact for post-run inspection.
        pass
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Quant API — override in subclass
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
    @abstractmethod
    def evaluate(self, dfs: dict[str, pd.DataFrame]) -> None:
        """
        Implement your strategy logic here.

        Called after every new bar on any subscribed feed. Read the DataFrames,
        decide, and act via the trading methods — do not mutate *dfs*.

        Args:
            dfs: Dict mapping feed_key → DataFrame.
                Feed key format: "TICKER:period_seconds", e.g. "BTC/USDT.BINANCE:3600".
                DataFrame columns: timestamp (ns), open, high, low, close, volume,
                buy_vol, sell_vol, open_interest.
                All rows up to and including the latest bar are included.
                A feed's DataFrame is absent (key missing) until its first bar arrives.

        Trading methods available:
            self.buy(quantity, feed_key=None) — market buy
            self.sell(quantity, feed_key=None) — market sell
            self.flatten(feed_key=None) — close all positions for feed
        """
|
||||
|
||||
def on_timer(self, timer_name: str) -> None:
    """
    Timer-tick hook (timer wiring itself is TBD — not yet implemented).

    Override for time-based logic decoupled from bar arrival; by default it
    simply re-runs evaluate() against the currently accumulated DataFrames.
    """
    self.evaluate(self._dfs)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Order helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _resolve_feed_key(self, feed_key: str | None) -> str | None:
    """Resolve an explicit feed key, falling back to the first configured one (or None)."""
    if feed_key is None:
        configured = self.config.feed_keys
        if configured:
            return configured[0]
        return None
    return feed_key
|
||||
|
||||
def _instrument_id_for_feed(self, feed_key: str) -> InstrumentId | None:
    """
    Translate a feed key into a Nautilus InstrumentId.

    Returns None (after logging an error) when the ticker embedded in the
    feed key cannot be parsed into an exchange/market pair.
    """
    ticker, _period = parse_feed_key(feed_key)
    try:
        venue_name, symbol_name = parse_ticker(ticker)
        return InstrumentId(Symbol(symbol_name), Venue(venue_name))
    except ValueError:
        log.error("Cannot parse ticker from feed key: %s", feed_key)
        return None
|
||||
|
||||
def buy(self, quantity: float, feed_key: str | None = None) -> None:
    """Submit a market buy order. Defaults to the first configured feed."""
    self._submit_market_order(OrderSide.BUY, quantity, feed_key, "buy")


def sell(self, quantity: float, feed_key: str | None = None) -> None:
    """Submit a market sell order. Defaults to the first configured feed."""
    self._submit_market_order(OrderSide.SELL, quantity, feed_key, "sell")


def _submit_market_order(
    self,
    side: OrderSide,
    quantity: float,
    feed_key: str | None,
    caller: str,
) -> None:
    """
    Shared implementation for buy()/sell() (the two were byte-for-byte
    duplicates apart from the side): resolve feed → instrument → submit a
    GTC market order.

    Args:
        side: OrderSide.BUY or OrderSide.SELL.
        quantity: Order size, rounded to the instrument's size precision.
        feed_key: Optional feed key; None means the first configured feed.
        caller: "buy" or "sell", used only to keep log messages identical
            to the original per-method implementations.
    """
    fk = self._resolve_feed_key(feed_key)
    if not fk:
        log.error("%s(): no feed key available", caller)
        return
    instrument_id = self._instrument_id_for_feed(fk)
    if instrument_id is None:
        # _instrument_id_for_feed already logged the parse failure.
        return
    instrument = self.cache.instrument(instrument_id)
    if instrument is None:
        log.error("%s(): instrument not found for %s", caller, instrument_id)
        return
    order = self.order_factory.market(
        instrument_id=instrument_id,
        order_side=side,
        quantity=Quantity(quantity, instrument.size_precision),
        time_in_force=TimeInForce.GTC,
    )
    self.submit_order(order)
|
||||
|
||||
def flatten(self, feed_key: str | None = None) -> None:
    """Close every open position on the resolved feed (defaults to the first feed)."""
    fk = self._resolve_feed_key(feed_key)
    if not fk:
        return
    instrument_id = self._instrument_id_for_feed(fk)
    if instrument_id is None:
        return
    for position in self.cache.positions_open(instrument_id=instrument_id):
        self.close_position(position)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Runner API — called by backtest_runner, not by quant code
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _inject_extras(self, feed_key: str, extras: dict[int, dict]) -> None:
    """
    Pre-load OHLC+ extras for one feed before the backtest starts.

    on_bar() later merges these into each row by the bar's ts_event.
    Note: the mapping is stored by reference, not copied.

    Args:
        feed_key: e.g. "BTC/USDT.BINANCE:3600"
        extras: {ts_event_ns: {"buy_vol": float|None, "sell_vol": float|None,
                               "open_interest": float|None}}
    """
    self._extras[feed_key] = extras
|
||||
@@ -141,39 +141,6 @@ class OHLCClient:
|
||||
# Step 5: Query Iceberg again for complete dataset
|
||||
df = self.iceberg.query_ohlc(ticker, period_seconds, start_time, end_time)
|
||||
|
||||
return self._forward_fill_gaps(df, period_seconds)
|
||||
|
||||
def _forward_fill_gaps(self, df: pd.DataFrame, period_seconds: int) -> pd.DataFrame:
|
||||
"""
|
||||
Forward-fill interior missing bars by carrying the last known close into
|
||||
open, high, low, and close of any gap bar.
|
||||
|
||||
Only interior gaps (rows already present with null OHLC from the ingestor,
|
||||
or timestamp slots missing between real bars) are filled. Edge gaps (before
|
||||
the first real bar or after the last real bar) are left as-is.
|
||||
"""
|
||||
if df.empty:
|
||||
return df
|
||||
|
||||
df = df.sort_index()
|
||||
|
||||
# Identify rows that are gap bars (null close)
|
||||
is_gap = df['close'].isna()
|
||||
|
||||
if not is_gap.any():
|
||||
return df
|
||||
|
||||
# Forward-fill close across gap rows, then copy into open/high/low
|
||||
df['close'] = df['close'].ffill()
|
||||
price_cols = ['open', 'high', 'low']
|
||||
for col in price_cols:
|
||||
if col in df.columns:
|
||||
df[col] = df[col].where(~is_gap, df['close'])
|
||||
|
||||
# Zero out volume for filled gap rows
|
||||
if 'volume' in df.columns:
|
||||
df['volume'] = df['volume'].where(~is_gap, 0.0)
|
||||
|
||||
return df
|
||||
|
||||
async def __aenter__(self):
|
||||
|
||||
71
sandbox/dexorder/secrets_vault.py
Normal file
71
sandbox/dexorder/secrets_vault.py
Normal file
@@ -0,0 +1,71 @@
|
||||
"""
|
||||
User Secrets Vault
|
||||
|
||||
Stores user-owned API keys for live exchange execution. Secured with the user's
|
||||
password — Dexorder cannot read these secrets. This is entirely separate from
|
||||
secrets.yaml, which holds Dexorder infrastructure credentials (Iceberg, MinIO, etc.).
|
||||
|
||||
Currently a stub — raises NotImplementedError on all calls. Will be backed by
|
||||
a user-local encrypted store in a future iteration.
|
||||
"""
|
||||
|
||||
|
||||
class SecretsVault:
    """
    Interface for the user secrets vault.

    The vault is secured with the user's own password; the Dexorder platform
    cannot decrypt or access its contents. This distinguishes it from the
    system-level secrets.yaml, which stores infrastructure credentials managed
    by Dexorder operators.

    Typical keys stored here:
        "BINANCE_API_KEY", "BINANCE_API_SECRET"
        "COINBASE_API_KEY", "COINBASE_API_SECRET"
        etc.
    """

    # Shared stub message for the operations that mention live execution.
    _STUB_MESSAGE = (
        "User secrets vault is not yet implemented. "
        "Live execution API key management is a future feature."
    )

    def get_secret(self, key: str) -> str:
        """
        Retrieve a user secret by key.

        Args:
            key: Identifier for the secret, e.g. "BINANCE_API_KEY"

        Returns:
            The secret value as a string.

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(self._STUB_MESSAGE)

    def set_secret(self, key: str, value: str) -> None:
        """
        Store a secret in the vault.

        Args:
            key: Identifier for the secret
            value: Secret value to store (stored encrypted with user's password)

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(self._STUB_MESSAGE)

    def delete_secret(self, key: str) -> None:
        """
        Remove a secret from the vault.

        Args:
            key: Identifier for the secret

        Raises:
            NotImplementedError: Always — vault not yet implemented.
        """
        raise NotImplementedError(
            "User secrets vault is not yet implemented."
        )
|
||||
173
sandbox/dexorder/tools/activate_strategy.py
Normal file
173
sandbox/dexorder/tools/activate_strategy.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""
|
||||
activate_strategy / deactivate_strategy — start and stop live or paper trading.
|
||||
|
||||
paper=True (default): forward paper trading — strategy runs on live data with
|
||||
simulated fills. No API keys required.
|
||||
|
||||
paper=False: live trading — real order execution via user's exchange API keys,
|
||||
retrieved from the user secrets vault. Currently raises
|
||||
NotImplementedError until the vault is implemented.
|
||||
|
||||
Full live-data feed streaming for forward testing is TBD (requires a live bar
|
||||
source). This module establishes the interface and stubs the runtime loop.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Registry of active strategies: {strategy_name → runtime state dict}
|
||||
# In a future implementation this will hold live strategy runners.
|
||||
_active_strategies: dict[str, dict] = {}
|
||||
|
||||
|
||||
async def activate_strategy(
    strategy_name: str,
    feeds: list[dict],
    allocation: float,
    paper: bool = True,
) -> list:
    """
    Activate a strategy for live or paper forward trading.

    Args:
        strategy_name: Display name as saved via python_write("strategy", ...)
        feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        allocation: Capital allocated in quote currency (e.g. 5000.0 USDT)
        paper: True = paper/simulated fills (default); False = live execution

    Returns:
        list[TextContent] with JSON:
            {"status": "activated", "strategy_name": str, "paper": bool, "allocation": float}

        On error:
            {"error": str}
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        log.error("activate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    if strategy_name in _active_strategies:
        return _err(
            f"Strategy '{strategy_name}' is already active. "
            "Call deactivate_strategy first."
        )

    if not paper:
        # Live execution requires the user secrets vault for API keys.
        # The vault is not yet implemented.
        try:
            from dexorder.secrets_vault import SecretsVault
            _vault = SecretsVault()
            _vault.get_secret("__probe__")  # will raise NotImplementedError
        except NotImplementedError:
            return _err(
                "Live trading (paper=False) requires the user secrets vault, "
                "which is not yet implemented. Use paper=True for paper forward testing."
            )

    # Validate feeds
    if not feeds:
        return _err("feeds list is empty")

    parsed_feeds: list[tuple[str, int]] = []
    for f in feeds:
        sym = f.get("symbol", "")
        ps = f.get("period_seconds", 3600)
        if not sym:
            return _err(f"Feed entry missing 'symbol': {f}")
        try:
            period = int(ps)
        except (TypeError, ValueError):
            # BUG FIX: a non-integer period_seconds previously raised out of
            # the tool call; surface it as a structured error instead.
            return _err(f"Feed entry has non-integer 'period_seconds': {f}")
        parsed_feeds.append((sym, period))

    # TODO: Full implementation — start a live/paper trading loop:
    #   1. Load strategy class from category files
    #   2. Set up custom indicators via _setup_custom_indicators()
    #   3. Subscribe to live bar stream for each feed
    #   4. Initialize paper account (Nautilus SimulatedExchange) or live account
    #   5. Run strategy event loop (on_bar → evaluate → submit orders)
    # This requires a live data feed adapter (TBD).

    log.info(
        "activate_strategy: registering '%s' (paper=%s, allocation=%.2f) — "
        "live feed loop is TBD",
        strategy_name, paper, allocation,
    )

    # Build the serialized feed list once; it was previously constructed twice.
    feeds_payload = [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds]

    _active_strategies[strategy_name] = {
        "strategy_name": strategy_name,
        "feeds": feeds_payload,
        "allocation": allocation,
        "paper": paper,
        "status": "registered",
        "pnl": 0.0,
    }

    payload = {
        "status": "activated",
        "strategy_name": strategy_name,
        "paper": paper,
        "allocation": allocation,
        "feeds": feeds_payload,
        "note": (
            "Strategy registered. Live data feed streaming is not yet implemented — "
            "forward trading will begin when the live feed adapter is available."
        ),
    }
    return [TextContent(type="text", text=json.dumps(payload))]
|
||||
|
||||
|
||||
async def deactivate_strategy(strategy_name: str) -> list:
    """
    Deactivate a running strategy and return its final P&L summary.

    Args:
        strategy_name: Display name of the active strategy

    Returns:
        list[TextContent] with JSON:
            {"status": "deactivated", "strategy_name": str, "final_pnl": float}

        On error:
            {"error": str}
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        log.error("deactivate_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    if strategy_name not in _active_strategies:
        return _err(f"Strategy '{strategy_name}' is not active")

    state = _active_strategies.pop(strategy_name)

    # TODO: Stop the live feed loop and collect final P&L from the running engine.
    final_pnl = state.get("pnl", 0.0)

    log.info("deactivate_strategy: stopped '%s', final_pnl=%.4f", strategy_name, final_pnl)

    return [TextContent(type="text", text=json.dumps({
        "status": "deactivated",
        "strategy_name": strategy_name,
        "final_pnl": final_pnl,
    }))]
|
||||
|
||||
|
||||
async def list_active_strategies() -> list:
    """
    Return a list of currently active strategies and their status.

    Returns:
        list[TextContent] with JSON:
            {"active_strategies": [{strategy_name, paper, allocation, feeds, pnl}, ...]}
    """
    from mcp.types import TextContent

    body = json.dumps({"active_strategies": list(_active_strategies.values())})
    return [TextContent(type="text", text=body)]
|
||||
163
sandbox/dexorder/tools/backtest_strategy.py
Normal file
163
sandbox/dexorder/tools/backtest_strategy.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""
|
||||
backtest_strategy — run a PandasStrategy against historical OHLC data.
|
||||
|
||||
Called directly from the MCP server's async handle_tool_call.
|
||||
|
||||
Returns a JSON payload with backtest metrics and equity curve, following the
|
||||
same pattern as evaluate_indicator.py.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# All OHLC+ columns to request from the DataAPI
|
||||
_OHLC_EXTRA_COLUMNS = ["volume", "buy_vol", "sell_vol", "open_interest"]
|
||||
|
||||
|
||||
async def backtest_strategy(
    strategy_name: str,
    feeds: list[dict],
    from_time: Any,
    to_time: Any,
    initial_capital: float = 10_000.0,
    paper: bool = True,
) -> list:
    """
    Load a saved strategy, fetch OHLC+ data for each feed, and run a backtest.

    Args:
        strategy_name: Display name as saved via python_write("strategy", ...)
        feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
        from_time: Backtest start (Unix timestamp or date string)
        to_time: Backtest end (Unix timestamp or date string)
        initial_capital: Starting balance in quote currency (default 10,000)
        paper: Always True for historical backtest (flag reserved for forward testing)

    Returns:
        list[TextContent] with JSON payload:
            {
              "strategy_name": str,
              "feeds": [...],
              "initial_capital": float,
              "paper": bool,
              "total_candles": int,
              "total_return": float,     # fractional (0.15 = +15%)
              "sharpe_ratio": float,
              "max_drawdown": float,     # fractional (0.10 = 10% drawdown)
              "win_rate": float,
              "trade_count": int,
              "equity_curve": [{"timestamp": int, "equity": float}, ...]
            }

        On error:
            {"error": str}
    """
    from mcp.types import TextContent

    def _err(msg: str) -> list:
        log.error("backtest_strategy '%s': %s", strategy_name, msg)
        return [TextContent(type="text", text=json.dumps({"error": msg}))]

    # --- 1. Validate feeds input ---
    if not feeds:
        return _err("feeds list is empty — provide at least one {symbol, period_seconds} entry")

    parsed_feeds: list[tuple[str, int]] = []
    for f in feeds:
        sym = f.get("symbol", "")
        ps = f.get("period_seconds", 3600)
        if not sym:
            return _err(f"Feed entry missing 'symbol': {f}")
        parsed_feeds.append((sym, int(ps)))

    # --- 2. Resolve strategy implementation file ---
    try:
        from dexorder.tools.python_tools import get_category_manager, sanitize_name
        category_manager = get_category_manager()
        safe_name = sanitize_name(strategy_name)
        impl_path = category_manager.src_dir / "strategy" / safe_name / "implementation.py"
        if not impl_path.exists():
            return _err(f"Strategy '{strategy_name}' not found (looked at {impl_path})")
    except Exception as exc:
        return _err(f"Failed to locate strategy: {exc}")

    # --- 3. Register custom indicators with pandas-ta ---
    try:
        from dexorder.nautilus.backtest_runner import _setup_custom_indicators
        _setup_custom_indicators(category_manager.src_dir)
    except Exception as exc:
        # Non-fatal: built-in pandas-ta indicators still work without custom ones.
        log.warning("backtest_strategy: custom indicator setup failed: %s", exc)

    # --- 4. Load strategy class ---
    try:
        from dexorder.nautilus.backtest_runner import _load_strategy_class
        strategy_class = _load_strategy_class(impl_path)
    except Exception as exc:
        log.exception("backtest_strategy: strategy load failed")
        return _err(f"Strategy load failed: {exc}")

    # --- 5. Fetch OHLC+ data for each feed ---
    try:
        from dexorder.api import get_api
        api = get_api()
    except Exception as exc:
        return _err(f"API not available: {exc}")

    # Loop-invariant import hoisted out of the per-feed loop.
    from dexorder.nautilus.pandas_strategy import make_feed_key

    ohlc_dfs: dict[str, Any] = {}
    total_candles = 0

    for ticker, period_seconds in parsed_feeds:
        feed_key = make_feed_key(ticker, period_seconds)
        try:
            df = await api.data.historical_ohlc(
                ticker=ticker,
                period_seconds=period_seconds,
                start_time=from_time,
                end_time=to_time,
                extra_columns=_OHLC_EXTRA_COLUMNS,
            )
        except Exception as exc:
            log.exception("backtest_strategy: OHLC fetch failed for %s", feed_key)
            return _err(f"OHLC fetch failed for {feed_key}: {exc}")

        if df.empty:
            return _err(f"No OHLC data for {feed_key} in the requested range")

        ohlc_dfs[feed_key] = df
        total_candles += len(df)

    # --- 6. Run backtest in thread executor (BacktestEngine is synchronous) ---
    try:
        import asyncio
        from dexorder.nautilus.backtest_runner import run_backtest

        # BUG FIX: asyncio.get_event_loop() is deprecated inside a coroutine
        # (and may create a new loop off-thread); get_running_loop() is the
        # correct call here — we are always inside a running loop.
        loop = asyncio.get_running_loop()
        metrics = await loop.run_in_executor(
            None,
            lambda: run_backtest(
                strategy_class=strategy_class,
                feeds=parsed_feeds,
                ohlc_dfs=ohlc_dfs,
                initial_capital=initial_capital,
                paper=paper,
            ),
        )
    except Exception as exc:
        log.exception("backtest_strategy: backtest run failed")
        return _err(f"Backtest failed: {exc}")

    # --- 7. Return results ---
    payload = {
        "strategy_name": strategy_name,
        "feeds": [{"symbol": t, "period_seconds": p} for t, p in parsed_feeds],
        "initial_capital": initial_capital,
        "paper": paper,
        "total_candles": total_candles,
        **metrics,
    }
    return [TextContent(type="text", text=json.dumps(payload))]
|
||||
243
sandbox/dexorder/tools/evaluate_indicator.py
Normal file
243
sandbox/dexorder/tools/evaluate_indicator.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""
|
||||
evaluate_indicator — runs a pandas-ta (or custom) indicator against real OHLC data.
|
||||
|
||||
Called directly from the MCP server's async handle_tool_call, so it can await
|
||||
the DataAPI without subprocess overhead.
|
||||
|
||||
Returns a JSON object with a `values` array of {timestamp, ...} records, where
|
||||
timestamp is a Unix second integer and value fields hold floats (or null for NaN).
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pandas as pd
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Input routing — which series each pandas-ta function expects
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Maps pandas_ta_name → tuple of column names from the OHLCV dataframe
|
||||
# Columns available: open, high, low, close, volume
|
||||
# "volume" is fetched via extra_columns=["volume"]
|
||||
|
||||
# One entry per supported pandas-ta function; the value tuple lists which
# OHLCV columns are passed positionally to the function, in order.
_INPUTS: dict[str, tuple[str, ...]] = {
    # Close only
    "sma": ("close",),
    "ema": ("close",),
    "wma": ("close",),
    "dema": ("close",),
    "tema": ("close",),
    "trima": ("close",),
    "kama": ("close",),
    "t3": ("close",),
    "hma": ("close",),
    "alma": ("close",),
    "midpoint": ("close",),
    "rsi": ("close",),
    "macd": ("close",),
    "mom": ("close",),
    "roc": ("close",),
    "trix": ("close",),
    "cmo": ("close",),
    "ao": ("high", "low"),  # ao uses midprice (high, low)
    "apo": ("close",),
    "coppock": ("close",),
    "dpo": ("close",),
    "fisher": ("high", "low"),
    "rvgi": ("open", "high", "low", "close"),
    "kst": ("close",),
    "stdev": ("close",),
    "linreg": ("close",),
    "slope": ("close",),
    "vwma": ("close", "volume"),
    "obv": ("close", "volume"),
    "pvt": ("close", "volume"),
    "efi": ("close", "volume"),
    # High + Low
    "hl2": ("high", "low"),
    "midprice": ("high", "low"),
    # High + Low + Close
    "hlc3": ("high", "low", "close"),
    "atr": ("high", "low", "close"),
    "kc": ("high", "low", "close"),
    "donchian": ("high", "low", "close"),
    "stoch": ("high", "low", "close"),
    "stochrsi": ("high", "low", "close"),
    "cci": ("high", "low", "close"),
    "willr": ("high", "low", "close"),
    "adx": ("high", "low", "close"),
    "aroon": ("high", "low", "close"),
    "uo": ("high", "low", "close"),
    "psar": ("high", "low", "close"),
    "vortex": ("high", "low", "close"),
    "chop": ("high", "low", "close"),
    "supertrend": ("high", "low", "close"),
    "ichimoku": ("high", "low", "close"),
    # Open + High + Low + Close
    "ohlc4": ("open", "high", "low", "close"),
    "bop": ("open", "high", "low", "close"),
    # High + Low + Close + Volume
    "mfi": ("high", "low", "close", "volume"),
    "ad": ("high", "low", "close", "volume"),
    "adosc": ("high", "low", "close", "volume"),
    "cmf": ("high", "low", "close", "volume"),
    "eom": ("high", "low", "close", "volume"),
    "kvo": ("high", "low", "close", "volume"),
    # VWAP needs datetime index — handled specially
    "vwap": ("high", "low", "close", "volume"),
}

# Indicators whose inputs include volume (so the OHLC fetch must request it).
_NEEDS_VOLUME = {name for name, cols in _INPUTS.items() if "volume" in cols}
|
||||
|
||||
|
||||
async def evaluate_indicator(
    symbol: str,
    from_time: Any,
    to_time: Any,
    period_seconds: int,
    pandas_ta_name: str,
    parameters: dict,
) -> list:
    """
    Fetch OHLC data and evaluate a pandas-ta indicator.

    Args:
        symbol: Ticker, e.g. "BTC/USDT.BINANCE".
        from_time: Range start (Unix timestamp or date string).
        to_time: Range end (Unix timestamp or date string).
        period_seconds: Bar period in seconds.
        pandas_ta_name: pandas-ta function name; "custom_<name>" routes to a
            user-defined indicator registered from the category store.
        parameters: Keyword arguments forwarded to the indicator function.

    Returns a list containing a single MCP TextContent with JSON:
        {
          "symbol": ...,
          "period_seconds": ...,
          "pandas_ta_name": ...,
          "parameters": {...},
          "candle_count": N,
          "columns": ["timestamp", "value"] or ["timestamp", "col1", "col2", ...],
          "values": [{"timestamp": <unix_s>, "value": <float|null>}, ...]
        }
    """
    from mcp.types import TextContent

    try:
        import pandas_ta as ta
    except ImportError:
        return [TextContent(type="text", text=json.dumps({"error": "pandas_ta not installed"}))]

    name_lower = pandas_ta_name.lower()

    # For custom indicators, register them with pandas-ta first, then resolve
    # input columns from their stored metadata.
    if name_lower.startswith("custom_"):
        import os
        from dexorder.tools.python_tools import setup_custom_indicators, get_category_manager
        setup_custom_indicators(Path(os.environ.get("DATA_DIR", "data")))

        fn = getattr(ta, name_lower, None)
        if fn is None:
            return [TextContent(type="text", text=json.dumps({
                "error": (
                    f"Custom indicator '{pandas_ta_name}' not found after registering "
                    "custom indicators. Make sure the indicator was created with "
                    "python_write(category='indicator', name='...') and that its "
                    "implementation.py defines a function matching the sanitized name."
                )
            }))]

        # Get input_series from the indicator's metadata
        indicator_name = pandas_ta_name[len("custom_"):]
        mgr = get_category_manager()
        read_result = mgr.read("indicator", indicator_name)
        if read_result.get("exists") and read_result.get("metadata"):
            raw_series = read_result["metadata"].get("input_series") or ["close"]
            input_cols = tuple(raw_series)
        else:
            input_cols = ("close",)
    else:
        # Look up the pandas-ta function for built-in indicators
        fn = getattr(ta, name_lower, None)
        if fn is None:
            return [TextContent(type="text", text=json.dumps({
                "error": f"Unknown indicator '{pandas_ta_name}'. Check pandas_ta_name against the supported list."
            }))]

        # Determine required columns
        input_cols = _INPUTS.get(name_lower, ("close",))
    needs_volume = "volume" in input_cols

    # Fetch OHLC
    try:
        from dexorder.api import get_api
        api = get_api()
        df = await api.data.historical_ohlc(
            ticker=symbol,
            period_seconds=period_seconds,
            start_time=from_time,
            end_time=to_time,
            extra_columns=["volume"] if needs_volume else [],
        )
    except Exception as exc:
        log.exception("evaluate_indicator: OHLC fetch failed")
        return [TextContent(type="text", text=json.dumps({"error": f"OHLC fetch failed: {exc}"}))]

    if df.empty:
        return [TextContent(type="text", text=json.dumps({
            "error": f"No OHLC data for {symbol} in the requested range"
        }))]

    # VWAP already requires a DatetimeIndex — the OHLC df index is already a
    # DatetimeIndex, so no extra work needed here.

    # Build positional args
    args = []
    for col in input_cols:
        if col not in df.columns:
            return [TextContent(type="text", text=json.dumps({
                "error": f"Column '{col}' not in fetched dataframe (columns: {list(df.columns)})"
            }))]
        args.append(df[col])

    # Compute
    try:
        result = fn(*args, **parameters)
    except Exception as exc:
        log.exception("evaluate_indicator: computation failed")
        return [TextContent(type="text", text=json.dumps({
            "error": f"Indicator computation failed: {exc}"
        }))]

    # Convert DatetimeIndex → Unix seconds
    timestamps = (df.index.astype("int64") // 1_000_000_000).tolist()

    # Serialize output
    if isinstance(result, pd.DataFrame):
        columns = ["timestamp"] + list(result.columns)
        col_names = list(result.columns)
        values = []
        for i, ts in enumerate(timestamps):
            row: dict[str, Any] = {"timestamp": int(ts)}
            for j, col in enumerate(col_names):
                # Positional iat access replaces the chained result.iloc[i][col]
                # lookup. BUG FIX: the missing-value check was
                # `isinstance(v, float) and pd.isna(v)`, which missed non-float
                # missing values (pd.NA, NaT) and was inconsistent with the
                # Series branch below — use pd.isna() directly.
                v = result.iat[i, j]
                row[col] = None if pd.isna(v) else float(v)
            values.append(row)
    elif isinstance(result, pd.Series):
        columns = ["timestamp", "value"]
        values = [
            {"timestamp": int(ts), "value": None if pd.isna(v) else float(v)}
            for ts, v in zip(timestamps, result.tolist())
        ]
    else:
        return [TextContent(type="text", text=json.dumps({
            "error": f"Unexpected indicator output type: {type(result).__name__}"
        }))]

    payload = {
        "symbol": symbol,
        "period_seconds": period_seconds,
        "pandas_ta_name": pandas_ta_name,
        "parameters": parameters,
        "candle_count": len(df),
        "columns": columns,
        "values": values,
    }

    return [TextContent(type="text", text=json.dumps(payload))]
|
||||
182
sandbox/dexorder/tools/indicator_harness.py
Normal file
182
sandbox/dexorder/tools/indicator_harness.py
Normal file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Indicator harness — tests a custom indicator against synthetic OHLC data.
|
||||
|
||||
Runs in a subprocess so the indicator code is isolated from the MCP server process.
|
||||
|
||||
Usage: python indicator_harness.py <impl_path> <metadata_path>
|
||||
|
||||
Outputs JSON to stdout:
|
||||
{
|
||||
"success": bool,
|
||||
"output": str, # human-readable summary of the indicator output
|
||||
"error": str # error message / traceback if failed (null on success)
|
||||
}
|
||||
"""
|
||||
import importlib.util
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
import types
|
||||
from pathlib import Path
|
||||
|
||||
# Ensure dexorder package is importable (same as research_harness.py)
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Synthetic OHLCV data — 200 deterministic bars, no network required
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def make_synthetic_ohlcv(n: int = 200):
    """Build n bars of deterministic synthetic OHLCV data (seeded RNG, no network)."""
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(42)

    # NOTE: the RNG draw order (returns → wick noise → volumes) must stay
    # fixed so the seeded stream yields identical data to previous runs.
    pct_moves = rng.normal(0, 0.015, n)  # BTC-style random-walk returns
    close_prices = 40_000.0 * np.cumprod(1.0 + pct_moves)

    # Each bar opens at the previous bar's close (first bar opens at its close).
    open_prices = np.concatenate(([close_prices[0]], close_prices[:-1]))

    wick = np.abs(rng.normal(0, 0.005, n))
    high_prices = np.maximum(open_prices, close_prices) * (1.0 + wick)
    low_prices = np.minimum(open_prices, close_prices) * (1.0 - wick)
    traded = rng.uniform(1e6, 1e8, n)

    frame = {
        "open": open_prices,
        "high": high_prices,
        "low": low_prices,
        "close": close_prices,
        "volume": traded,
    }
    return pd.DataFrame(frame)
|
||||
|
||||
|
||||
def summarize(result, n: int) -> str:
    """Render a one-line human-readable summary of an indicator's output."""
    import pandas as pd

    if isinstance(result, pd.Series):
        missing = int(result.isna().sum())
        present = result.dropna()
        tail_vals = [round(float(v), 4) for v in present.tail(5).values] if len(present) else []
        return (
            f"Series({n} bars), NaN: {missing}/{n}, "
            f"last 5 valid values: {tail_vals}"
        )

    if isinstance(result, pd.DataFrame):
        cols = list(result.columns)
        missing_per_col = {c: int(result[c].isna().sum()) for c in cols}
        tails = {}
        for c in cols:
            present = result[c].dropna()
            if len(present):
                tails[c] = [round(float(v), 4) for v in present.tail(3).values]
        return (
            f"DataFrame({n} bars × {len(cols)} cols {cols}), "
            f"NaN counts: {missing_per_col}, last 3 valid per col: {tails}"
        )

    return f"Unexpected return type: {type(result).__name__}"
|
||||
|
||||
|
||||
def main():
    """CLI entry point: run an indicator implementation against synthetic data.

    Usage: indicator_harness.py <impl_path> <metadata_path>

    Prints exactly one JSON object to stdout: {"success": bool, ...} with
    either "output" (a human-readable summary) or "error". Exits 1 only on
    bad usage; every other failure exits 0 with success=False so the parent
    process can always parse a JSON result from stdout.
    """
    if len(sys.argv) < 3:
        print(json.dumps({"success": False, "error": "Usage: indicator_harness.py <impl_path> <metadata_path>"}))
        sys.exit(1)

    impl_path = sys.argv[1]
    metadata_path = sys.argv[2]

    # --- Load metadata ---
    # Defaults used when metadata omits these fields.
    input_series = ["close"]
    parameters: dict = {}
    try:
        with open(metadata_path) as f:
            meta = json.load(f)
        input_series = meta.get("input_series") or ["close"]
        param_schema = meta.get("parameters") or {}
        # Accept both the schema form {name: {"default": ...}} and the legacy
        # bare-value form {name: value}.
        for pname, pinfo in param_schema.items():
            if isinstance(pinfo, dict) and "default" in pinfo:
                parameters[pname] = pinfo["default"]
            elif not isinstance(pinfo, dict):
                # bare value (legacy)
                parameters[pname] = pinfo
    except Exception as e:
        print(json.dumps({"success": False, "error": f"Failed to read metadata: {e}"}))
        sys.exit(0)

    # --- Generate synthetic data ---
    try:
        import numpy  # noqa: F401 — verify numpy available
        import pandas as pd
    except ImportError as e:
        print(json.dumps({"success": False, "error": f"Missing required package: {e}"}))
        sys.exit(0)

    df = make_synthetic_ohlcv(n=200)
    n = len(df)

    # --- Load implementation ---
    try:
        spec = importlib.util.spec_from_file_location("_indicator_impl", impl_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore[union-attr]
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Import failed:\n{tb}"}))
        sys.exit(0)

    # --- Find the indicator function ---
    # Prefer a function whose name matches the sanitized directory name,
    # fall back to the first public function in the module.
    # Sanitize the same way setup_custom_indicators does ("-"/" " -> "_") so
    # directories like "my-indicator" resolve to a function named my_indicator.
    fn_name = os.path.basename(os.path.dirname(impl_path)).lower().replace("-", "_").replace(" ", "_")
    fn = getattr(module, fn_name, None)
    if fn is None:
        candidates = [
            v for k, v in vars(module).items()
            if isinstance(v, types.FunctionType) and not k.startswith("_")
        ]
        fn = candidates[0] if candidates else None

    if fn is None:
        print(json.dumps({"success": False, "error": "No callable function found in implementation.py"}))
        sys.exit(0)

    # --- Build positional args from input_series ---
    args = []
    for col in input_series:
        if col not in df.columns:
            print(json.dumps({"success": False, "error": f"input_series '{col}' not in synthetic df columns {list(df.columns)}"}))
            sys.exit(0)
        args.append(df[col])

    # --- Execute ---
    try:
        result = fn(*args, **parameters)
    except Exception:
        tb = traceback.format_exc()
        print(json.dumps({"success": False, "error": f"Execution failed:\n{tb}"}))
        sys.exit(0)

    # --- Validate output type ---
    if not isinstance(result, (pd.Series, pd.DataFrame)):
        print(json.dumps({
            "success": False,
            "error": (
                f"Indicator must return pd.Series or pd.DataFrame, "
                f"got {type(result).__name__}. "
                "Wrap the output if using pandas-ta internally."
            ),
        }))
        sys.exit(0)

    print(json.dumps({"success": True, "output": summarize(result, n)}))
|
||||
|
||||
|
||||
# Script entry point — runs the harness and emits one JSON result on stdout.
if __name__ == "__main__":
    main()
|
||||
@@ -30,8 +30,9 @@ from typing import Any, Optional
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Path to the research harness script (written to disk, not inline)
|
||||
# Path to the harness scripts (written to disk, not inline)
|
||||
_RESEARCH_HARNESS = Path(__file__).parent / "research_harness.py"
|
||||
_INDICATOR_HARNESS = Path(__file__).parent / "indicator_harness.py"
|
||||
|
||||
# Import conda manager for package installation
|
||||
try:
|
||||
@@ -62,12 +63,15 @@ class BaseMetadata:
|
||||
@dataclass
|
||||
class StrategyMetadata(BaseMetadata):
|
||||
"""Metadata for trading strategies."""
|
||||
data_feeds: list[str] = None # Required data feeds (e.g., ["BTC/USD", "ETH/USD"])
|
||||
data_feeds: list[dict] = None # Required data feeds: [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600, "description": "..."}]
|
||||
parameters: dict = None # Strategy parameters: {"param_name": {"default": value, "description": "..."}}
|
||||
conda_packages: list[str] = None # Additional conda packages required
|
||||
|
||||
def __post_init__(self):
|
||||
if self.data_feeds is None:
|
||||
self.data_feeds = []
|
||||
if self.parameters is None:
|
||||
self.parameters = {}
|
||||
if self.conda_packages is None:
|
||||
self.conda_packages = []
|
||||
|
||||
@@ -75,12 +79,78 @@ class StrategyMetadata(BaseMetadata):
|
||||
@dataclass
class IndicatorMetadata(BaseMetadata):
    """Metadata for technical indicators.

    Besides the basic fields, this carries everything needed to auto-construct
    a TradingView custom study: parameter schema, input/output series, pane
    placement, filled areas, and horizontal reference bands. Mutable fields
    default to None and are normalized in __post_init__.
    """
    default_length: int = 14  # Default period/length parameter
    conda_packages: list[str] = None  # Additional conda packages required

    # Fields for TradingView custom study auto-construction:
    parameters: dict = None
    # Parameter schema: {param_name: {type: "int"|"float"|"bool"|"string",
    #   default: value, description?: str, min?: num, max?: num}}
    # Example: {"length": {"type": "int", "default": 14, "min": 1, "max": 500}}

    input_series: list = None
    # OHLCV columns the indicator function receives as positional args.
    # Valid values: "open", "high", "low", "close", "volume"
    # Example: ["close"] or ["high", "low", "close", "volume"]

    output_columns: list = None
    # Output series produced by the function. Each entry:
    # {
    #   name: str,            # column name (or "value" for plain Series)
    #   display_name?: str,   # label shown in TV legend
    #   description?: str,
    #   plot?: {
    #     style: int,         # LineStudyPlotStyle: 0=Line, 1=Histogram, 3=Dots/Cross,
    #                         #   4=Area, 5=Columns, 6=Circles, 9=StepLine
    #     color?: str,        # CSS hex e.g. "#2196F3" (auto-assigned if omitted)
    #     linewidth?: int,    # 1–4 (default 2)
    #     visible?: bool      # default true
    #   }
    # }
    # Example (single line): [{"name": "value", "display_name": "My Indicator"}]
    # Example (multi-line): [{"name": "upper", "plot": {"style": 0}}, {"name": "lower", "plot": {"style": 0}}]
    # Example (histogram): [{"name": "value", "plot": {"style": 1}}]
    # Example (MACD-style): [{"name": "macd", "plot": {"style": 0}}, {"name": "signal", "plot": {"style": 0}}, {"name": "hist", "plot": {"style": 1}}]

    pane: str = "separate"
    # Where to render: "price" (overlaid on candles) or "separate" (sub-pane)

    filled_areas: list = None
    # Optional shaded regions between two plots or two bands. Each entry:
    # {
    #   id: str,        # unique id e.g. "fill_upper_lower"
    #   type: str,      # "plot_plot" (between two series) or "hline_hline" (between two bands)
    #   series1: str,   # output_column name (for plot_plot) or band id (for hline_hline)
    #   series2: str,
    #   color?: str,    # CSS hex fill color (default semi-transparent blue)
    #   opacity?: float # 0.0–1.0 (default 0.1)
    # }
    # Example (Bollinger fill): [{"id": "fill", "type": "plot_plot", "series1": "upper", "series2": "lower", "color": "#2196F3", "opacity": 0.1}]

    bands: list = None
    # Optional horizontal reference lines (e.g. RSI overbought/oversold). Each entry:
    # {
    #   id: str,          # unique id e.g. "ob"
    #   value: float,     # fixed y-level
    #   color?: str,      # CSS hex (default "#787B86")
    #   linewidth?: int,  # default 1
    #   linestyle?: int,  # 0=solid, 1=dotted, 2=dashed (default 2)
    #   visible?: bool    # default true
    # }
    # Example (RSI levels): [{"id": "ob", "value": 70}, {"id": "os", "value": 30}]

    def __post_init__(self):
        """Replace None sentinels with their empty/default containers.

        Mutable values cannot be used directly as dataclass field defaults,
        so unset fields arrive as None and are normalized here.
        """
        if self.conda_packages is None:
            self.conda_packages = []
        if self.input_series is None:
            self.input_series = ["close"]
        if self.output_columns is None:
            self.output_columns = [{"name": "value"}]
        if self.parameters is None:
            self.parameters = {}
        if self.filled_areas is None:
            self.filled_areas = []
        if self.bands is None:
            self.bands = []
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -141,21 +211,212 @@ def get_category_path(data_dir: Path, category: Category, name: str) -> Path:
|
||||
return data_dir / category.value / safe_name
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Git Manager
|
||||
# =============================================================================
|
||||
|
||||
class GitManager:
    """
    Thin wrapper around git subprocess calls for category revision tracking.
    All operations are non-fatal: errors are logged as warnings.
    """

    def __init__(self, repo_dir: Path):
        self.repo_dir = repo_dir

    def _run(self, *args, check: bool = True) -> subprocess.CompletedProcess:
        """Run a git command inside the repo with captured text output."""
        cmd = ["git", *args]
        return subprocess.run(
            cmd,
            cwd=self.repo_dir,
            capture_output=True,
            text=True,
            check=check,
        )

    def ensure_init(self):
        """Init git repo if not exists; initial commit if files already present."""
        if (self.repo_dir / ".git").exists():
            return
        self._run("init", "-b", "main")
        self._run("config", "user.email", "sandbox@dexorder.ai")
        self._run("config", "user.name", "Dexorder Sandbox")
        # Commit any pre-existing files (migrated from old layout)
        if self._run("status", "--porcelain").stdout.strip():
            self._run("add", "-A")
            self._run("commit", "-m", "init: migrate existing category files")
        log.info(f"Git repo initialized at {self.repo_dir}")

    def commit(self, message: str) -> Optional[str]:
        """Stage all changes and commit. Returns short hash or None if nothing to commit / on error."""
        try:
            self._run("add", "-A")
            if not self._run("status", "--porcelain").stdout.strip():
                # Working tree clean — nothing to commit.
                return None
            self._run("commit", "-m", message)
            return self._run("rev-parse", "--short", "HEAD").stdout.strip()
        except Exception as e:
            log.warning(f"Git commit failed (non-fatal): {e}")
            return None

    def log(self, path: Optional[Path] = None, n: int = 20) -> list[dict]:
        """Return recent commits, optionally filtered to a path."""
        cmd = ["log", f"-{n}", "--pretty=format:%H|%h|%s|%ai"]
        if path:
            cmd += ["--", str(path.relative_to(self.repo_dir))]
        output = self._run(*cmd, check=False).stdout
        entries = []
        for raw in output.strip().splitlines():
            if not raw:
                continue
            fields = raw.split("|", 3)
            if len(fields) != 4:
                continue
            full_hash, short_hash, message, date = fields
            entries.append({
                "hash": full_hash,
                "short_hash": short_hash,
                "message": message,
                "date": date,
            })
        return entries

    def restore(self, revision: str, path: Optional[Path] = None) -> Optional[str]:
        """Restore path (or entire tree) to revision state. Returns new commit hash."""
        try:
            target = str(path.relative_to(self.repo_dir)) if path else "."
            self._run("checkout", revision, "--", target)
            return self.commit(f"revert: restore to {revision[:8]}")
        except subprocess.CalledProcessError as e:
            # Surface git's stderr as the error message for the caller.
            raise RuntimeError(e.stderr.strip()) from e
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Custom Indicator Setup
|
||||
# =============================================================================
|
||||
|
||||
def setup_custom_indicators(data_dir: Path) -> None:
    """
    Register user's custom indicators with pandas-ta.

    Each indicator directory under ``{data_dir}/src/indicator`` that contains
    an ``implementation.py`` is loaded via importlib and its function is bound
    as ``ta.custom_{sanitized_name}`` so that evaluate_indicator can resolve it
    with ``getattr(ta, "custom_trendflex", None)``.

    The binding is idempotent — indicators already registered are skipped.

    Note: pandas-ta's ta.import_dir() requires a category-based directory
    structure (e.g. tmpdir/momentum/trendflex.py) plus a companion
    ``{name}_method`` function. Our indicators don't follow that convention,
    so we bind directly instead.
    """
    try:
        import pandas_ta as ta
    except ImportError:
        log.warning("pandas-ta not available — custom indicators will not be registered")
        return

    indicator_root = data_dir / "src" / "indicator"
    if not indicator_root.exists():
        return

    import importlib.util
    import types

    # Duplicate directory names (e.g. "TrendFlex" and "trendflex") collapse
    # to the same sanitized name — the first one wins, the rest are skipped.
    bound_names: set[str] = set()
    count = 0

    # Lowercase-only directory names sort ahead of mixed-case variants, so
    # when duplicates exist the lowercase one is the winner.
    def _order(p: Path):
        return (p.name != p.name.lower(), p.name.lower())

    for entry in sorted(indicator_root.iterdir(), key=_order):
        if not entry.is_dir():
            continue
        impl = entry / "implementation.py"
        if not impl.exists():
            continue

        sanitized = entry.name.lower().replace("-", "_").replace(" ", "_")
        ta_name = f"custom_{sanitized}"

        if ta_name in bound_names:
            log.warning(
                "Duplicate custom indicator name '%s' from directory '%s' — skipping",
                ta_name, entry.name,
            )
            continue
        bound_names.add(ta_name)

        # Skip if already bound (e.g. called multiple times in a process)
        if getattr(ta, ta_name, None) is not None:
            continue

        try:
            spec = importlib.util.spec_from_file_location(ta_name, impl)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)  # type: ignore[union-attr]

            # Prefer the function named after the sanitized directory;
            # otherwise fall back to the first public top-level function.
            fn = getattr(module, sanitized, None)
            if fn is None:
                public_fns = [
                    obj for name, obj in vars(module).items()
                    if isinstance(obj, types.FunctionType) and not name.startswith("_")
                ]
                fn = public_fns[0] if public_fns else None

            if fn is None:
                log.warning("No callable found in %s — skipping", impl)
                continue

            setattr(ta, ta_name, fn)
            count += 1
            log.debug("Registered custom indicator '%s' from %s", ta_name, impl)

        except Exception:
            log.warning("Could not register indicator '%s':", entry.name, exc_info=True)

    if count > 0:
        log.info("Registered %d custom indicator(s) with pandas-ta", count)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Category File Manager
|
||||
# =============================================================================
|
||||
|
||||
class CategoryFileManager:
|
||||
"""
|
||||
Manages category-based file operations with validation.
|
||||
Manages category-based file operations with validation and git revision tracking.
|
||||
Category files live under {data_dir}/src/ which is the git repo root.
|
||||
Workspace and other ephemeral data remain under {data_dir}/ but outside the repo.
|
||||
"""
|
||||
|
||||
def __init__(self, data_dir: Path):
|
||||
self.data_dir = data_dir
|
||||
|
||||
# Ensure category directories exist
|
||||
for category in Category:
|
||||
(data_dir / category.value).mkdir(parents=True, exist_ok=True)
|
||||
src = self.src_dir
|
||||
src.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Migrate: move existing top-level category dirs into src/ (one-time)
|
||||
for cat in Category:
|
||||
old = data_dir / cat.value
|
||||
new = src / cat.value
|
||||
if old.exists() and not new.exists():
|
||||
old.rename(new)
|
||||
log.info(f"Migrated {old} → {new}")
|
||||
else:
|
||||
new.mkdir(exist_ok=True)
|
||||
|
||||
# Init git repo in src/
|
||||
self.git = GitManager(src)
|
||||
self.git.ensure_init()
|
||||
|
||||
@property
def src_dir(self) -> Path:
    """Root of the versioned category code (git repo root).

    All category items live under this directory; ephemeral data (workspace,
    caches) stays under data_dir but outside this tree.
    """
    return self.data_dir / "src"
|
||||
|
||||
def write(
|
||||
self,
|
||||
@@ -191,7 +452,7 @@ class CategoryFileManager:
|
||||
}
|
||||
|
||||
# Get item directory
|
||||
item_dir = get_category_path(self.data_dir, cat, name)
|
||||
item_dir = get_category_path(self.src_dir, cat, name)
|
||||
item_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Write implementation
|
||||
@@ -228,11 +489,19 @@ class CategoryFileManager:
|
||||
"validation": validation,
|
||||
}
|
||||
|
||||
# Auto-execute research scripts after successful write
|
||||
if cat == Category.RESEARCH and validation["success"]:
|
||||
log.info(f"Auto-executing research script: {name}")
|
||||
execution_result = self.execute_research(name)
|
||||
result["execution"] = execution_result
|
||||
# Auto-execute after successful write to give the agent immediate runtime feedback
|
||||
if validation["success"]:
|
||||
if cat == Category.RESEARCH:
|
||||
log.info(f"Auto-executing research script: {name}")
|
||||
result["execution"] = self.execute_research(name)
|
||||
elif cat == Category.INDICATOR:
|
||||
log.info(f"Auto-executing indicator test: {name}")
|
||||
result["execution"] = self._execute_indicator(item_dir)
|
||||
|
||||
# Commit to git
|
||||
commit_hash = self.git.commit(f"create({category}): {name}")
|
||||
if commit_hash:
|
||||
result["revision"] = commit_hash
|
||||
|
||||
return result
|
||||
|
||||
@@ -241,6 +510,7 @@ class CategoryFileManager:
|
||||
category: str,
|
||||
name: str,
|
||||
code: Optional[str] = None,
|
||||
patches: Optional[list[dict]] = None,
|
||||
description: Optional[str] = None,
|
||||
metadata: Optional[dict] = None
|
||||
) -> dict[str, Any]:
|
||||
@@ -250,7 +520,8 @@ class CategoryFileManager:
|
||||
Args:
|
||||
category: Category name
|
||||
name: Display name for the item
|
||||
code: Python implementation code (optional, omit to keep existing)
|
||||
code: Full Python implementation code to replace existing (optional)
|
||||
patches: List of {old_string, new_string} replacements (optional, preferred for small changes)
|
||||
description: Updated description (optional, omit to keep existing)
|
||||
metadata: Additional metadata updates (optional)
|
||||
|
||||
@@ -261,12 +532,15 @@ class CategoryFileManager:
|
||||
- validation: dict - results from test harness (if code updated)
|
||||
- error: str (if any)
|
||||
"""
|
||||
if code is not None and patches is not None:
|
||||
return {"success": False, "error": "Provide either 'code' or 'patches', not both"}
|
||||
|
||||
try:
|
||||
cat = Category(category)
|
||||
except ValueError:
|
||||
return {"success": False, "error": f"Invalid category '{category}'"}
|
||||
|
||||
item_dir = get_category_path(self.data_dir, cat, name)
|
||||
item_dir = get_category_path(self.src_dir, cat, name)
|
||||
|
||||
if not item_dir.exists():
|
||||
return {"success": False, "error": f"Item '{name}' does not exist in category '{category}'"}
|
||||
@@ -282,8 +556,28 @@ class CategoryFileManager:
|
||||
except Exception as e:
|
||||
return {"success": False, "error": f"Failed to read existing metadata: {e}"}
|
||||
|
||||
# Update code if provided
|
||||
if code is not None:
|
||||
# Apply string-replacement patches if provided
|
||||
if patches is not None:
|
||||
if not impl_path.exists():
|
||||
return {"success": False, "error": "Cannot patch: implementation file does not exist"}
|
||||
try:
|
||||
current_code = impl_path.read_text()
|
||||
for i, patch in enumerate(patches):
|
||||
old = patch.get("old_string", "")
|
||||
new = patch.get("new_string", "")
|
||||
if old not in current_code:
|
||||
return {"success": False, "error": f"Patch {i}: old_string not found in file"}
|
||||
if current_code.count(old) > 1:
|
||||
return {"success": False, "error": f"Patch {i}: old_string is not unique — add more surrounding context"}
|
||||
current_code = current_code.replace(old, new, 1)
|
||||
impl_path.write_text(current_code)
|
||||
log.info(f"Applied {len(patches)} patch(es) to {impl_path}")
|
||||
code = current_code # trigger validation below
|
||||
except Exception as e:
|
||||
return {"success": False, "error": f"Failed to apply patches: {e}"}
|
||||
|
||||
# Update code if provided (full replace)
|
||||
if code is not None and patches is None:
|
||||
try:
|
||||
impl_path.write_text(code)
|
||||
log.info(f"Updated {cat.value} implementation: {impl_path}")
|
||||
@@ -321,11 +615,21 @@ class CategoryFileManager:
|
||||
result["validation"] = validation
|
||||
result["success"] = validation["success"]
|
||||
|
||||
# Auto-execute research scripts after successful edit (if code was updated)
|
||||
if cat == Category.RESEARCH and code is not None and result["success"]:
|
||||
log.info(f"Auto-executing research script after edit: {name}")
|
||||
execution_result = self.execute_research(name)
|
||||
result["execution"] = execution_result
|
||||
# Auto-execute after successful edit to give the agent immediate runtime feedback
|
||||
if code is not None and result["success"]:
|
||||
if cat == Category.RESEARCH:
|
||||
log.info(f"Auto-executing research script after edit: {name}")
|
||||
result["execution"] = self.execute_research(name)
|
||||
elif cat == Category.INDICATOR:
|
||||
log.info(f"Auto-executing indicator test after edit: {name}")
|
||||
result["execution"] = self._execute_indicator(item_dir)
|
||||
|
||||
# Commit to git if code changed
|
||||
if code is not None and result["success"]:
|
||||
action = "patch" if patches is not None else "edit"
|
||||
commit_hash = self.git.commit(f"{action}({category}): {name}")
|
||||
if commit_hash:
|
||||
result["revision"] = commit_hash
|
||||
|
||||
return result
|
||||
|
||||
@@ -349,7 +653,7 @@ class CategoryFileManager:
|
||||
except ValueError:
|
||||
return {"exists": False, "error": f"Invalid category '{category}'"}
|
||||
|
||||
item_dir = get_category_path(self.data_dir, cat, name)
|
||||
item_dir = get_category_path(self.src_dir, cat, name)
|
||||
|
||||
if not item_dir.exists():
|
||||
return {"exists": False}
|
||||
@@ -385,7 +689,7 @@ class CategoryFileManager:
|
||||
except ValueError:
|
||||
return {"error": f"Invalid category '{category}'"}
|
||||
|
||||
cat_dir = self.data_dir / cat.value
|
||||
cat_dir = self.src_dir / cat.value
|
||||
items = []
|
||||
|
||||
for item_dir in cat_dir.iterdir():
|
||||
@@ -487,33 +791,58 @@ class CategoryFileManager:
|
||||
|
||||
def _validate_indicator(self, impl_path: Path) -> dict[str, Any]:
|
||||
"""
|
||||
Validate an indicator implementation.
|
||||
Validate an indicator by running it against synthetic OHLC data.
|
||||
|
||||
Runs basic syntax check and imports.
|
||||
Uses indicator_harness.py in a subprocess so the indicator code is
|
||||
isolated from the MCP server process. Catches import errors, runtime
|
||||
errors, and wrong return types — not just syntax.
|
||||
"""
|
||||
meta_path = impl_path.parent / "metadata.json"
|
||||
return self._execute_indicator(impl_path.parent, timeout=30)
|
||||
|
||||
def _execute_indicator(self, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
|
||||
"""
|
||||
Run an indicator against synthetic OHLC data via indicator_harness.py.
|
||||
|
||||
Returns:
|
||||
dict with success, output (human-readable summary), error
|
||||
"""
|
||||
impl_path = item_dir / "implementation.py"
|
||||
meta_path = item_dir / "metadata.json"
|
||||
|
||||
if not impl_path.exists():
|
||||
return {"success": False, "error": "implementation.py not found"}
|
||||
if not meta_path.exists():
|
||||
return {"success": False, "error": "metadata.json not found"}
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-m", "py_compile", str(impl_path)],
|
||||
[sys.executable, str(_INDICATOR_HARNESS), str(impl_path), str(meta_path)],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10,
|
||||
timeout=timeout,
|
||||
cwd=str(item_dir),
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
return {
|
||||
"success": True,
|
||||
"output": "Indicator syntax valid",
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"success": False,
|
||||
"output": result.stderr,
|
||||
"error": "Syntax error in indicator",
|
||||
}
|
||||
except subprocess.TimeoutExpired:
|
||||
return {"success": False, "error": "Validation timeout"}
|
||||
return {"success": False, "error": f"Indicator test timed out after {timeout}s"}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": f"Validation failed: {e}"}
|
||||
return {"success": False, "error": f"Harness launch failed: {e}"}
|
||||
|
||||
if result.returncode != 0:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Harness process failed:\n{result.stderr}",
|
||||
}
|
||||
|
||||
try:
|
||||
data = json.loads(result.stdout)
|
||||
except json.JSONDecodeError:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Harness produced invalid JSON:\n{result.stdout[:500]}",
|
||||
}
|
||||
|
||||
return data
|
||||
|
||||
def _run_research_harness(self, impl_path: Path, item_dir: Path, timeout: int = 30) -> dict[str, Any]:
|
||||
"""
|
||||
@@ -594,7 +923,7 @@ class CategoryFileManager:
|
||||
- content: list of TextContent and ImageContent objects (MCP format)
|
||||
- error: str (if any)
|
||||
"""
|
||||
item_dir = get_category_path(self.data_dir, Category.RESEARCH, name)
|
||||
item_dir = get_category_path(self.src_dir, Category.RESEARCH, name)
|
||||
|
||||
if not item_dir.exists():
|
||||
return {"error": f"Research script '{name}' does not exist"}
|
||||
@@ -654,6 +983,66 @@ class CategoryFileManager:
|
||||
return {"content": content}
|
||||
|
||||
|
||||
def git_log(
    self,
    category: Optional[str] = None,
    name: Optional[str] = None,
    limit: int = 20
) -> dict[str, Any]:
    """
    List recent git commits, optionally filtered to a category or item.

    Args:
        category: Optional category name to scope the log.
        name: Optional item name within the category (used only with category).
        limit: Maximum number of commits returned.

    Returns:
        dict with:
        - success: bool
        - commits: list of {hash, short_hash, message, date}
        - error: str (when the category name is invalid)
    """
    scope: Optional[Path] = None
    if category:
        try:
            cat = Category(category)
        except ValueError:
            return {"success": False, "error": f"Invalid category '{category}'"}
        # Narrow to the specific item when a name is given, otherwise to
        # the whole category subtree.
        if name:
            scope = get_category_path(self.src_dir, cat, name)
        else:
            scope = self.src_dir / cat.value
    return {"success": True, "commits": self.git.log(path=scope, n=limit)}
|
||||
|
||||
def git_revert(self, revision: str, category: str, name: str) -> dict[str, Any]:
    """
    Restore a category item to a previous git revision (creates a new commit).

    Args:
        revision: Git revision (hash or ref) to restore from.
        category: Category name containing the item.
        name: Display name of the item to restore.

    Returns:
        dict with:
        - success: bool (mirrors the post-restore validation result)
        - revision: str - new commit hash
        - validation: dict
        - error: str (if any)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {"success": False, "error": f"Invalid category '{category}'"}

    item_dir = get_category_path(self.src_dir, cat, name)
    if not item_dir.exists():
        return {"success": False, "error": f"Item '{name}' not found in '{category}'"}

    try:
        new_rev = self.git.restore(revision, path=item_dir)
    except RuntimeError as e:
        # GitManager.restore surfaces git stderr as RuntimeError.
        return {"success": False, "error": str(e)}

    # Re-validate the restored code so the caller learns immediately
    # whether the reverted state still passes the harness.
    outcome = self._validate(cat, item_dir)
    return {
        "success": outcome["success"],
        "revision": new_rev,
        "validation": outcome,
    }
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Global Manager Instance
|
||||
# =============================================================================
|
||||
@@ -3,7 +3,7 @@
|
||||
Research script harness - runs implementation.py in a subprocess with API
|
||||
initialization, stdout/stderr capture, and matplotlib figure capture.
|
||||
|
||||
This file is written to disk and invoked by category_tools.py rather than
|
||||
This file is written to disk and invoked by python_tools.py rather than
|
||||
being passed inline via `python -c`, so the harness code is inspectable and
|
||||
not regenerated on every call.
|
||||
|
||||
@@ -77,6 +77,16 @@ try:
|
||||
except Exception as e:
|
||||
print(f"WARNING: API initialization failed: {e}", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Register custom indicators so research scripts can use df.ta.my_indicator()
|
||||
# ---------------------------------------------------------------------------
|
||||
try:
|
||||
from dexorder.tools.python_tools import setup_custom_indicators
|
||||
_data_dir = Path(os.environ.get("DATA_DIR", "/app/data"))
|
||||
setup_custom_indicators(_data_dir)
|
||||
except Exception as e:
|
||||
print(f"WARNING: Custom indicator registration failed: {e}", file=sys.stderr)
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 2:
|
||||
|
||||
@@ -43,6 +43,23 @@ class WorkspaceStore:
|
||||
# Map of "store_name/json/pointer/path" -> list of callbacks
|
||||
self._triggers: dict[str, list[Callable[[Any, Any], None]]] = {}
|
||||
|
||||
def _ensure_intermediate_paths(self, state: dict, patch: list[dict]) -> dict:
    """Create missing intermediate objects for deep patch paths (mirrors gateway logic).

    For every "add"/"replace" op whose JSON-pointer path is more than one
    segment deep, ensure each intermediate segment exists as a dict so the
    patch application does not fail on a missing parent. Operates on a deep
    copy; the input state is never mutated.
    """
    import copy
    result = copy.deepcopy(state)
    for op in patch:
        if op.get("op") not in ("add", "replace"):
            continue
        segments = [seg for seg in op.get("path", "").split("/") if seg]
        if len(segments) <= 1:
            # Top-level path — nothing intermediate to create.
            continue
        node = result
        for seg in segments[:-1]:
            if not isinstance(node.get(seg), dict):
                node[seg] = {}
            node = node[seg]
    return result
|
||||
|
||||
def _store_path(self, store_name: str) -> Path:
|
||||
"""Get the filesystem path for a store."""
|
||||
# Sanitize store name to prevent directory traversal
|
||||
@@ -136,6 +153,9 @@ class WorkspaceStore:
|
||||
with open(path, "r") as f:
|
||||
old_state = json.load(f)
|
||||
|
||||
# Create missing intermediate objects for deep paths (mirrors gateway logic)
|
||||
old_state = self._ensure_intermediate_paths(old_state, patch)
|
||||
|
||||
# Apply patch
|
||||
new_state = jsonpatch.apply_patch(old_state, patch)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user