backend redesign
This commit is contained in:
179
backend.old/src/indicator/__init__.py
Normal file
179
backend.old/src/indicator/__init__.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
Composable Indicator System.

Provides a framework for building DAGs of data transformation pipelines
that process time-series data incrementally. Indicators can consume
DataSources or other Indicators as inputs, composing into arbitrarily
complex processing graphs.

Key Components:
---------------

Indicator (base.py):
    Abstract base class for all indicator implementations.
    Declares input/output schemas and implements synchronous compute().

IndicatorRegistry (registry.py):
    Central catalog of available indicators with rich metadata
    for AI agent discovery and tool generation.

Pipeline (pipeline.py):
    Execution engine that builds DAGs, resolves dependencies,
    and orchestrates incremental data flow through indicator chains.

Schema Types (schema.py):
    Type definitions for input/output schemas, computation context,
    and metadata for AI-native documentation.

Usage Example:
--------------

    from indicator import Indicator, IndicatorRegistry, Pipeline
    from indicator.schema import (
        ColumnInfo, InputSchema, OutputSchema, ComputeContext, ComputeResult,
        IndicatorMetadata, IndicatorParameter
    )

    # Define an indicator
    class SimpleMovingAverage(Indicator):
        @classmethod
        def get_metadata(cls):
            return IndicatorMetadata(
                name="SMA",
                display_name="Simple Moving Average",
                description="Arithmetic mean of prices over N periods",
                category="trend",
                parameters=[
                    IndicatorParameter(
                        name="period",
                        type="int",
                        description="Number of periods to average",
                        default=20,
                        min_value=1
                    )
                ],
                tags=["moving-average", "trend-following"]
            )

        @classmethod
        def get_input_schema(cls):
            return InputSchema(
                required_columns=[
                    ColumnInfo(name="close", type="float", description="Closing price")
                ]
            )

        @classmethod
        def get_output_schema(cls, **params):
            return OutputSchema(
                columns=[
                    ColumnInfo(
                        name="sma",
                        type="float",
                        description=f"Simple moving average over {params.get('period', 20)} periods"
                    )
                ]
            )

        def compute(self, context: ComputeContext) -> ComputeResult:
            period = self.params["period"]
            closes = context.get_column("close")
            times = context.get_times()

            sma_values = []
            for i in range(len(closes)):
                if i < period - 1:
                    sma_values.append(None)
                else:
                    window = closes[i - period + 1 : i + 1]
                    sma_values.append(sum(window) / period)

            return ComputeResult(
                data=[
                    {"time": times[i], "sma": sma_values[i]}
                    for i in range(len(times))
                ]
            )

    # Register the indicator
    registry = IndicatorRegistry()
    registry.register(SimpleMovingAverage)

    # Create a pipeline
    # (datasource_registry and price_bars are assumed to be set up by the caller)
    pipeline = Pipeline(datasource_registry)
    pipeline.add_datasource("price_data", "ccxt", "BTC/USD", "1D")

    sma_indicator = registry.create_instance("SMA", "sma_20", period=20)
    pipeline.add_indicator("sma_20", sma_indicator, input_node_ids=["price_data"])

    # Execute
    results = pipeline.execute(datasource_data={"price_data": price_bars})
    sma_output = results["sma_20"]  # Contains columns: time, close, sma_20_sma

Design Philosophy:
------------------

1. **Schema-based composition**: Indicators declare inputs/outputs via schemas,
   enabling automatic validation and flexible composition.

2. **Synchronous execution**: All computation is synchronous for simplicity.
   Async handling happens at the event/strategy layer.

3. **Incremental updates**: Indicators receive context about what changed,
   allowing optimized recomputation of only affected values.

4. **AI-native metadata**: Rich descriptions, use cases, and parameter specs
   make indicators discoverable and usable by AI agents.

5. **Generic data flow**: Indicators work with any data source that matches
   their input schema, not specific DataSource instances.

6. **Event-driven**: Designed to react to DataSource updates and propagate
   changes through the DAG efficiently.
"""
|
||||
|
||||
# Re-export the package's public API from its submodules.
# Statement order is preserved (submodule import side effects may matter);
# names within each import are listed alphabetically per PEP 8.
from .base import DataSourceAdapter, Indicator
from .pipeline import Pipeline, PipelineNode
from .registry import IndicatorRegistry
from .schema import (
    ComputeContext,
    ComputeResult,
    IndicatorMetadata,
    IndicatorParameter,
    InputSchema,
    OutputSchema,
)
from .talib_adapter import (
    TALibIndicator,
    get_talib_version,
    is_talib_available,
    register_all_talib_indicators,
)
from .custom_indicators import CUSTOM_INDICATORS, register_custom_indicators

# Explicit public API for ``from indicator import *``.
__all__ = [
    # Core classes
    "DataSourceAdapter",
    "Indicator",
    "IndicatorRegistry",
    "Pipeline",
    "PipelineNode",
    # Schema types
    "ComputeContext",
    "ComputeResult",
    "IndicatorMetadata",
    "IndicatorParameter",
    "InputSchema",
    "OutputSchema",
    # TA-Lib integration
    "TALibIndicator",
    "get_talib_version",
    "is_talib_available",
    "register_all_talib_indicators",
    # Custom indicators
    "CUSTOM_INDICATORS",
    "register_custom_indicators",
]
|
||||
Reference in New Issue
Block a user