# Source: ai/test/history_client/client_ohlc_api.py
# (repository listing metadata: 2026-03-11 18:47:11 -04:00, 127 lines, 4.0 KiB,
#  executable Python file)
#!/usr/bin/env python3
"""
Simple test client using the high-level OHLCClient API.
Demonstrates smart caching - checks Iceberg first, requests missing data automatically.
"""
import asyncio
import sys
import os
from datetime import datetime, timezone
# Add client library to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../client-py'))
from dexorder import OHLCClient
async def main():
    """
    Exercise the high-level OHLC client API with smart caching.

    Connects to a local relay/Iceberg/MinIO dev stack, requests one week of
    hourly BINANCE:BTC/USDT candles, prints a sample of the returned frame,
    and reports how complete the returned data is.
    """
    print("=== DexOrder OHLC Client Test ===\n")

    # Initialize client. Endpoints assume a local dev stack with
    # port-forwarded services.
    client = OHLCClient(
        iceberg_catalog_uri="http://localhost:8181",
        relay_endpoint="tcp://localhost:5559",         # Client request port
        notification_endpoint="tcp://localhost:5558",  # Market data pub port
        namespace="trading",
        s3_endpoint="http://localhost:9000",           # Port-forwarded MinIO
        s3_access_key="minio",
        s3_secret_key="minio123",
    )
    try:
        # Start background notification listener
        await client.start()
        print("✅ Client started\n")

        # Request parameters
        ticker = "BINANCE:BTC/USDT"
        period_seconds = 3600  # 1-hour candles

        # Request 7 days of data (Jan 1-7, 2025).
        # FIX: earlier comments said 2026, but these epoch seconds are
        # 2025-01-01 00:00:00 UTC and 2025-01-07 23:59:59 UTC.
        start_time_us = 1735689600 * 1_000_000  # Jan 1, 2025 00:00:00 UTC
        end_time_us = 1736294399 * 1_000_000    # Jan 7, 2025 23:59:59 UTC

        start_dt = datetime.fromtimestamp(start_time_us / 1_000_000, tz=timezone.utc)
        end_dt = datetime.fromtimestamp(end_time_us / 1_000_000, tz=timezone.utc)

        # FIX: use ceiling division. The inclusive [start, end] range covers
        # 168 hourly candles (00:00 Jan 1 .. 23:00 Jan 7); plain floor
        # division undercounted by one, so the coverage check below could
        # report more than 100%.
        period_us = period_seconds * 1_000_000
        expected_count = (end_time_us - start_time_us + period_us - 1) // period_us

        print("Requesting data:")
        print(f" Ticker: {ticker}")
        print(f" Period: {period_seconds}s ({period_seconds // 3600}h)")
        print(f" Start: {start_dt.isoformat()}")
        print(f" End: {end_dt.isoformat()}")
        print(f" Expected candles: ~{expected_count}")
        print()

        # Fetch OHLC data. The client checks the Iceberg cache first and
        # requests any missing ranges automatically.
        print("⏳ Fetching data (checking cache, requesting if needed)...\n")
        df = await client.fetch_ohlc(
            ticker=ticker,
            period_seconds=period_seconds,
            start_time=start_time_us,
            end_time=end_time_us,
            request_timeout=60.0,
        )

        # Display results
        print(f"✅ Success! Fetched {len(df)} candles\n")
        if not df.empty:
            cols = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
            print("First 5 candles:")
            print(df[cols].head())
            print()
            print("Last 5 candles:")
            print(df[cols].tail())
            print()

            # Data quality check: compare returned row count against the
            # number of candle periods the requested range should contain.
            actual_count = len(df)
            coverage = (actual_count / expected_count) * 100 if expected_count > 0 else 0
            print(f"Data coverage: {coverage:.1f}% ({actual_count}/{expected_count} candles)")
            if coverage < 100:
                print(f"⚠️ Missing {expected_count - actual_count} candles")
            else:
                print("✅ Complete data coverage")
        else:
            print("⚠️ No data returned")
    except asyncio.TimeoutError:
        print("\n❌ Request timed out")
        print("Possible reasons:")
        print(" - Ingestor still fetching from exchange")
        print(" - Flink processing backlog")
        print(" - Network issues")
    except ValueError as e:
        # Raised by the client for malformed/rejected requests.
        print(f"\n❌ Request failed: {e}")
    except ConnectionError as e:
        print(f"\n❌ Connection error: {e}")
        print("Make sure relay and Flink are running")
    except KeyboardInterrupt:
        print("\n\n⚠️ Interrupted by user")
    except Exception as e:
        # Last-resort handler for a demo script: show the traceback rather
        # than dying silently.
        print(f"\n❌ Unexpected error: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Always shut the client down, even after an error or interrupt.
        await client.stop()
        print("\n🔌 Client stopped")


if __name__ == '__main__':
    asyncio.run(main())