// Messaging schema for the data-ingestion pipeline:
// Client -> Relay -> Ingestor -> Flink request/response flows,
// plus CEP trigger registration and event callbacks.
syntax = "proto3";

// NOTE(review): this file declares no `package`, so every type lives in the
// global proto namespace. Consider adding one (e.g. "dexorder.v1") before
// wider publication — but note that adding it later changes generated-code
// namespaces for non-Java targets, so confirm with all consumers first.

option java_multiple_files = true;
option java_package = "com.dexorder.proto";
// A unit of ingestion work placed on the Relay -> Ingestor work queue.
message DataRequest {
  // Kind of data being requested.
  enum RequestType {
    // NOTE(review): the zero value carries business meaning, so an unset
    // field decodes as HISTORICAL_OHLC. Cannot be renumbered without
    // breaking the wire contract.
    HISTORICAL_OHLC = 0;  // Backfill of historical OHLC candles.
    REALTIME_TICKS = 1;   // Live tick subscription.
  }

  // Unique identifier used to correlate this request end to end.
  string request_id = 1;

  // Which kind of ingestion is being asked for.
  RequestType type = 2;

  // Market identifier of the instrument to ingest.
  string ticker = 3;

  // Parameters for HISTORICAL_OHLC requests.
  optional HistoricalParams historical = 4;

  // Parameters for REALTIME_TICKS requests.
  optional RealtimeParams realtime = 5;

  // Client ID for notification routing (async architecture); Flink uses
  // this to determine the notification topic.
  optional string client_id = 6;
}
// Time range and aggregation settings for a historical OHLC fetch.
message HistoricalParams {
  // Start of the range, microseconds since the Unix epoch.
  uint64 start_time = 1;

  // End of the range, microseconds since the Unix epoch.
  uint64 end_time = 2;

  // Candle width in seconds (60 = 1m, 300 = 5m, 3600 = 1h, 86400 = 1d).
  uint32 period_seconds = 3;

  // Upper bound on the number of candles returned, when set.
  optional uint32 limit = 4;
}
// Settings for a realtime tick/OHLC subscription.
message RealtimeParams {
  // Emit raw tick data when true.
  bool include_ticks = 1;

  // Emit aggregated OHLC candles when true.
  bool include_ohlc = 2;

  // Candle widths to generate, in seconds
  // (e.g. [60, 300, 900] for 1m, 5m, 15m).
  repeated uint32 ohlc_period_seconds = 3;
}
// Control-channel message sent from Flink to an ingestor.
message IngestorControl {
  enum ControlAction {
    // NOTE(review): the zero value carries business meaning, so an unset
    // action decodes as CANCEL. Cannot be renumbered compatibly.
    CANCEL = 0;         // Cancel the request named in request_id.
    SHUTDOWN = 1;       // Graceful shutdown signal.
    CONFIG_UPDATE = 2;  // Apply the settings carried in config.
    HEARTBEAT = 3;      // Keep-alive signal.
  }

  // Action the ingestor should perform.
  ControlAction action = 1;

  // Target request ID; used by the CANCEL action.
  optional string request_id = 2;

  // New settings; used by the CONFIG_UPDATE action.
  optional IngestorConfig config = 3;
}
// Runtime configuration pushed to an ingestor via CONFIG_UPDATE.
message IngestorConfig {
  // Cap on concurrent requests handled by one ingestor.
  optional uint32 max_concurrent = 1;

  // Per-request timeout, in seconds.
  optional uint32 timeout_seconds = 2;

  // Kafka topic the ingestor writes its output to.
  optional string kafka_topic = 3;
}
// Historical-data result sent on the Ingestor -> Flink response channel.
message DataResponse {
  enum ResponseStatus {
    // NOTE(review): zero value is a success state, so an unset status
    // decodes as OK. Cannot be renumbered compatibly.
    OK = 0;
    NOT_FOUND = 1;
    ERROR = 2;
  }

  // Request ID this response answers.
  string request_id = 1;

  // Outcome of the request.
  ResponseStatus status = 2;

  // Failure details when status is not OK.
  optional string error_message = 3;

  // Serialized OHLC payload: repeated OHLCV protobuf messages.
  repeated bytes ohlc_data = 4;

  // Total number of candles returned.
  uint32 total_records = 5;
}
// Client -> Relay submission of a historical OHLC fetch. The Relay answers
// immediately with a SubmitResponse carrying the request_id.
message SubmitHistoricalRequest {
  // Client-generated request ID used for end-to-end tracking.
  string request_id = 1;

  // Market identifier (e.g. "BINANCE:BTC/USDT").
  string ticker = 2;

  // Start of the range, microseconds since the Unix epoch.
  uint64 start_time = 3;

  // End of the range, microseconds since the Unix epoch.
  uint64 end_time = 4;

  // Candle width in seconds (e.g. 60 = 1m, 300 = 5m, 3600 = 1h).
  uint32 period_seconds = 5;

  // Upper bound on the number of candles returned, when set.
  optional uint32 limit = 6;

  // Client ID for notification routing (e.g. "client-abc-123").
  // When set, notifications are published to "RESPONSE:{client_id}".
  optional string client_id = 7;
}
// Immediate Relay -> Client acknowledgment of a SubmitHistoricalRequest.
message SubmitResponse {
  enum SubmitStatus {
    // NOTE(review): zero value is a success state, so an unset status
    // decodes as QUEUED. Cannot be renumbered compatibly.
    QUEUED = 0;     // Request queued successfully.
    DUPLICATE = 1;  // Request ID already exists.
    INVALID = 2;    // Invalid parameters.
    ERROR = 3;      // Internal error.
  }

  // Request ID, echoed back from the submission.
  string request_id = 1;

  // Outcome of the submission.
  SubmitStatus status = 2;

  // Failure details when status is not QUEUED.
  optional string error_message = 3;

  // Pub/sub topic the client should subscribe to for the result,
  // e.g. "RESPONSE:client-abc-123" or "HISTORY_READY:{request_id}".
  string notification_topic = 4;
}
// Published on the pub/sub path (Flink -> Relay -> Client) once Flink has
// written the requested history to Iceberg.
message HistoryReadyNotification {
  // Outcome of the historical data fetch.
  enum NotificationStatus {
    OK = 0;         // Data successfully written to Iceberg.
    NOT_FOUND = 1;  // No data found for the requested period.
    ERROR = 2;      // Error during fetch or processing.
    TIMEOUT = 3;    // Request timed out.
  }

  // NOTE(review): numbering jumps from 7 to 10; reserve 8-9 so the gap
  // cannot be silently reused with a different meaning. If these numbers
  // are deliberately being held for planned fields, drop this line.
  reserved 8, 9;

  // Request ID this notification refers to.
  string request_id = 1;

  // Market identifier.
  string ticker = 2;

  // Candle width, in seconds.
  uint32 period_seconds = 3;

  // Start of the covered range, microseconds since the Unix epoch.
  uint64 start_time = 4;

  // End of the covered range, microseconds since the Unix epoch.
  uint64 end_time = 5;

  // Outcome of the data fetch.
  NotificationStatus status = 6;

  // Failure details when status is not OK.
  optional string error_message = 7;

  // Iceberg namespace the client should query for the data.
  string iceberg_namespace = 10;

  // Iceberg table the client should query for the data.
  string iceberg_table = 11;

  // Number of records written.
  uint32 row_count = 12;

  // Write-completion time, microseconds since the Unix epoch.
  uint64 completed_at = 13;
}
// Legacy Client -> Relay request, retained for backward compatibility.
// New clients should use SubmitHistoricalRequest instead.
message OHLCRequest {
  // Request ID used for tracking.
  string request_id = 1;

  // Market identifier.
  string ticker = 2;

  // Start of the range, microseconds since the Unix epoch.
  uint64 start_time = 3;

  // End of the range, microseconds since the Unix epoch.
  uint64 end_time = 4;

  // Candle width in seconds (e.g. 60 = 1m, 300 = 5m, 3600 = 1h).
  uint32 period_seconds = 5;

  // Upper bound on the number of candles returned, when set.
  optional uint32 limit = 6;
}
// Generic Flink -> Client response usable for any request type.
message Response {
  enum ResponseStatus {
    // NOTE(review): zero value is a success state, so an unset status
    // decodes as OK. Cannot be renumbered compatibly.
    OK = 0;
    NOT_FOUND = 1;
    ERROR = 2;
  }

  // Request ID this response answers.
  string request_id = 1;

  // Outcome of the request.
  ResponseStatus status = 2;

  // Failure details when status is not OK.
  optional string error_message = 3;

  // Opaque payload: serialized protobuf messages.
  repeated bytes data = 4;

  // Total record count, when known.
  optional uint32 total_records = 5;

  // True on the last response of a paginated result stream.
  bool is_final = 6;
}
// Client -> Flink registration of a CEP trigger.
message CEPTriggerRequest {
  // Unique identifier for this trigger.
  string trigger_id = 1;

  // Flink SQL CEP pattern/condition to evaluate.
  string sql_pattern = 2;

  // Markets the trigger monitors.
  repeated string tickers = 3;

  // Callback endpoint identifier, used for DEALER/ROUTER routing.
  optional string callback_id = 4;

  // Free-form parameters for the CEP query.
  map<string, string> parameters = 5;
}
// Flink -> Client acknowledgment of a CEP trigger registration.
message CEPTriggerAck {
  enum TriggerStatus {
    // NOTE(review): zero value is a success state, so an unset status
    // decodes as REGISTERED. Cannot be renumbered compatibly.
    REGISTERED = 0;
    ALREADY_REGISTERED = 1;
    INVALID_SQL = 2;
    ERROR = 3;
  }

  // Trigger ID being acknowledged.
  string trigger_id = 1;

  // Outcome of the registration.
  TriggerStatus status = 2;

  // Failure details when status is not REGISTERED.
  optional string error_message = 3;
}
// Flink -> Client callback fired when a registered CEP trigger matches.
message CEPTriggerEvent {
  // ID of the trigger that fired.
  string trigger_id = 1;

  // Firing time, microseconds since the Unix epoch.
  uint64 timestamp = 2;

  // Column names/types describing the result rows below.
  ResultSchema schema = 3;

  // Result rows produced by the Flink SQL query.
  repeated ResultRow rows = 4;

  // Extra key/value context attached by the CEP pattern.
  map<string, string> context = 5;
}
// Column layout for the rows in a CEPTriggerEvent.
message ResultSchema {
  // Column names, in row order.
  repeated string column_names = 1;

  // Column types, in the same order, using Flink SQL type names.
  repeated string column_types = 2;
}
// One result row: one bytes entry per column, in schema order.
// Each entry is a protobuf-serialized FieldValue.
message ResultRow {
  repeated bytes values = 1;
}
// A single dynamically-typed column value carried inside ResultRow.
message FieldValue {
  // Exactly one variant is set, matching the column's declared type.
  oneof value {
    string string_val = 1;
    int64 int_val = 2;
    double double_val = 3;
    bool bool_val = 4;
    bytes bytes_val = 5;
    // NOTE(review): units not stated here — presumably microseconds since
    // the Unix epoch, matching the rest of this file; confirm with writers.
    uint64 timestamp_val = 6;
  }
}