chore: remove unused substreams protobuf files (#171)

This commit is contained in:
Louise Poole
2025-03-06 12:07:29 +02:00
committed by GitHub
parent f13c1c263b
commit ae10195bca
9 changed files with 0 additions and 684 deletions

View File

@@ -1,36 +0,0 @@
syntax = "proto3";
package sf.substreams.internal.v2;
import "google/protobuf/any.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/internal/v2;pbssinternal";
// StoreDeltas bundles an ordered list of mutations applied to a store.
message StoreDeltas {
  repeated StoreDelta store_deltas = 1;
}
// StoreDelta describes a single mutation applied to one store key.
message StoreDelta {
  // Operation is the kind of mutation; UNSET (0) is the default/unknown value.
  enum Operation {
    UNSET = 0;
    CREATE = 1;
    UPDATE = 2;
    DELETE = 3;
  }
  Operation operation = 1;
  // ordinal presumably orders deltas relative to each other — TODO confirm scope (per block?).
  uint64 ordinal = 2;
  string key = 3;
  // Value before the mutation (expected empty on CREATE — confirm).
  bytes old_value = 4;
  // Value after the mutation (expected empty on DELETE — confirm).
  bytes new_value = 5;
}
// ModuleOutput carries the result of executing a single module.
message ModuleOutput {
  string module_name = 1;
  // Exactly one variant is set, depending on the module kind (map vs store).
  oneof data {
    google.protobuf.Any map_output = 2;
    StoreDeltas store_deltas = 3;
  }
  repeated string logs = 4;
  // Presumably true when `logs` was cut off by a server-side size limit — confirm.
  bool debug_logs_truncated = 5;
  bool cached = 6;
}

View File

@@ -1,88 +0,0 @@
syntax = "proto3";
package sf.substreams.internal.v2;
import "sf/substreams/v1/modules.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/internal/v2;pbssinternal";
// Substreams is the internal execution service (package sf.substreams.internal.v2).
service Substreams {
  // ProcessRange executes the requested modules over a block range and streams
  // back Update, Completed or Failed messages.
  rpc ProcessRange(ProcessRangeRequest) returns (stream ProcessRangeResponse);
}
// ProcessRangeRequest asks for execution of `output_module` over
// [start_block_num, stop_block_num] — inclusivity of the bounds is not stated
// here; confirm against the server implementation.
message ProcessRangeRequest {
  uint64 start_block_num = 1;
  uint64 stop_block_num = 2;
  string output_module = 3;
  sf.substreams.v1.Modules modules = 4;
  uint32 stage = 5; // 0-based index of stage to execute up to
}
// ProcessRangeResponse is one message of the ProcessRange stream; exactly one
// `type` variant is set per message.
message ProcessRangeResponse {
  reserved 1; // previously string module_name = 1;
  reserved 2; // previously in oneof(type): BlockRange processed_range
  reserved 3; // previously in oneof(type): ProcessedBytes processed_bytes
  oneof type {
    Failed failed = 4;
    Completed completed = 5;
    Update update = 6;
  }
}
// Update carries periodic execution statistics for an in-flight request.
message Update {
  uint64 duration_ms = 1;
  uint64 processed_blocks = 2;
  uint64 total_bytes_read = 3;
  uint64 total_bytes_written = 4;
  repeated ModuleStats modules_stats = 5;
}
// ModuleStats aggregates per-module timing and store-access counters.
message ModuleStats {
  // name of the module these counters belong to.
  string name = 1;
  uint64 processing_time_ms = 2;
  uint64 store_operation_time_ms = 3;
  uint64 store_read_count = 4;
  repeated ExternalCallMetric external_call_metrics = 5;
  // store-specific (will be 0 on mappers)
  uint64 store_write_count = 10;
  uint64 store_deleteprefix_count = 11;
  uint64 store_size_bytes = 12;
}
// ExternalCallMetric counts invocations of one named external call and the
// cumulative time spent in it.
message ExternalCallMetric {
  string name = 1;
  uint64 count = 2;
  uint64 time_ms = 3;
}
// Completed signals that the requested range was fully processed.
message Completed {
  repeated BlockRange all_processed_ranges = 1;
  // TraceId represents the producer's trace id that produced the partial files.
  // This is present here so that the consumer can use it to identify the
  // right partial files that needs to be squashed together.
  //
  // The TraceId can be empty in which case it should be assumed by the tier1
  // consuming this message that the tier2 that produced those partial files
  // is not yet updated to produce a trace id and as such, the tier1 should
  // generate a legacy partial file name.
  string trace_id = 2;
}
// Failed reports a terminal error for the request, with the module logs
// gathered up to the failure.
message Failed {
  string reason = 1;
  repeated string logs = 2;
  // FailureLogsTruncated is a flag that tells you if you received all the logs or if they
  // were truncated because you logged too much (fixed limit currently is set to 128 KiB).
  bool logs_truncated = 3;
}
// BlockRange describes a span of block numbers.
// NOTE(review): field numbers start at 2; number 1 appears to have been used
// and dropped without a `reserved` statement — confirm before ever reusing it.
message BlockRange {
  uint64 start_block = 2;
  uint64 end_block = 3;
}

View File

@@ -1,20 +0,0 @@
syntax = "proto3";
package sf.substreams;
import "google/protobuf/descriptor.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams;pbsubstreams";
// FieldOptions declares substreams-specific behaviors consumed by the
// `substreams pack` command (attached to fields via the extension below).
message FieldOptions {
  // this option informs the `substreams pack` command that it should treat the corresponding manifest value as a path to a file, putting its content as bytes in this field.
  // must be applied to a `bytes` or `string` field
  bool load_from_file = 1;
  // this option informs the `substreams pack` command that it should treat the corresponding manifest value as a path to a folder, zipping its content and putting the zip content as bytes in this field.
  // must be applied to a `bytes` field
  bool zip_from_folder = 2;
}
// Attaches the substreams FieldOptions above to any protobuf field, under
// custom-option extension number 2200.
extend google.protobuf.FieldOptions {
  optional FieldOptions options = 2200;
}

View File

@@ -1,235 +0,0 @@
syntax = "proto3";
package sf.substreams.rpc.v2;
import "google/protobuf/any.proto";
import "sf/substreams/v1/clock.proto";
import "sf/substreams/v1/modules.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2;pbsubstreamsrpc";
// Stream serves Substreams block data to consumers.
service Stream {
  // Blocks opens a server stream of Response messages for the given Request.
  rpc Blocks(Request) returns (stream Response);
}
// Request configures a Blocks stream: block range, cursor resumption,
// execution mode and the module graph to run.
message Request {
  // May be negative (relative to chain head) — confirm the exact semantics
  // against the server; note it is int64 while stop_block_num is uint64.
  int64 start_block_num = 1;
  string start_cursor = 2;
  uint64 stop_block_num = 3;
  // With final_blocks_only, you only receive blocks that are irreversible:
  // 'final_block_height' will be equal to current block and no 'undo_signal' will ever be sent
  bool final_blocks_only = 4;
  // Substreams has two modes when executing your module(s): either development mode or production
  // mode. Development and production modes impact the execution of Substreams, important aspects
  // of execution include:
  // * The time required to reach the first byte.
  // * The speed that large ranges get executed.
  // * The module logs and outputs sent back to the client.
  //
  // By default, the engine runs in developer mode, with richer and deeper output. Differences
  // between production and development modes include:
  // * Forward parallel execution is enabled in production mode and disabled in development mode
  // * The time required to reach the first byte in development mode is faster than in production mode.
  //
  // Specific attributes of development mode include:
  // * The client will receive all of the executed module's logs.
  // * It's possible to request specific store snapshots in the execution tree (via `debug_initial_store_snapshot_for_modules`).
  // * Multiple modules' outputs are possible.
  //
  // With production mode, however, you trade off functionality for high speed, enabling forward
  // parallel execution of modules ahead of time.
  bool production_mode = 5;
  string output_module = 6;
  sf.substreams.v1.Modules modules = 7;
  // Available only in developer mode
  repeated string debug_initial_store_snapshot_for_modules = 10;
}
// Response is one message of the Blocks stream; exactly one variant is set.
message Response {
  oneof message {
    SessionInit session = 1; // Always sent first
    ModulesProgress progress = 2; // Progress of data preparation, before sending in the stream of `data` events.
    BlockScopedData block_scoped_data = 3;
    BlockUndoSignal block_undo_signal = 4;
    Error fatal_error = 5;
    // Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set.
    InitialSnapshotData debug_snapshot_data = 10;
    // Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set.
    InitialSnapshotComplete debug_snapshot_complete = 11;
  }
}
// BlockUndoSignal informs you that every bit of data
// with a block number above 'last_valid_block' has been reverted
// on-chain. Delete that data and restart from 'last_valid_cursor'.
message BlockUndoSignal {
  sf.substreams.v1.BlockRef last_valid_block = 1;
  string last_valid_cursor = 2;
}
// BlockScopedData carries the output of the requested module for one block,
// plus the clock and cursor needed to resume the stream.
message BlockScopedData {
  MapModuleOutput output = 1;
  sf.substreams.v1.Clock clock = 2;
  string cursor = 3;
  // Non-deterministic, allows substreams-sink to let go of their undo data.
  uint64 final_block_height = 4;
  // Debug outputs are populated only in development mode.
  repeated MapModuleOutput debug_map_outputs = 10;
  repeated StoreModuleOutput debug_store_outputs = 11;
}
// SessionInit is the first message of every stream (see Response.session)
// and carries session-level metadata.
message SessionInit {
  string trace_id = 1;
  uint64 resolved_start_block = 2;
  uint64 linear_handoff_block = 3;
  uint64 max_parallel_workers = 4;
}
// InitialSnapshotComplete signals the end of the debug store snapshot,
// with the cursor to resume streaming from.
message InitialSnapshotComplete {
  string cursor = 1;
}
// InitialSnapshotData carries one batch of a requested debug store snapshot
// (see Request.debug_initial_store_snapshot_for_modules).
message InitialSnapshotData {
  string module_name = 1;
  repeated StoreDelta deltas = 2;
  // NOTE: declared out of numeric order (4 before 3); the wire contract is
  // unaffected — only declaration order differs.
  uint64 sent_keys = 4;
  uint64 total_keys = 3;
}
// MapModuleOutput is the packed output of one mapper module for one block.
message MapModuleOutput {
  string name = 1;
  google.protobuf.Any map_output = 2;
  // DebugOutputInfo is available in non-production mode only
  OutputDebugInfo debug_info = 10;
}
// StoreModuleOutput are produced for store modules in development mode.
// It is not possible to retrieve store modules in production, with parallelization
// enabled. If you need the deltas directly, write a pass through mapper module
// that will get them down to you.
message StoreModuleOutput {
  string name = 1;
  repeated StoreDelta debug_store_deltas = 2;
  OutputDebugInfo debug_info = 10;
}
// OutputDebugInfo carries per-module logs and caching info, attached to
// outputs in non-production mode.
message OutputDebugInfo {
  repeated string logs = 1;
  // LogsTruncated is a flag that tells you if you received all the logs or if they
  // were truncated because you logged too much (fixed limit currently is set to 128 KiB).
  bool logs_truncated = 2;
  bool cached = 3;
}
// ModulesProgress is a message that is sent every 500ms
message ModulesProgress {
  // previously: repeated ModuleProgress modules = 1;
  // these previous `modules` messages were sent in bursts and are not sent anymore.
  reserved 1;
  // List of jobs running on tier2 servers
  repeated Job running_jobs = 2;
  // Execution statistics for each module
  repeated ModuleStats modules_stats = 3;
  // Stages definition and completed block ranges
  repeated Stage stages = 4;
  ProcessedBytes processed_bytes = 5;
}
// ProcessedBytes totals the bytes read and written so far by the request.
message ProcessedBytes {
  uint64 total_bytes_read = 1;
  uint64 total_bytes_written = 2;
}
// Error is a fatal, stream-terminating failure attributed to one module.
message Error {
  string module = 1;
  string reason = 2;
  repeated string logs = 3;
  // FailureLogsTruncated is a flag that tells you if you received all the logs or if they
  // were truncated because you logged too much (fixed limit currently is set to 128 KiB).
  bool logs_truncated = 4;
}
// Job describes one block-range execution running on a tier2 server
// (see ModulesProgress.running_jobs).
message Job {
  uint32 stage = 1;
  uint64 start_block = 2;
  uint64 stop_block = 3;
  uint64 processed_blocks = 4;
  uint64 duration_ms = 5;
}
// Stage lists the modules of one execution stage and the block ranges
// already completed for it.
message Stage {
  repeated string modules = 1;
  repeated BlockRange completed_ranges = 2;
}
// ModuleStats gathers metrics and statistics from each module, running on tier1 or tier2
// All the 'count' and 'time_ms' values may include duplicate for each stage going over that module
message ModuleStats {
  // name of the module
  string name = 1;
  // total_processed_blocks is the sum of blocks sent to that module code
  uint64 total_processed_block_count = 2;
  // total_processing_time_ms is the sum of all time spent running that module code
  uint64 total_processing_time_ms = 3;
  // external_calls are chain-specific intrinsics, like "Ethereum RPC calls".
  repeated ExternalCallMetric external_call_metrics = 4;
  // total_store_operation_time_ms is the sum of all time spent running that module code waiting for a store operation (ex: read, write, delete...)
  uint64 total_store_operation_time_ms = 5;
  // total_store_read_count is the sum of all the store Read operations called from that module code
  uint64 total_store_read_count = 6;
  // total_store_write_count is the sum of all store Write operations called from that module code (store-only)
  uint64 total_store_write_count = 10;
  // total_store_deleteprefix_count is the sum of all store DeletePrefix operations called from that module code (store-only)
  // note that DeletePrefix can be a costly operation on large stores
  uint64 total_store_deleteprefix_count = 11;
  // store_size_bytes is the uncompressed size of the full KV store for that module, from the last 'merge' operation (store-only)
  uint64 store_size_bytes = 12;
  // total_store_merging_time_ms is the time spent merging partial stores into a full KV store for that module (store-only)
  uint64 total_store_merging_time_ms = 13;
  // store_currently_merging is true if there is a merging operation (partial store to full KV store) on the way.
  bool store_currently_merging = 14;
  // highest_contiguous_block is the highest block in the highest merged full KV store of that module (store-only)
  uint64 highest_contiguous_block = 15;
}
// ExternalCallMetric counts invocations of one chain-specific intrinsic
// (e.g. an Ethereum RPC call) and the cumulative time spent in it.
message ExternalCallMetric {
  string name = 1;
  uint64 count = 2;
  uint64 time_ms = 3;
}
// StoreDelta describes a single mutation applied to one store key.
message StoreDelta {
  // Operation is the kind of mutation; UNSET (0) is the default/unknown value.
  enum Operation {
    UNSET = 0;
    CREATE = 1;
    UPDATE = 2;
    DELETE = 3;
  }
  Operation operation = 1;
  // ordinal presumably orders deltas relative to each other — TODO confirm scope.
  uint64 ordinal = 2;
  string key = 3;
  // Value before the mutation (expected empty on CREATE — confirm).
  bytes old_value = 4;
  // Value after the mutation (expected empty on DELETE — confirm).
  bytes new_value = 5;
}
// BlockRange describes a span of block numbers.
// NOTE(review): field numbers start at 2; number 1 appears to have been used
// and dropped without a `reserved` statement — confirm before ever reusing it.
message BlockRange {
  uint64 start_block = 2;
  uint64 end_block = 3;
}

View File

@@ -1,142 +0,0 @@
syntax = "proto3";
package sf.substreams.sink.service.v1;
import "sf/substreams/v1/package.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/sink/service/v1;pbsinksvc";
// Provider manages the lifecycle of deployed Substreams sink services:
// deploy/update, inspect, and pause/stop/resume/remove.
service Provider {
  rpc Deploy(DeployRequest) returns (DeployResponse);
  rpc Update(UpdateRequest) returns (UpdateResponse);
  rpc Info(InfoRequest) returns (InfoResponse);
  rpc List(ListRequest) returns (ListResponse);
  rpc Pause(PauseRequest) returns (PauseResponse);
  rpc Stop(StopRequest) returns (StopResponse);
  rpc Resume(ResumeRequest) returns (ResumeResponse);
  rpc Remove(RemoveRequest) returns (RemoveResponse);
}
// DeployRequest creates a new deployment from a Substreams package.
message DeployRequest {
  sf.substreams.v1.Package substreams_package = 1;
  bool development_mode = 2;
  repeated Parameter parameters = 3;
}
// Parameter is a key/value pair passed to a deployment.
message Parameter {
  string key = 1;
  string value = 2;
}
// DeployResponse reports the outcome of a Deploy call.
message DeployResponse {
  DeploymentStatus status = 1;
  // deployment_id is a short name (max 8 characters) that uniquely identifies your deployment
  string deployment_id = 2;
  // Map of service name to endpoint/info — exact value semantics not visible here; confirm.
  map<string, string> services = 3;
  string reason = 4;
  // motd: presumably free-form "message of the day" text from the provider — confirm.
  string motd = 5;
}
// UpdateRequest replaces the package of an existing deployment.
message UpdateRequest {
  sf.substreams.v1.Package substreams_package = 1;
  string deployment_id = 2;
  // reset: presumably wipes existing sink state before redeploying — confirm.
  bool reset = 3;
}
// UpdateResponse reports the outcome of an Update call.
message UpdateResponse {
  DeploymentStatus status = 1;
  map<string, string> services = 2;
  string reason = 3;
  string motd = 4;
}
// InfoRequest identifies the deployment to inspect.
message InfoRequest {
  string deployment_id = 1;
}
// InfoResponse describes the current state of one deployment.
message InfoResponse {
  DeploymentStatus status = 1;
  map<string, string> services = 2;
  string reason = 3;
  PackageInfo package_info = 4;
  SinkProgress progress = 5;
  string motd = 6;
}
// SinkProgress reports how far the sink has advanced.
message SinkProgress {
  uint64 last_processed_block = 1;
}
// PackageInfo summarizes the deployed package and its output module.
message PackageInfo {
  string name = 1;
  string version = 2;
  string output_module_name = 3;
  string output_module_hash = 4;
}
// ListRequest is intentionally empty so filter fields can be added later
// without breaking the RPC signature.
message ListRequest {}
// ListResponse enumerates all known deployments with their status.
message ListResponse {
  repeated DeploymentWithStatus deployments = 1;
}
// DeploymentWithStatus is one entry of ListResponse: a deployment id plus
// the same status/progress details returned by Info.
message DeploymentWithStatus {
  string id = 1;
  DeploymentStatus status = 2;
  string reason = 3;
  PackageInfo package_info = 4;
  SinkProgress progress = 5;
  string motd = 6;
}
// DeploymentStatus is the lifecycle state of a deployment.
// NOTE(review): values lack the conventional DEPLOYMENT_STATUS_ prefix;
// renaming them now would break generated code, so they are kept as-is.
enum DeploymentStatus {
  UNKNOWN = 0; // zero value: state not reported / unspecified
  RUNNING = 1;
  FAILING = 2;
  PAUSED = 3;
  STOPPED = 4;
  // Transitional states while an operation is in flight.
  STARTING = 5;
  PAUSING = 6;
  STOPPING = 7;
  REMOVING = 8;
  RESUMING = 9;
}
// RemoveRequest identifies the deployment to remove.
message RemoveRequest {
  string deployment_id = 1;
}
// RemoveResponse returns the status the deployment had before removal.
message RemoveResponse {
  DeploymentStatus previous_status = 1;
}
// PauseRequest identifies the deployment to pause.
message PauseRequest {
  string deployment_id = 1;
}
// PauseResponse reports the status transition caused by Pause.
message PauseResponse {
  DeploymentStatus previous_status = 1;
  DeploymentStatus new_status = 2;
}
// StopRequest identifies the deployment to stop.
message StopRequest {
  string deployment_id = 1;
}
// StopResponse reports the status transition caused by Stop.
message StopResponse {
  DeploymentStatus previous_status = 1;
  DeploymentStatus new_status = 2;
}
// ResumeRequest identifies the deployment to resume.
message ResumeRequest {
  string deployment_id = 1;
}
// ResumeResponse reports the status transition caused by Resume.
message ResumeResponse {
  DeploymentStatus previous_status = 1;
  DeploymentStatus new_status = 2;
}

View File

@@ -1,20 +0,0 @@
syntax = "proto3";
package sf.substreams.v1;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/v1;pbsubstreams";
// Clock is a pointer to a block with added timestamp
message Clock {
  string id = 1;
  uint64 number = 2;
  google.protobuf.Timestamp timestamp = 3;
}
// BlockRef is a pointer to a block to which we don't know the timestamp
message BlockRef {
  string id = 1;
  uint64 number = 2;
}

View File

@@ -1,98 +0,0 @@
syntax = "proto3";
package sf.substreams.v1;
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/v1;pbsubstreams";
// Modules is the full module graph of a package: the module definitions plus
// the compiled binaries they reference.
message Modules {
  repeated Module modules = 1;
  repeated Binary binaries = 2;
}
// Binary represents some code compiled to its binary form.
message Binary {
  string type = 1;
  bytes content = 2;
}
// Module is the definition of a single executable unit: either a mapper
// (KindMap) or a store (KindStore), with its inputs and output type.
message Module {
  string name = 1;
  // Exactly one kind is set and determines the module's role.
  oneof kind {
    KindMap kind_map = 2;
    KindStore kind_store = 3;
  }
  // Presumably an index into Modules.binaries — confirm against loader code.
  uint32 binary_index = 4;
  string binary_entrypoint = 5;
  repeated Input inputs = 6;
  Output output = 7;
  uint64 initial_block = 8;
  // KindMap declares a mapper module and the protobuf type it outputs.
  message KindMap {
    string output_type = 1;
  }
  // KindStore declares a store module: its merge policy and value type.
  message KindStore {
    // The `update_policy` determines the functions available to mutate the store
    // (like `set()`, `set_if_not_exists()` or `sum()`, etc..) in
    // order to ensure that parallel operations are possible and deterministic
    //
    // Say a store cumulates keys from block 0 to 1M, and a second store
    // cumulates keys from block 1M to 2M. When we want to use this
    // store as a dependency for a downstream module, we will merge the
    // two stores according to this policy.
    UpdatePolicy update_policy = 1;
    string value_type = 2;
    enum UpdatePolicy {
      UPDATE_POLICY_UNSET = 0;
      // Provides a store where you can `set()` keys, and the latest key wins
      UPDATE_POLICY_SET = 1;
      // Provides a store where you can `set_if_not_exists()` keys, and the first key wins
      UPDATE_POLICY_SET_IF_NOT_EXISTS = 2;
      // Provides a store where you can `add_*()` keys, where two stores merge by summing its values.
      UPDATE_POLICY_ADD = 3;
      // Provides a store where you can `min_*()` keys, where two stores merge by leaving the minimum value.
      UPDATE_POLICY_MIN = 4;
      // Provides a store where you can `max_*()` keys, where two stores merge by leaving the maximum value.
      UPDATE_POLICY_MAX = 5;
      // Provides a store where you can `append()` keys, where two stores merge by concatenating the bytes in order.
      UPDATE_POLICY_APPEND = 6;
    }
  }
  // Input declares one data source feeding the module; exactly one variant is set.
  message Input {
    oneof input {
      Source source = 1;
      Map map = 2;
      Store store = 3;
      Params params = 4;
    }
    // Source feeds chain-level data of the given protobuf type.
    message Source {
      string type = 1; // ex: "sf.ethereum.type.v1.Block"
    }
    // Map feeds the output of another mapper module.
    message Map {
      string module_name = 1; // ex: "block_to_pairs"
    }
    // Store feeds another store module, read either by key (GET) or as deltas.
    message Store {
      string module_name = 1;
      Mode mode = 2;
      enum Mode {
        UNSET = 0;
        GET = 1;
        DELTAS = 2;
      }
    }
    // Params feeds a static parameter value to the module.
    message Params {
      string value = 1;
    }
  }
  // Output declares the protobuf type produced by the module.
  message Output {
    string type = 1;
  }
}

View File

@@ -1,42 +0,0 @@
syntax = "proto3";
package sf.substreams.v1;
import "google/protobuf/any.proto";
import "google/protobuf/descriptor.proto";
import "sf/substreams/v1/modules.proto";
option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/v1;pbsubstreams";
// Package is a self-contained Substreams bundle: proto descriptors, the module
// graph, metadata and optional sink configuration.
message Package {
  // Needs to be field number 1 so this file can be used _directly_ as a
  // buf `Image` and/or a ProtoSet for grpcurl and other tools
  repeated google.protobuf.FileDescriptorProto proto_files = 1;
  reserved 2 to 4; // Reserved for future: in case protosets adds fields
  uint64 version = 5;
  sf.substreams.v1.Modules modules = 6;
  repeated ModuleMetadata module_meta = 7;
  repeated PackageMetadata package_meta = 8;
  // Source network for Substreams to fetch its data from.
  string network = 9;
  google.protobuf.Any sink_config = 10;
  string sink_module = 11;
  // image is the bytes to a JPEG, WebP or PNG file. Max size is 2 MiB
  bytes image = 12;
}
// PackageMetadata describes one package (or dependency) bundled in a Package.
message PackageMetadata {
  string version = 1;
  string url = 2;
  string name = 3;
  string doc = 4;
}
// ModuleMetadata attaches documentation to a module and links it back to its
// originating package.
message ModuleMetadata {
  // Corresponds to the index in `Package.metadata.package_meta`
  uint64 package_index = 1;
  string doc = 2;
}

View File

@@ -4,9 +4,6 @@ Some shared functionality that is used to create tycho substream packages.
## Protobuf Models
Protobuf models are manually synced from the `tycho-indexer` repository whenever they
change.
To generate the rust structs run the following command from within the root
directory: