feat(substreams): add substreams for Uniswap v2 and v3

This commit is contained in:
zizou
2024-10-11 12:57:34 +02:00
parent 58455a1188
commit 73d48236ba
70 changed files with 16697 additions and 1 deletion

View File

@@ -0,0 +1,99 @@
use std::str::FromStr;
use ethabi::ethereum_types::Address;
use serde::Deserialize;
use substreams::prelude::BigInt;
use substreams_ethereum::pb::eth::v2::{self as eth};
use substreams_helper::{event_handler::EventHandler, hex::Hexable};
use crate::{
abi::factory::events::PairCreated,
pb::tycho::evm::v1::{
Attribute, Block, BlockChanges, ChangeType, EntityChanges, FinancialType,
ImplementationType, ProtocolComponent, ProtocolType, Transaction, TransactionChanges,
},
};
/// Handler parameters, deserialized from the module's query-string-style
/// params via `serde_qs` (see `map_pools_created`).
#[derive(Debug, Deserialize)]
struct Params {
    // Hex string parsed into an `Address` and used to filter factory events.
    factory_address: String,
    // Name copied verbatim into the `ProtocolType` of each emitted component.
    protocol_type_name: String,
}
#[substreams::handlers::map]
pub fn map_pools_created(
params: String,
block: eth::Block,
) -> Result<BlockChanges, substreams::errors::Error> {
let mut new_pools: Vec<TransactionChanges> = vec![];
let params: Params = serde_qs::from_str(params.as_str()).expect("Unable to deserialize params");
get_pools(&block, &mut new_pools, &params);
let tycho_block: Block = block.into();
Ok(BlockChanges { block: Some(tycho_block), changes: new_pools })
}
/// Extract newly created pools from the block's `PairCreated` events and
/// append one `TransactionChanges` entry per pool to `new_pools`.
///
/// # Panics
/// Panics (with a descriptive message) when `params.factory_address` is not a
/// valid hex address — the address comes from module configuration, so a bad
/// value is a deployment error.
fn get_pools(block: &eth::Block, new_pools: &mut Vec<TransactionChanges>, params: &Params) {
    // Parse the factory address up front so a misconfigured module fails fast
    // with a clear message instead of a bare `unwrap` panic.
    let factory_address = Address::from_str(&params.factory_address)
        .expect("Invalid factory address in params");

    // Extract new pools from PairCreated events
    let mut on_pair_created = |event: PairCreated, _tx: &eth::TransactionTrace, _log: &eth::Log| {
        let tycho_tx: Transaction = _tx.into();

        new_pools.push(TransactionChanges {
            tx: Some(tycho_tx.clone()),
            contract_changes: vec![],
            // Reserves start at zero; subsequent Sync events update them.
            entity_changes: vec![EntityChanges {
                component_id: event.pair.to_hex(),
                attributes: vec![
                    Attribute {
                        name: "reserve0".to_string(),
                        value: BigInt::from(0).to_signed_bytes_le(),
                        change: ChangeType::Creation.into(),
                    },
                    Attribute {
                        name: "reserve1".to_string(),
                        value: BigInt::from(0).to_signed_bytes_le(),
                        change: ChangeType::Creation.into(),
                    },
                ],
            }],
            component_changes: vec![ProtocolComponent {
                id: event.pair.to_hex(),
                tokens: vec![event.token0, event.token1],
                contracts: vec![],
                static_att: vec![
                    // Trading Fee is hardcoded to 0.3%, saved as int in bps (basis points)
                    Attribute {
                        name: "fee".to_string(),
                        value: BigInt::from(30).to_signed_bytes_le(),
                        change: ChangeType::Creation.into(),
                    },
                    Attribute {
                        name: "pool_address".to_string(),
                        value: event.pair,
                        change: ChangeType::Creation.into(),
                    },
                ],
                change: i32::from(ChangeType::Creation),
                protocol_type: Some(ProtocolType {
                    name: params.protocol_type_name.to_string(),
                    financial_type: FinancialType::Swap.into(),
                    attribute_schema: vec![],
                    implementation_type: ImplementationType::Custom.into(),
                }),
                tx: Some(tycho_tx),
            }],
            balance_changes: vec![],
        })
    };

    let mut eh = EventHandler::new(block);
    // Only events emitted by the configured factory are processed.
    eh.filter_by_address(vec![factory_address]);
    eh.on::<PairCreated, _>(&mut on_pair_created);
    eh.handle_events();
}

View File

@@ -0,0 +1,25 @@
use substreams::store::{StoreNew, StoreSetIfNotExists, StoreSetIfNotExistsProto};
use crate::{
pb::tycho::evm::v1::{BlockChanges, ProtocolComponent},
store_key::StoreKey,
};
/// Store handler: persists every newly created pool component so that later
/// steps can match an event's emitting address to a known pool.
#[substreams::handlers::store]
pub fn store_pools(
    pools_created: BlockChanges,
    store: StoreSetIfNotExistsProto<ProtocolComponent>,
) {
    // Flatten all component changes across the block's transactions.
    let new_components = pools_created
        .changes
        .into_iter()
        .flat_map(|change| change.component_changes);

    for component in new_components {
        // Ordinal 0: pool addresses are unique, so write ordering is irrelevant.
        store.set_if_not_exists(
            0,
            StoreKey::Pool.get_unique_pool_key(&component.id),
            &component,
        );
    }
}

View File

@@ -0,0 +1,232 @@
use itertools::Itertools;
use std::collections::HashMap;
use substreams::store::{StoreGet, StoreGetProto};
use substreams_ethereum::pb::eth::v2::{self as eth};
use substreams_helper::{event_handler::EventHandler, hex::Hexable};
use crate::{
abi::pool::events::Sync,
pb::tycho::evm::{
v1,
v1::{
Attribute, BalanceChange, BlockChanges, ChangeType, EntityChanges, ProtocolComponent,
TransactionChanges,
},
},
store_key::StoreKey,
traits::PoolAddresser,
};
/// Composite key for the per-transaction HashMaps: identifies one attribute
/// (or one token balance) of a single component, so repeated updates within a
/// transaction collapse to the final value.
#[derive(Clone, Hash, Eq, PartialEq)]
struct ComponentKey<T> {
    component_id: String,
    name: T,
}

impl<T> ComponentKey<T> {
    /// Build a key from a component id and a typed discriminator (attribute
    /// name or token bytes).
    fn new(component_id: String, name: T) -> Self {
        Self { component_id, name }
    }
}
/// Accumulated, not-yet-consolidated changes for a single transaction.
#[derive(Clone)]
struct PartialChanges {
    // The transaction these changes belong to.
    transaction: v1::Transaction,
    // Keyed by (component_id, attribute name): a later insert for the same
    // key overwrites the earlier one, keeping only the final state per tx.
    entity_changes: HashMap<ComponentKey<String>, Attribute>,
    // Keyed by (component_id, token bytes): dedupes balance updates per token.
    balance_changes: HashMap<ComponentKey<Vec<u8>>, BalanceChange>,
}
impl PartialChanges {
// Consolidate the entity changes into a vector of EntityChanges. Initially, the entity changes
// are in a map to prevent duplicates. For each transaction, we need to have only one final
// state change, per state. Example:
// If we have two sync events for the same pool (in the same tx), we need to have only one final
// state change for the reserves. This will be the last sync event, as it is the final state
// of the pool after the transaction.
fn consolidate_entity_changes(self) -> Vec<EntityChanges> {
self.entity_changes
.into_iter()
.map(|(key, attribute)| (key.component_id, attribute))
.into_group_map()
.into_iter()
.map(|(component_id, attributes)| EntityChanges { component_id, attributes })
.collect()
}
}
/// Map handler: folds Sync-event reserve/balance updates into the pool
/// creations produced by the previous step and returns the merged block view.
#[substreams::handlers::map]
pub fn map_pool_events(
    block: eth::Block,
    block_entity_changes: BlockChanges,
    pools_store: StoreGetProto<ProtocolComponent>,
) -> Result<BlockChanges, substreams::errors::Error> {
    // The Sync event alone suffices for our use-case: it is emitted on every
    // reserve-altering call, so it fully tracks each pool's reserves.
    let mut merged_changes = block_entity_changes;
    let mut tx_changes: HashMap<Vec<u8>, PartialChanges> = HashMap::new();

    handle_sync(&block, &mut tx_changes, &pools_store);
    merge_block(&mut tx_changes, &mut merged_changes);

    Ok(merged_changes)
}
/// Handle the sync events and update the reserves of the pools.
///
/// This function is called for each block, and it will handle the sync events for each transaction.
/// On UniswapV2, Sync events are emitted on every reserve-altering function call, so we can use
/// only this event to keep track of the pool state.
///
/// This function also relies on an intermediate HashMap to store the changes for each transaction.
/// This is necessary because we need to consolidate the changes for each transaction before adding
/// them to the block_entity_changes. This HashMap prevents us from having duplicate changes for the
/// same pool and token. See the PartialChanges struct for more details.
fn handle_sync(
    block: &eth::Block,
    tx_changes: &mut HashMap<Vec<u8>, PartialChanges>,
    store: &StoreGetProto<ProtocolComponent>,
) {
    let mut on_sync = |event: Sync, _tx: &eth::TransactionTrace, _log: &eth::Log| {
        let pool_address_hex = _log.address.to_hex();

        // `must_get_last` panics if the pool is missing; the address filter
        // below guarantees only known pools reach this closure.
        let pool =
            store.must_get_last(StoreKey::Pool.get_unique_pool_key(pool_address_hex.as_str()));

        // Convert reserves to bytes
        let reserves_bytes = [event.reserve0, event.reserve1];

        // One PartialChanges accumulator per transaction hash.
        let tx_change = tx_changes
            .entry(_tx.hash.clone())
            .or_insert_with(|| PartialChanges {
                transaction: _tx.into(),
                entity_changes: HashMap::new(),
                balance_changes: HashMap::new(),
            });

        for (i, reserve_bytes) in reserves_bytes.iter().enumerate() {
            let attribute_name = format!("reserve{}", i);
            // By using a HashMap, we can overwrite the previous value of the reserve attribute if
            // it is for the same pool and the same attribute name (reserves).
            tx_change.entity_changes.insert(
                ComponentKey::new(pool_address_hex.clone(), attribute_name.clone()),
                Attribute {
                    name: attribute_name,
                    value: reserve_bytes
                        .clone()
                        .to_signed_bytes_le(), //TODO: Unify bytes encoding (either be or le)
                    change: ChangeType::Update.into(),
                },
            );
        }

        // Update balance changes for each token
        // NOTE(review): reserves above are little-endian but balances below are
        // big-endian (`to_signed_bytes_be`) — confirm downstream consumers
        // expect this split before unifying, per the TODO above.
        for (index, token) in pool.tokens.iter().enumerate() {
            let balance = &reserves_bytes[index];
            // HashMap also prevents having duplicate balance changes for the same pool and token.
            tx_change.balance_changes.insert(
                ComponentKey::new(pool_address_hex.clone(), token.clone()),
                BalanceChange {
                    token: token.clone(),
                    balance: balance.clone().to_signed_bytes_be(),
                    component_id: pool_address_hex.as_bytes().to_vec(),
                },
            );
        }
    };

    let mut eh = EventHandler::new(block);

    // Filter the sync events by the pool address, to make sure we don't process events for other
    // Protocols that use the same event signature.
    eh.filter_by_address(PoolAddresser { store });
    eh.on::<Sync, _>(&mut on_sync);
    eh.handle_events();
}
/// Merge the changes from the sync events with the create_pool events previously mapped on
/// block_entity_changes.
///
/// Parameters:
/// - tx_changes: HashMap with the changes for each transaction, filled by `handle_sync`.
///   It is fully consumed (drained) by this function.
/// - block_entity_changes: The BlockChanges struct that will be updated with the changes from
///   the sync events. It comes pre-filled with the create_pool changes mapped in
///   1_map_pool_created.
///
/// This function is called after `handle_sync`, and block_entity_changes is expected to be
/// complete (all new pools plus all sync updates of the block) when it returns.
fn merge_block(
    tx_changes: &mut HashMap<Vec<u8>, PartialChanges>,
    block_entity_changes: &mut BlockChanges,
) {
    let mut tx_entity_changes_map = HashMap::new();

    // Index the pool-creation changes by transaction hash, merging any
    // duplicate entries for the same transaction.
    for change in block_entity_changes
        .changes
        .clone()
        .into_iter()
    {
        let transaction = change
            .tx
            .as_ref()
            .expect("TransactionChanges must carry a transaction");
        tx_entity_changes_map
            .entry(transaction.hash.clone())
            .and_modify(|c: &mut TransactionChanges| {
                c.component_changes
                    .extend(change.component_changes.clone());
                c.entity_changes
                    .extend(change.entity_changes.clone());
            })
            .or_insert(change);
    }

    // First, attach the sync-event changes to the transactions that also
    // created pools. (The original cloned the whole TransactionChanges twice
    // just to read the hash, and cloned the full PartialChanges; clone only
    // what is needed.)
    for change in tx_entity_changes_map.values_mut() {
        let tx_hash = change
            .tx
            .as_ref()
            .expect("TransactionChanges must carry a transaction")
            .hash
            .clone();

        if let Some(partial_changes) = tx_changes.remove(&tx_hash) {
            // Clone only the balance map; `consolidate_entity_changes`
            // consumes the struct afterwards.
            change.balance_changes = partial_changes
                .balance_changes
                .clone()
                .into_values()
                .collect();
            change.entity_changes = partial_changes.consolidate_entity_changes();
        }
    }

    // Any transactions left in tx_changes altered pool state without creating
    // a pool (the common case). Drain them into the map so
    // block_entity_changes ends up with every sync and new_pool of the block.
    for (_, partial_changes) in tx_changes.drain() {
        let transaction = partial_changes.transaction.clone();
        let balance_changes = partial_changes
            .balance_changes
            .clone()
            .into_values()
            .collect();
        tx_entity_changes_map.insert(
            transaction.hash.clone(),
            TransactionChanges {
                tx: Some(transaction),
                contract_changes: vec![],
                entity_changes: partial_changes.consolidate_entity_changes(),
                balance_changes,
                component_changes: vec![],
            },
        );
    }

    block_entity_changes.changes = tx_entity_changes_map
        .into_values()
        .collect();
}

View File

@@ -0,0 +1,11 @@
// Handler source files are prefixed with their execution order in the
// substreams pipeline; `#[path]` maps them onto valid Rust module names.
#[path = "1_map_pool_created.rs"]
mod map_pool_created;
#[path = "2_store_pools.rs"]
mod store_pools;
#[path = "3_map_pool_events.rs"]
mod map_pool_events;

// Re-export the handlers under the names referenced by the manifest.
pub use map_pool_created::map_pools_created;
pub use map_pool_events::map_pool_events;
pub use store_pools::store_pools;