complete trigger rework; update SwapOrderStatus with Andrew's changes; not fully debugged
@@ -72,7 +72,7 @@ def upgrade() -> None:
     sa.Column('id', sa.UUID(), nullable=False),
     sa.Column('chain', dexorder.database.column_types.Blockchain(), nullable=False),
     sa.Column('height', sa.Integer(), nullable=False),
-    sa.Column('state', sa.Enum('Requested', 'Signed', 'Sent', 'Mined', name='transactionjobstate'), nullable=False),
+    sa.Column('state', sa.Enum('Requested', 'Signed', 'Sent', 'Mined', 'Error', name='transactionjobstate'), nullable=False),
     sa.Column('request', dexorder.database.column_types.DataclassDictBase(astext_type=sa.Text()), nullable=False),
     sa.Column('tx_id', postgresql.BYTEA(), nullable=True),
    sa.Column('tx_data', postgresql.BYTEA(), nullable=True),
@@ -83,10 +83,24 @@ def upgrade() -> None:
     op.create_index(op.f('ix_transactionjob_height'), 'transactionjob', ['height'], unique=False)
     op.create_index(op.f('ix_transactionjob_state'), 'transactionjob', ['state'], unique=False)
     op.create_index(op.f('ix_transactionjob_tx_id'), 'transactionjob', ['tx_id'], unique=False)
-    # ### end Alembic commands ###
+    op.create_table('dbblock',
+    sa.Column('chain', dexorder.database.column_types.Blockchain(), nullable=False),
+    sa.Column('hash', postgresql.BYTEA(), nullable=False),
+    sa.Column('height', sa.Integer(), nullable=False),
+    sa.Column('timestamp', sa.INTEGER(), nullable=False),
+    sa.Column('confirmed', sa.Boolean(), nullable=False),
+    sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+    sa.PrimaryKeyConstraint('chain', 'hash')
+    )
+    op.create_index(op.f('ix_dbblock_height'), 'dbblock', ['height'], unique=False)
+    op.create_index(op.f('ix_dbblock_timestamp'), 'dbblock', ['timestamp'], unique=False)
 
 
 def downgrade() -> None:
+    op.drop_index(op.f('ix_dbblock_timestamp'), table_name='dbblock')
+    op.drop_index(op.f('ix_dbblock_height'), table_name='dbblock')
+    op.drop_table('dbblock')
     op.drop_index(op.f('ix_transactionjob_tx_id'), table_name='transactionjob')
     op.drop_index(op.f('ix_transactionjob_state'), table_name='transactionjob')
     op.drop_index(op.f('ix_transactionjob_height'), table_name='transactionjob')
@@ -1,44 +0,0 @@
-"""BlockIndex
-
-Revision ID: ee22683693a5
-Revises: 516b55c83144
-Create Date: 2024-07-19 18:52:04.933167
-
-"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
-import dexorder.database
-import dexorder.database.column_types
-from sqlalchemy.dialects import postgresql
-
-# revision identifiers, used by Alembic.
-revision: str = 'ee22683693a5'
-down_revision: Union[str, None] = '516b55c83144'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
-    # ### commands auto generated by Alembic - please adjust! ###
-    op.create_table('dbblock',
-    sa.Column('chain', dexorder.database.column_types.Blockchain(), nullable=False),
-    sa.Column('hash', postgresql.BYTEA(), nullable=False),
-    sa.Column('height', sa.Integer(), nullable=False),
-    sa.Column('timestamp', sa.INTEGER(), nullable=False),
-    sa.Column('confirmed', sa.Boolean(), nullable=False),
-    sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.PrimaryKeyConstraint('chain', 'hash')
-    )
-    op.create_index(op.f('ix_dbblock_height'), 'dbblock', ['height'], unique=False)
-    op.create_index(op.f('ix_dbblock_timestamp'), 'dbblock', ['timestamp'], unique=False)
-    # ### end Alembic commands ###
-
-
-def downgrade() -> None:
-    # ### commands auto generated by Alembic - please adjust! ###
-    op.drop_index(op.f('ix_dbblock_timestamp'), table_name='dbblock')
-    op.drop_index(op.f('ix_dbblock_height'), table_name='dbblock')
-    op.drop_table('dbblock')
-    # ### end Alembic commands ###
@@ -16,3 +16,4 @@ eth-bloom
 python-dateutil
 eth_abi
 pdpyras # pagerduty
+numpy
@@ -21,16 +21,13 @@ def warningAlert(title, message, dedup_key=NARG, log_level=logging.WARNING):
     return alert(title, message, dedup_key, log_level)
 
 
-async def spawn_alert(title, message, dedup_key):
-    alert_pagerduty(title,message,dedup_key)
-
-
 pagerduty_session = None
 hostname = None
 
 def alert_pagerduty(title, message, dedup_key, log_level):
     if not config.pagerduty:
         return
+    # noinspection PyBroadException
     try:
         global pagerduty_session
         global hostname
@@ -1,8 +1,22 @@
-from typing import TypedDict, Union
+from dataclasses import dataclass
+from typing import TypedDict, Union, Type
 
 Address = str
 Quantity = Union[str,int]
 
+
+@dataclass
+class TransactionRequest:
+    """
+    All members of TransactionRequest and its subclasses must be JSON-serializable. They get stored in the database
+    TransactionJob in a JSONB field, as handled by the DataclassDict column type.
+    """
+    type: str
+
+# subclasses of TransactionRequest must register their type code here so the appropriate dataclass may be constructed
+transaction_request_registry: dict[str, Type[TransactionRequest]] = {}
+
+
 TransactionDict = TypedDict( 'TransactionDict', {
     'from': Address,
     'to': Address,
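The registry introduced here — JSON-safe dataclasses plus a type-code table — can be exercised end to end. Below is a minimal sketch using the diff's own names; the `PingRequest` subclass and its `'ping'` code are hypothetical, and the deserializer shown is the one this commit moves into the transaction model:

```python
from dataclasses import dataclass, asdict
from typing import Type

@dataclass
class TransactionRequest:
    type: str

transaction_request_registry: dict[str, Type[TransactionRequest]] = {}

def deserialize_transaction_request(**d):
    Class = transaction_request_registry.get(d['type'])
    if Class is None:
        raise ValueError(f'No TransactionRequest for type "{d["type"]}"')
    return Class(**d)

@dataclass
class PingRequest(TransactionRequest):  # hypothetical subclass, for illustration only
    nonce: int

transaction_request_registry['ping'] = PingRequest

blob = asdict(PingRequest('ping', 7))            # plain dict, safe for a JSONB column
assert deserialize_transaction_request(**blob) == PingRequest('ping', 7)
```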
@@ -1,6 +1,4 @@
 import math
-from abc import ABC, abstractmethod
-# noinspection PyPackageRequirements
 from contextvars import ContextVar
 
 import dexorder
@@ -1,6 +1,5 @@
 import logging
 from dataclasses import dataclass
-from typing import Optional, Type, Union
 
 log = logging.getLogger(__name__)
 
@@ -29,37 +28,3 @@ class TrancheKey (OrderKey):
 
     def __str__(self):
         return f'{self.vault}|{self.order_index}|{self.tranche_index}'
-
-
-@dataclass
-class ExecutionRequest:
-    height: int
-    proof: None
-
-
-@dataclass
-class TransactionRequest:
-    type: str  # 'te' for tranche execution
-
-@dataclass
-class TrancheExecutionRequest (TransactionRequest):
-    # type: str # 'te' for tranche execution
-    vault: str
-    order_index: int
-    tranche_index: int
-    price_proof: Union[None,dict,tuple[int]]
-
-def new_tranche_execution_request(tk: TrancheKey, _proof: Optional[dict]) -> TrancheExecutionRequest:
-    return TrancheExecutionRequest('te', tk.vault, tk.order_index, tk.tranche_index, (0,))  # todo proof
-
-def deserialize_transaction_request(**d):
-    t = d['type']
-    Class = transaction_request_registry.get(t)
-    if Class is None:
-        raise ValueError(f'No TransactionRequest for type "{t}"')
-    # noinspection PyArgumentList
-    return Class(**d)
-
-transaction_request_registry: dict[str, Type[TransactionRequest]] = dict(
-    te = TrancheExecutionRequest,
-)
@@ -4,14 +4,21 @@ from dataclasses import dataclass
 from enum import Enum
 from typing import Optional
 
-from dexorder.util.convert import decode_IEEE754, encode_IEEE754
+from dexorder.util import hexbytes
+from dexorder.util.convert import decode_IEEE754
 
 log = logging.getLogger(__name__)
 
+"""
+These dataclasses are meant to closely mirror the raw data on-chain, using native Python types but serializing to
+something JSON-able.
+"""
+
 
 class SwapOrderState (Enum):
+    # This includes on-chain codes as well as additional codes
     Unknown = -1
-    Signing = 0  # only used by the web but here for completeness todo rework OrderLib.sol to remove offchain statuses
+    Signing = 0  # only used by the web but here for completeness
     Underfunded = 1
     Open = 2
     Canceled = 3
@@ -45,6 +52,26 @@ class Route:
     def dump(self):
         return self.exchange.value, self.fee
 
+@dataclass
+class Line:
+    intercept: float
+    slope: float
+
+    def value(self, timestamp):
+        return self.intercept + self.slope * timestamp
+
+    @staticmethod
+    def load_from_chain(obj: tuple[int,int]):
+        return Line(decode_IEEE754(obj[0]), decode_IEEE754(obj[1]))
+
+    @staticmethod
+    def load(obj: tuple[float,float]):
+        return Line(*obj)
+
+    def dump(self):
+        return self.intercept, self.slope
+
+
 @dataclass
 class SwapOrder:
     tokenIn: str
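`Line` models a price bound that drifts linearly with time; `load_from_chain` converts two on-chain words through `decode_IEEE754`. A runnable sketch of the idea — the stand-in decoder below assumes each coefficient is packed as a raw 32-bit IEEE-754 bit pattern, which is an assumption, not something this diff states:

```python
import struct
from dataclasses import dataclass

def decode_IEEE754(bits: int) -> float:
    # stand-in: reinterpret an (assumed 32-bit) on-chain word as a float
    return struct.unpack('>f', bits.to_bytes(4, 'big'))[0]

@dataclass
class Line:
    intercept: float
    slope: float

    def value(self, timestamp):
        return self.intercept + self.slope * timestamp

limit = Line(intercept=1.5, slope=0.001)
print(limit.value(60))  # 1.56: the bound one minute past whatever epoch the caller uses
```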
@@ -57,6 +84,10 @@ class SwapOrder:
     conditionalOrder: int
     tranches: list['Tranche']
 
+    @property
+    def min_input_amount(self):
+        return self.minFillAmount if self.amountIsInput else 0
+
     @staticmethod
     def load(obj):
         return SwapOrder(obj[0], obj[1], Route.load(obj[2]), int(obj[3]), int(obj[4]), obj[5], obj[6], obj[7],
@@ -72,7 +103,7 @@ SwapOrder
 in: {self.tokenIn}
 out: {self.tokenOut}
 exchange: {self.route.exchange, self.route.fee}
-amount: {"input" if self.amountIsInput else "output"} {self.amount} {"to owner" if self.outputDirectlyToOwner else ""}
+amount: {"input" if self.amountIsInput else "output"} {self.amount}{" to owner" if self.outputDirectlyToOwner else ""}
 minFill: {self.minFillAmount}
 tranches:
 '''
@@ -80,118 +111,110 @@ SwapOrder
             msg += f' {tranche}\n'
     return msg
 
 
 @dataclass
-class SwapStatus:
-    # this is an elaborated version of the on-chain status
+class ElaboratedTrancheStatus:
+    filledIn: int
+    filledOut: int
+    activationTime: int
+    startTime: int
+    endTime: int
+
+    @staticmethod
+    def load_from_chain(obj: tuple[int,int,int,int]):
+        filled, activationTime, startTime, endTime = obj
+        return ElaboratedTrancheStatus(
+            # we do NOT grab the filled amount from the chain, because our process will handle the fill events
+            # separately by incrementing these status values as fills arrive.
+            0, 0,
+            activationTime, startTime, endTime,
+        )
+
+    def dump(self):
+        # filled fields can be larger than JSON-able ints, so we use strings.
+        return str(self.filledIn), str(self.filledOut), self.activationTime, self.startTime, self.endTime
+
+    @staticmethod
+    def load(obj: tuple[str,str,int,int,int]):
+        filledIn, filledOut, activationTime, startTime, endTime = obj
+        return ElaboratedTrancheStatus(int(filledIn), int(filledOut), activationTime, startTime, endTime)
+
+
+@dataclass
+class ElaboratedSwapOrderStatus:
+    tx_id: bytes
+    order: SwapOrder
     fillFeeHalfBps: int
     state: SwapOrderState
     startTime: int
     startPrice: int
     ocoGroup: Optional[int]
-    filledIn: Optional[int]  # if None then look in the order_filled blockstate
-    filledOut: Optional[int]  # if None then look in the order_filled blockstate
-    trancheFilledIn: Optional[list[int]]  # if None then look in the tranche_filled blockstate
-    trancheFilledOut: Optional[list[int]]  # if None then look in the tranche_filled blockstate
-    trancheActivationTime: list[int]
+    filledIn: int
+    filledOut: int
+    trancheStatus: list[ElaboratedTrancheStatus]
 
-
-@dataclass
-class SwapOrderStatus(SwapStatus):
-    order: SwapOrder
-
-    def __init__(self, order: SwapOrder, *swapstatus_args):
-        """ init with order object first followed by the swap status args"""
-        super().__init__(*swapstatus_args)
-        self.order = order
-
     @staticmethod
-    def load(obj, *, Class=None):
-        if Class is None:
-            Class = SwapOrderStatus
-        order = SwapOrder.load(obj[0])
-        fillFeeHalfBps = int(obj[1])
-        state = SwapOrderState(obj[2])
-        startTime = obj[3]
-        startPrice = obj[4]
-        ocoGroup = None if obj[5] == NO_OCO else obj[5]
-        filledIn = int(obj[6])
-        filledOut = int(obj[7])
-        trancheFilledIn = [int(f) for f in obj[8]]
-        trancheFilledOut = [int(f) for f in obj[9]]
-        trancheActivationTime = [int(f) for f in obj[10]]
-        return Class(order, fillFeeHalfBps, state, startTime, startPrice, ocoGroup,
-                     filledIn, filledOut, trancheFilledIn, trancheFilledOut, trancheActivationTime)
-
-    @staticmethod
-    def load_from_chain(obj, *, Class=None):
-        if Class is None:
-            Class = SwapOrderStatus
-        # 0 SwapOrder order;
+    def load_from_chain(tx_id: bytes, obj):
+        # 0 SwapOrder order
         # 1 int fillFeeHalfBps
-        # 2 bool canceled;
-        # 3 uint32 startTime;
-        # 4 uint32 startPrice;
-        # 5 uint64 ocoGroup;
-        # 6 uint256 filled; // total
-        # 7 uint256[] trancheFilled; // sum(trancheFilled) == filled
-        # 8 uint32[] trancheActivationTime;
+        # 2 bool canceled
+        # 3 uint32 startTime
+        # 4 uint32 startPrice
+        # 5 uint64 ocoGroup
+        # 6 uint256 filled
+        # 7 ElaboratedTrancheStatus[] trancheStatus
 
-        order = SwapOrder.load(obj[0])
-        fillFeeHalfBps = obj[1]
-        state = SwapOrderState.Canceled if obj[2] else SwapOrderState.Open
-        startTime = obj[3]
-        startPrice = obj[4]
-        ocoGroup = None if obj[5] == NO_OCO else obj[5]
-        # we ignore any fill values from the on-chain struct, because we will subsequently detect the DexorderSwapFilled events and add them in
-        filledIn = 0
-        filledOut = 0
-        trancheFilledIn = [0 for _ in range(len(obj[7]))]
-        trancheFilledOut = [0 for _ in range(len(obj[7]))]
-        trancheActivationTime = [int(i) for i in obj[8]]
-        return Class(order, fillFeeHalfBps, state, startTime, startPrice, ocoGroup,
-                     filledIn, filledOut, trancheFilledIn, trancheFilledOut, trancheActivationTime)
+        item = iter(obj)
+        order = SwapOrder.load(next(item))
+        fillFeeHalfBps = int(next(item))
+        canceled = next(item)
+        state = SwapOrderState.Canceled if canceled else SwapOrderState.Open
+        startTime = next(item)
+        startPrice = next(item)
+        ocoGroup = next(item)
+        if ocoGroup == NO_OCO:
+            ocoGroup = None
+        # we ignore any fill values from the on-chain struct, because we will subsequently detect the
+        # DexorderSwapFilled events and add them in
+        _ignore_filled = next(item)
+        trancheStatuses = [ElaboratedTrancheStatus.load_from_chain(ts) for ts in next(item)]
+        for ts in trancheStatuses:
+            ts.filledIn = 0
+            ts.filledOut = 0
+
+        return ElaboratedSwapOrderStatus(tx_id, order, fillFeeHalfBps, state, startTime, startPrice,
+                                         ocoGroup, 0, 0, trancheStatuses)
+
+    @staticmethod
+    def load(obj):
+        item = iter(obj)
+        tx_id = hexbytes(next(item))
+        order = SwapOrder.load(next(item))
+        fillFeeHalfBps = int(next(item))
+        state = SwapOrderState(next(item))
+        startTime = next(item)
+        startPrice = int(next(item))
+        ocoGroup = next(item)
+        if ocoGroup == NO_OCO:
+            ocoGroup = None
+        filledIn = int(next(item))  # convert from str
+        filledOut = int(next(item))
+        trancheStatus = [ElaboratedTrancheStatus.load(ts) for ts in next(item)]
+        return ElaboratedSwapOrderStatus(tx_id, order, fillFeeHalfBps, state, startTime, startPrice,
+                                         ocoGroup, filledIn, filledOut, trancheStatus)
 
     def dump(self):
         return (
-            self.order.dump(), self.fillFeeHalfBps, self.state.value, self.startTime, self.startPrice, self.ocoGroup,
-            str(self.filledIn), str(self.filledOut),
-            [str(f) for f in self.trancheFilledIn], [str(f) for f in self.trancheFilledOut],
-            [int(i) for i in self.trancheActivationTime]
+            self.tx_id, self.order.dump(), self.fillFeeHalfBps, self.state.value, self.startTime, str(self.startPrice),
+            self.ocoGroup, str(self.filledIn), str(self.filledOut), [ts.dump() for ts in self.trancheStatus]
         )
 
     def copy(self):
         return copy.deepcopy(self)
 
-
-@dataclass
-class ElaboratedSwapOrderStatus (SwapOrderStatus):
-    @staticmethod
-    def load_from_tx(tx_id: bytes, obj):
-        # noinspection PyTypeChecker
-        status: ElaboratedSwapOrderStatus = SwapOrderStatus.load_from_chain(obj, Class=ElaboratedSwapOrderStatus)
-        status.tx_id = tx_id
-        return status
-
-    # noinspection PyMethodOverriding
-    @staticmethod
-    def load(obj):
-        tx_id, *swaporder_args = obj
-        result = SwapOrderStatus.load(obj[1:], Class=ElaboratedSwapOrderStatus)
-        result.tx_id = tx_id
-        return result
-
-    # noinspection PyMissingConstructor
-    def __init__(self, order: SwapOrder, *swapstatus_args, tx_id=b''):
-        super().__init__(order, *swapstatus_args)
-        self.tx_id: bytes = tx_id
-
-    def dump(self):
-        return self.tx_id, *super().dump()
-
-    def copy(self):
-        return super().copy()
-
 
 NO_OCO = 18446744073709551615  # max uint64
 
 
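The string round trip in `ElaboratedTrancheStatus.dump`/`load` is what keeps uint256-sized fills intact once the tuple lands in a JSON document: consumers that read JSON numbers as IEEE-754 doubles (JavaScript, many JSONB pipelines) round anything past 2**53. A small demonstration:

```python
import json

filled_in = 2**130                         # a uint256-scale fill amount
assert float(filled_in) == float(filled_in + 1)  # doubles cannot tell these apart

row = (str(filled_in), '0', 1721410000, 1721410000, 1721413600)  # dump() shape
restored = json.loads(json.dumps(row))
assert int(restored[0]) == filled_in       # exact, because it traveled as a string
```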
@@ -224,10 +247,8 @@ class Tranche:
     startTime: int
     endTime: int
 
-    minIntercept: float
-    minSlope: float
-    maxIntercept: float
-    maxSlope: float
+    minLine: Line
+    maxLine: Line
 
 
     def fraction_of(self, amount):
@@ -250,40 +271,34 @@ class Tranche:
             obj[10],  # rateLimitPeriod
             obj[11],  # startTime
             obj[12],  # endTime
-            decode_IEEE754(obj[13]),  # minIntercept
-            decode_IEEE754(obj[14]),  # minSlope
-            decode_IEEE754(obj[15]),  # maxIntercept
-            decode_IEEE754(obj[16]),  # maxSlope
+            Line.load(obj[13]),  # minLine
+            Line.load(obj[14]),  # maxLine
         )
         return result
 
     def dump(self):
-        minB = encode_IEEE754(self.minIntercept)
-        minM = encode_IEEE754(self.minSlope)
-        maxB = encode_IEEE754(self.maxIntercept)
-        maxM = encode_IEEE754(self.maxSlope)
         return (
             self.fraction, self.startTimeIsRelative, self.endTimeIsRelative, self.minIsBarrier, self.maxIsBarrier, self.marketOrder,
             self.minIsRatio, self.maxIsRatio,
             False,  # _reserved7
             self.rateLimitFraction, self.rateLimitPeriod,
-            self.startTime, self.endTime, minB, minM, maxB, maxM,
+            self.startTime, self.endTime, self.minLine.dump(), self.maxLine.dump(),
         )
 
     def __str__(self):
         msg = f'{self.fraction/MAX_FRACTION:.1%} {"start+" if self.startTimeIsRelative else ""}{self.startTime} to {"start+" if self.startTimeIsRelative else ""}{self.endTime}'
         if self.marketOrder:
-            # for marketOrders, minIntercept is the slippage
-            msg += f' market order slippage {self.minIntercept:.2%}'
+            # for marketOrders, minLine.intercept is the slippage
+            msg += f' market order slippage {self.minLine.intercept:.2%}'
         else:
-            if self.minIntercept or self.minSlope:
-                msg += f' >{self.minIntercept:.5g}'
-                if self.minSlope:
-                    msg += f'{self.minSlope:+.5g}'
-            if self.maxIntercept or self.maxSlope:
-                msg += f' <{self.maxIntercept:.5g}'
-                if self.maxSlope:
-                    msg += f'{self.maxSlope:+.5g}'
+            if self.minLine.intercept or self.minLine.slope:
+                msg += f' >{self.minLine.intercept:.5g}'
+                if self.minLine.slope:
+                    msg += f'{self.minLine.slope:+.5g}'
+            if self.maxLine.intercept or self.maxLine.slope:
+                msg += f' <{self.maxLine.intercept:.5g}'
+                if self.maxLine.slope:
+                    msg += f'{self.maxLine.slope:+.5g}'
         if self.rateLimitPeriod:
             msg += f' {self.rateLimitFraction/MAX_FRACTION:.1%} every {self.rateLimitPeriod/60:.0} minutes'
         return msg
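With `minLine`/`maxLine` in place, a tranche's acceptable price band at any moment is just two line evaluations. A hypothetical helper to make that concrete — `TrancheStub` carries only the two new fields, whereas the real `Tranche` has many more:

```python
from dataclasses import dataclass

@dataclass
class Line:
    intercept: float
    slope: float

    def value(self, timestamp):
        return self.intercept + self.slope * timestamp

@dataclass
class TrancheStub:            # hypothetical stand-in for the two new Tranche fields
    minLine: Line
    maxLine: Line

def price_window(t: TrancheStub, timestamp: float) -> tuple[float, float]:
    # the band a fill would have to land in at `timestamp`
    return t.minLine.value(timestamp), t.maxLine.value(timestamp)

t = TrancheStub(Line(1.00, 0.0), Line(1.05, -0.0001))
print(price_window(t, 300))   # ~(1.0, 1.02): the ceiling decays toward the floor
```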
@@ -293,6 +308,9 @@ class Tranche:
 class PriceProof:
     proof: int
 
+    def dump(self):
+        return (self.proof,)
+
 
 class OcoMode (Enum):
     NO_OCO = 0
@@ -58,7 +58,7 @@ async def main():
 
     runner = BlockStateRunner(state, publish_all=publish_all if redis_state else None, timer_period=0)
     runner.add_event_trigger(handle_uniswap_swaps, get_contract_event('IUniswapV3PoolEvents', 'Swap'), multi=True)
-    runner.postprocess_cbs.append(check_ohlc_rollover)
+    runner.add_callback(check_ohlc_rollover)
     runner.on_promotion.append(finalize_callback)
     if db:
         # noinspection PyUnboundLocalVariable
@@ -23,6 +23,8 @@ while True:
 def bits(b0, b1):
     bit(b0); bit(b1)
 
+
+# noinspection PyBroadException
 try:
     i = int(i)
     assert 1 <= i <= 6
@@ -1,7 +1,7 @@
 import logging
 from asyncio import CancelledError
 
-from dexorder import db, blockchain, config
+from dexorder import db, blockchain
 from dexorder.base.chain import current_chain
 from dexorder.bin.executable import execute
 from dexorder.blockstate import current_blockstate
@@ -9,16 +9,16 @@ from dexorder.blockstate.blockdata import BlockData
 from dexorder.blockstate.db_state import DbState
 from dexorder.blockstate.fork import current_fork
 from dexorder.contract import get_contract_event
-from dexorder.contract.dexorder import get_factory_contract, get_dexorder_contract
-from dexorder.event_handler import init, dump_log, handle_vault_created, handle_order_placed, \
-    handle_transfer, handle_swap_filled, handle_order_canceled, handle_order_cancel_all, handle_dexorderexecutions, \
-    activate_time_triggers, activate_price_triggers, \
-    process_active_tranches, process_execution_requests, check_ohlc_rollover, handle_uniswap_swaps, handle_vault_logic_changed
+from dexorder.contract.dexorder import get_dexorder_contract
+from dexorder.event_handler import (init, dump_log, handle_vault_created, handle_order_placed,
+    handle_transfer, handle_swap_filled, handle_order_canceled, handle_order_cancel_all,
+    handle_uniswap_swaps, handle_vault_logic_changed)
 from dexorder.memcache import memcache
 from dexorder.memcache.memcache_state import RedisState, publish_all
-from dexorder.order.triggers import activate_orders
+from dexorder.order.executionhandler import handle_dexorderexecutions, execute_tranches
+from dexorder.order.triggers import activate_orders, end_trigger_updates
 from dexorder.runner import BlockStateRunner
-from dexorder.transaction import handle_transaction_receipts, finalize_transactions
+from dexorder.transactions import handle_transaction_receipts, finalize_transactions
 
 log = logging.getLogger('dexorder')
 LOG_ALL_EVENTS = False  # for debug todo config
@@ -44,7 +44,10 @@ def setup_logevent_triggers(runner):
     else:
         executions = dexorder.events.DexorderExecutions()
 
-    runner.add_event_trigger(init)
+    # the callbacks are run even if there's no blocks and the regular timer triggers. event triggers only run when
+    # a block is received.
+
+    runner.add_callback(init)
     runner.add_event_trigger(handle_vault_created, get_contract_event('Vault', 'VaultCreated'))
     runner.add_event_trigger(handle_vault_logic_changed, get_contract_event('Vault', 'VaultLogicChanged'))
     runner.add_event_trigger(handle_order_placed, get_contract_event('OrderLib', 'DexorderSwapPlaced'))
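The comment above is the crux of the trigger rework: `add_callback` work runs on every tick, including pure timer ticks, while `add_event_trigger` work fires only when a block delivers matching logs. A toy model of that contract — not the real `BlockStateRunner`, just the scheduling distinction it describes:

```python
class TinyRunner:
    """Toy model: callbacks fire every tick; event triggers only on matching logs."""
    def __init__(self):
        self.callbacks = []
        self.event_triggers = []

    def add_callback(self, fn):
        self.callbacks.append(fn)

    def add_event_trigger(self, fn, event=None):
        self.event_triggers.append((fn, event))

    def tick(self, block=None):
        if block is not None:                     # a real block arrived
            for fn, event in self.event_triggers:
                logs = block.get(event, [])
                if logs:
                    fn(logs)
        for fn in self.callbacks:                 # timer-only ticks reach these too
            fn()

runner = TinyRunner()
runner.add_callback(lambda: print('callback tick'))
runner.add_event_trigger(lambda logs: print('swap logs', logs), event='Swap')
runner.tick()                                     # timer only: callback fires alone
runner.tick(block={'Swap': ['log0']})             # block: both fire
```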
@@ -53,15 +56,12 @@ def setup_logevent_triggers(runner):
     runner.add_event_trigger(handle_swap_filled, get_contract_event('OrderLib', 'DexorderSwapFilled'))
     runner.add_event_trigger(handle_order_canceled, get_contract_event('OrderLib', 'DexorderSwapCanceled'))
     runner.add_event_trigger(handle_order_cancel_all, get_contract_event('OrderLib', 'DexorderCancelAll'))
 
     runner.add_event_trigger(handle_transaction_receipts)  # todo handle only the transactions that were posted to this block
     runner.add_event_trigger(handle_dexorderexecutions, executions)
 
-    # these callbacks run after the ones above on each block, plus these also run every second
-    runner.postprocess_cbs.append(check_ohlc_rollover)
-    runner.postprocess_cbs.append(activate_time_triggers)
-    runner.postprocess_cbs.append(activate_price_triggers)
-    runner.postprocess_cbs.append(process_active_tranches)
-    runner.postprocess_cbs.append(process_execution_requests)
+    runner.add_callback(end_trigger_updates)
+    runner.add_callback(execute_tranches)
 
 
 # noinspection DuplicatedCode
@@ -146,6 +146,7 @@ async def main():
     log.debug(f'Mirroring tokens')
     txs = []
     for t in tokens:
+        # noinspection PyBroadException
         try:
             info = await get_token_info(t)
             # anvil had trouble estimating the gas, so we hardcode it.
@@ -163,6 +164,7 @@ async def main():
     log.debug(f'Mirroring pools {", ".join(pools)}')
     txs = []
     for pool, info in zip(pools, pool_infos):
+        # noinspection PyBroadException
        try:
             # anvil had trouble estimating the gas, so we hardcode it.
             tx = await mirrorenv.transact.mirrorPool(info, gas=5_500_000)
@@ -12,7 +12,6 @@ from typing import Union, Optional
 
 from cachetools import LRUCache
 from sqlalchemy import select
-from sqlalchemy.dialects.postgresql import insert
 
 from dexorder import current_w3, config, db, Blockchain
 from dexorder.base.block import Block, BlockInfo, latest_block
@@ -56,18 +55,11 @@ async def _fetch(fetch: FetchLock, chain_id: int, block_id: Union[int,bytes]) ->
             return Block(chain_id, found.data)
 
     # fetch from RPC
-    try:
-        if type(block_id) is int:
-            fetch.result = await fetch_block_by_number(block_id, chain_id=chain_id)
-        else:
-            fetch.result = await fetch_block(block_id, chain_id=chain_id)
-        return fetch.result
-    except Exception as e:
-        fetch.exception = e
-        fetch.result = None
-        raise
-    finally:
-        fetch.ready.set()
+    if type(block_id) is int:
+        fetch.result = await fetch_block_by_number(block_id, chain_id=chain_id)
+    else:
+        fetch.result = await fetch_block(block_id, chain_id=chain_id)
+    return fetch.result
 
 
 _lru = LRUCache[tuple[int, Union[int,bytes]], Block](maxsize=256)
@@ -78,9 +70,9 @@ def cache_block(block: Block, confirmed=False):
     _lru[block.chain_id, block.hash] = block
     _lru[block.chain_id, block.height] = block
     if db:
-        db.session.execute(insert(DbBlock).values(
+        db.session.add(DbBlock(
             chain=block.chain_id, hash=block.hash, height=block.height, timestamp=block.timestamp,
-            confirmed=confirmed, data=block.data).on_conflict_do_nothing())
+            confirmed=confirmed, data=block.data))
 
 
 async def get_block(block_id: Union[bytes,int], *, chain_id=None) -> Block:
@@ -91,7 +83,6 @@ async def get_block(block_id: Union[bytes,int], *, chain_id=None) -> Block:
     key = chain_id, block_id
     # try LRU cache synchronously first
     try:
-        # log.debug(f'\thit LRU')
         return _lru[key]
     except KeyError:
         pass
@@ -118,6 +109,7 @@ async def get_block(block_id: Union[bytes,int], *, chain_id=None) -> Block:
     finally:
         # log.debug(f'fetch.result {fetch.result}')
         del _fetch_locks[key]
+        fetch.ready.set()
     # log.debug(f'\t{fetch.result}')
     return fetch.result
 
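Taken together, the `_fetch` and `get_block` hunks move the waiter hand-off out of the fetch coroutine: exceptions now propagate naturally, and `fetch.ready` is set in the caller's `finally` so concurrent waiters are released even when the RPC call failed. A minimal model of that pattern, assuming `ready` is an `asyncio.Event` (the real `FetchLock` internals are not shown in this diff):

```python
import asyncio

class FetchLock:                       # assumed shape, for illustration only
    def __init__(self):
        self.ready = asyncio.Event()
        self.result = None

async def fetch_once(locks, key, fetch):
    lock = locks.get(key)
    if lock is not None:               # someone else is already fetching this key
        await lock.ready.wait()
        return lock.result
    lock = locks[key] = FetchLock()
    try:
        lock.result = await fetch(key)
        return lock.result
    finally:
        del locks[key]
        lock.ready.set()               # release waiters whether we returned or raised
```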
@@ -13,7 +13,6 @@ from .diff import DiffEntry, DELETE, DiffEntryItem
 from ..base.block import Block
 from ..base.chain import current_chain
 from ..blocks import promotion_height
-from ..util import hexstr
 
 log = logging.getLogger(__name__)
 state_log = logging.getLogger('dexorder.state')
@@ -158,14 +157,16 @@ class BlockState:
     def remove_branch(self, branch: Branch, *, remove_series_diffs=True):
         if branch.height == self.height and len(self.branches_by_height[branch.height]) == 1:
             # this is the only branch at this height: compute the new lower height
-            self.height = max(0, *[b.height for b in self.branches_by_id.values() if b is not branch])
+            other_heights = [b.height for b in self.branches_by_id.values() if b is not branch]
+            self.height = 0 if not other_heights else max(0, *other_heights)
         del self.branches_by_id[branch.id]
-        by_height = self.branches_by_height.get(branch.height)
-        if by_height is not None:
-            by_height.remove(branch)
-            if len(by_height) == 0:
-                # garbage collect empty arrays
-                del self.branches_by_height[branch.height]
+        if self.height:
+            by_height = self.branches_by_height.get(branch.height)
+            if by_height is not None:
+                by_height.remove(branch)
+                if len(by_height) == 0:
+                    # garbage collect empty arrays
+                    del self.branches_by_height[branch.height]
         try:
             del self.unloads[branch.id]
         except KeyError:
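The `remove_branch` change guards a real crash: with star-unpacking, an empty candidate list collapses `max(0, *heights)` to `max(0)`, which raises because a lone `int` is not iterable. A short reproduction of the failure and the fallback the rewrite uses:

```python
heights = []                       # removing the only branch leaves no candidates
try:
    max(0, *heights)               # becomes max(0) -> TypeError
except TypeError as e:
    print(e)                       # "'int' object is not iterable"
height = 0 if not heights else max(0, *heights)
assert height == 0
```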
@@ -31,6 +31,12 @@ class Config:
     accounts: Optional[dict[str,str]] = field(default_factory=dict)  # account aliases
     min_gas: str = '0'
 
+    # Order slashing
+    slash_kill_count: int = 5
+    slash_delay_base: float = 60  # one minute
+    slash_delay_mul: float = 2  # double the delay each time
+    slash_delay_max: int = 15 * 60
+
     walker_name: str = 'default'
     walker_flush_interval: float = 300
     walker_stop: Optional[int] = None  # block number of the last block the walker should process
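The four slashing fields read as an exponential backoff: start at `slash_delay_base`, multiply by `slash_delay_mul` per failure, cap at `slash_delay_max`, and give up after `slash_kill_count` attempts. How the order-slashing code consumes them is not shown in this diff, so the generator below is only the schedule those defaults would imply:

```python
def slash_delays(base=60.0, mul=2.0, maximum=15 * 60, count=5):
    delay = base
    for _ in range(count):
        yield min(delay, maximum)
        delay *= mul

print(list(slash_delays()))  # [60.0, 120.0, 240.0, 480.0, 900]
```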
@@ -1,14 +1,15 @@
 test_accounts = {
-    'test0': '0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80',
-    'test1': '0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d',
-    'test2': '0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a',
-    'test3': '0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6',
-    'test4': '0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a',
-    'test5': '0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba',
-    'test6': '0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e',
-    'test7': '0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356',
-    'test8': '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97',
-    'test9': '0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6',
+    # 'account_name': '0x_private_key',  # public address
+    'test0': '0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80',  # 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266
+    'test1': '0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d',  # 0x70997970C51812dc3A010C7d01b50e0d17dc79C8
+    'test2': '0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a',  # 0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC
+    'test3': '0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6',  # 0x90F79bf6EB2c4f870365E785982E1f101E93b906
+    'test4': '0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a',  # 0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65
+    'test5': '0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba',  # 0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc
+    'test6': '0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e',  # 0x976EA74026E726554dB657fA54763abd0C3a0aa9
+    'test7': '0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356',  # 0x14dC79964da2C08b23698B3D3cc7Ca32193d9955
+    'test8': '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97',  # 0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f
+    'test9': '0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6',  # 0xa0Ee7A142d267C1f36714E4a8F75612F20a79720
 }
 
 default_accounts_config = {}
@@ -1,8 +1,10 @@
+import glob
 import json
 import os
 
 from eth_abi.exceptions import InsufficientDataBytes
 from eth_utils import to_checksum_address
+from typing_extensions import Union
 from web3.exceptions import BadFunctionCallOutput, ContractLogicError
 
 from .abi import abis
@@ -13,20 +15,31 @@ from ..base.chain import current_chain
 CONTRACT_ERRORS = (InsufficientDataBytes, ContractLogicError, BadFunctionCallOutput)
 
 
-def get_abi(name, filename=None):
-    return get_contract_data(name, filename)['abi']
+# set initially to the string filename, then loaded on demand and set to the parsed JSON result
+_contract_data: dict[str,Union[str,dict]] = {}
+
+# finds all .sol files and sets _contract_data with their pathname
+for _file in glob.glob('../contract/out/**/*.sol/*.json', recursive=True):
+    if os.path.isfile(_file):
+        _contract_data[os.path.basename(_file)[:-5]] = _file
 
 
-def get_contract_data(name, filename=None):
-    if filename is None and name in abis:
+def get_abi(name):
+    return get_contract_data(name)['abi']
+
+
+def get_contract_data(name):
+    try:
         return {'abi':abis[name]}
-    if filename is None and name == "Vault" and os.path.exists(f'../contract/out/IVault.sol/IVault.json') :
-        # logging.debug("getting abi from IVault.json instead of Vault.json")
-        name = "IVault"  # Special case for proxy Vault
-    if filename is None:
-        filename = name
-    with open(f'../contract/out/{filename}.sol/{name}.json', 'rt') as file:
-        return json.load(file)
+    except KeyError:
+        pass
+    if name == 'Vault':
+        name = 'IVault'  # special exception due to use of a proxy
+    entry = _contract_data[name]
+    if type(entry) is str:
+        with open(entry, 'rt') as file:
+            entry = _contract_data[name] = json.load(file)
+    return entry
 
 
 def get_deployment_address(deployment_name, contract_name, *, chain_id=None):
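Usage stays the same after this rework, but the cost profile changes: the glob scan at import time records one path per compiled artifact, and the first `get_contract_data` call for a name replaces that path with the parsed JSON, so repeated ABI lookups stop touching the filesystem. A hypothetical session, assuming a forge-style `../contract/out/` tree next to the working directory:

```python
abi = get_abi('OrderLib')     # first call opens OrderLib.sol/OrderLib.json and caches the dict
abi = get_abi('OrderLib')     # second call is a pure dict lookup
vault_abi = get_abi('Vault')  # rerouted to IVault because vaults sit behind a proxy
```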
@@ -7,7 +7,6 @@ from web3.exceptions import Web3Exception
 from web3.types import TxReceipt, TxData
 
 from dexorder import current_w3, Account
-from dexorder.base.account import current_account
 from dexorder.blockstate.fork import current_fork
 from dexorder.util import hexstr
 
@@ -91,7 +90,7 @@ def transact_wrapper(addr, name, func):
     return f
 
 
-def build_wrapper(addr, name, func):
+def build_wrapper(_addr, _name, func):
     async def f(*args, **kwargs):
         tx = await func(*args).build_transaction(kwargs)
         return ContractTransaction(tx)
@@ -32,6 +32,7 @@ class Kv:
             found.value = value
 
     def __delitem__(self, key: str):
+        # noinspection PyTypeChecker
         db.session.query(KeyValue).filter(KeyValue.key == key).delete()
 
     def get(self, key: str, default=None):
@@ -3,11 +3,10 @@ from enum import Enum
 from typing import Optional
 
 import sqlalchemy as sa
-from sqlalchemy import ForeignKey
-from sqlalchemy.orm import mapped_column, Mapped, relationship
+from sqlalchemy.orm import mapped_column, Mapped
 
-from dexorder.base.order import TransactionRequest as TransactionRequestDict, deserialize_transaction_request
-from dexorder.database.column import Dict, Bytes, UUID_PK, Blockchain, UUID
+from dexorder.base import TransactionRequest, transaction_request_registry
+from dexorder.database.column import Dict, Bytes, UUID_PK, Blockchain
 from dexorder.database.column_types import DataclassDict
 from dexorder.database.model import Base
 
@@ -18,6 +17,7 @@ class TransactionJobState (Enum):
     Signed = 'n'  # tx has been signed
     Sent = 's'  # tx has been delivered to a node
     Mined = 'z'  # mined on at least one fork, whether reverted or not. todo handle forks that didnt confirm: receipts are per-fork!
+    Error = 'x'  # an exception has prevented this job from sending a transaction
 
 
 # noinspection PyProtectedMember
@@ -25,12 +25,21 @@ class TransactionJobState (Enum):
 TransactionJobStateColumnType = sa.Enum(TransactionJobState)
 
 
+def deserialize_transaction_request(**d):
+    t = d['type']
+    Class = transaction_request_registry.get(t)
+    if Class is None:
+        raise ValueError(f'No TransactionRequest for type "{t}"')
+    # noinspection PyArgumentList
+    return Class(**d)
+
+
 class TransactionJob (Base):
     id: Mapped[UUID_PK]
     chain: Mapped[Blockchain] = mapped_column(index=True)
     height: Mapped[int] = mapped_column(index=True)  # the height at which the job was created, to be used for timeout/ data rolloff and/or by Timescale
     state: Mapped[TransactionJobState] = mapped_column(TransactionJobStateColumnType, default=TransactionJobState.Requested, index=True)
-    request: Mapped[TransactionRequestDict] = mapped_column(DataclassDict(deserialize_transaction_request))
+    request: Mapped[TransactionRequest] = mapped_column(DataclassDict(deserialize_transaction_request))
     tx_id: Mapped[Optional[Bytes]] = mapped_column(index=True)
     tx_data: Mapped[Optional[Bytes]]
     receipt: Mapped[Optional[Dict]]
@@ -1,30 +1,21 @@
 import asyncio
-import itertools
 import logging
-from uuid import UUID
 
 from web3.types import EventData
 
-from dexorder import current_pub, db, from_timestamp, minutely
-from dexorder.base.chain import current_chain, current_clock
-from dexorder.base.order import TrancheExecutionRequest, TrancheKey, ExecutionRequest, new_tranche_execution_request, \
-    OrderKey
+from dexorder import current_pub, minutely
+from dexorder.base.chain import current_chain
+from dexorder.base.order import TrancheKey, OrderKey
 from dexorder.base.orderlib import SwapOrderState
 from dexorder.blocks import get_block_timestamp
-from dexorder.blockstate.fork import current_fork
-from dexorder.contract import ERC20
 from dexorder.contract.dexorder import vault_address, VaultContract, get_factory_contract
-from dexorder.database.model.transaction import TransactionJob
 from dexorder.logics import get_logic_version
-from dexorder.ohlc import ohlcs, recent_ohlcs
+from dexorder.ohlc import ohlcs
 from dexorder.order.orderstate import Order
-from dexorder.order.triggers import OrderTriggers, price_triggers, time_triggers, \
-    unconstrained_price_triggers, execution_requests, inflight_execution_requests, TrancheStatus, active_tranches, \
-    new_price_triggers, activate_order, close_order_and_disable_triggers
+from dexorder.order.triggers import (OrderTriggers, activate_order, update_balance_triggers, start_trigger_updates,
+    update_price_triggers)
 from dexorder.pools import new_pool_prices, pool_prices, get_uniswap_data
-from dexorder.transaction import submit_transaction_request
-from dexorder.util.async_util import maywait
-from dexorder.vault_blockdata import vault_owners, vault_balances, adjust_balance, MAX_VAULTS, verify_vault
+from dexorder.vault_blockdata import vault_owners, adjust_balance, MAX_VAULTS, verify_vault
 
 log = logging.getLogger(__name__)
 
@@ -35,7 +26,7 @@ def dump_log(eventlog):
 
 def init():
     new_pool_prices.clear()
-    new_price_triggers.clear()
+    start_trigger_updates()
 
 
 async def handle_order_placed(event: EventData):
@@ -44,6 +35,9 @@ async def handle_order_placed(event: EventData):
     addr = event['address']
     start_index = int(event['args']['startOrderIndex'])
     num_orders = int(event['args']['numOrders'])
+    # todo accounting
+    order_fee = int(event['args']['orderFee'])
+    gas_fee = int(event['args']['gasFee'])
     log.debug(f'DexorderPlaced {addr} {start_index} {num_orders}')
     if not await verify_vault(addr):
         log.warning(f'Discarding order from rogue vault {addr}.')
@@ -62,6 +56,7 @@ async def handle_order_placed(event: EventData):
 
 
 def handle_swap_filled(event: EventData):
+    log.debug('handle_swap_filled')
     # event DexorderSwapFilled (uint64 orderIndex, uint8 trancheIndex, uint256 amountIn, uint256 amountOut);
     log.debug(f'DexorderSwapFilled {event}')
     args = event['args']
@@ -70,6 +65,7 @@ def handle_swap_filled(event: EventData):
     tranche_index = args['trancheIndex']
     amount_in = args['amountIn']
     amount_out = args['amountOut']
+    # todo accounting
     fill_fee = args['fillFee']
     next_execution_time = args['nextExecutionTime']
     try:
@@ -77,7 +73,7 @@ def handle_swap_filled(event: EventData):
     except KeyError:
         log.warning(f'DexorderSwapFilled IGNORED due to missing order {vault} {order_index}')
         return
-    order.status.trancheActivationTime[tranche_index] = next_execution_time  # update rate limit
+    order.status.trancheStatus[tranche_index].activationTime = next_execution_time  # update rate limit
     try:
         triggers = OrderTriggers.instances[order.key]
     except KeyError:
@@ -118,28 +114,27 @@ async def handle_transfer(transfer: EventData):
|
|||||||
# log.debug(f'Transfer {transfer}')
|
# log.debug(f'Transfer {transfer}')
|
||||||
from_address = transfer['args']['from']
|
from_address = transfer['args']['from']
|
||||||
to_address = transfer['args']['to']
|
to_address = transfer['args']['to']
|
||||||
|
if to_address == from_address:
|
||||||
|
return
|
||||||
amount = int(transfer['args']['value'])
|
amount = int(transfer['args']['value'])
|
||||||
if to_address in vault_owners and to_address != from_address:
|
if to_address in vault_owners:
|
||||||
log.debug(f'deposit {to_address} {amount}')
|
log.debug(f'deposit {to_address} {amount}')
|
||||||
vault = to_address
|
vault = to_address
|
||||||
token_address = transfer['address']
|
elif from_address in vault_owners:
|
||||||
await adjust_balance(vault, token_address, amount)
|
|
||||||
if from_address in vault_owners and to_address != from_address:
|
|
||||||
log.debug(f'withdraw {to_address} {amount}')
|
log.debug(f'withdraw {to_address} {amount}')
|
||||||
vault = from_address
|
vault = from_address
|
||||||
|
else:
|
||||||
|
vault = None
|
||||||
|
if vault is not None:
|
||||||
token_address = transfer['address']
|
token_address = transfer['address']
|
||||||
await adjust_balance(vault, token_address, amount)
|
await adjust_balance(vault, token_address, amount)
|
||||||
# if to_address not in vault_owners and from_address not in vault_owners:
|
await update_balance_triggers(vault, token_address, amount)
|
||||||
# vaults = vault_owners.keys()
|
|
||||||
# log.debug(f'vaults: {list(vaults)}')
|
|
||||||
|
|
||||||
|
|
||||||
async def handle_uniswap_swaps(swaps: list[EventData]):
    # asynchronously prefetch the block timestamps we'll need
-    block_ids = set(swap['blockHash'] for swap in swaps)
-    for batch in itertools.batched(block_ids, 4):
-        await asyncio.gather(*[get_block_timestamp(h) for h in batch])
+    hashes = set(swap['blockHash'] for swap in swaps)
+    await asyncio.gather(*[get_block_timestamp(h) for h in hashes])

    # now execute the swaps synchronously
    for swap in swaps:
        await handle_uniswap_swap(swap)
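
The old loop throttled the prefetch with itertools.batched (Python 3.12+), four hashes per await; the new code fires one gather over every hash at once. If throttling were ever wanted again, a semaphore achieves it without the artificial batch boundaries — a minimal sketch, assuming get_block_timestamp is the same coroutine used above:

    import asyncio

    async def prefetch_timestamps(hashes, limit=4):
        sem = asyncio.Semaphore(limit)  # at most `limit` RPC lookups in flight
        async def one(h):
            async with sem:
                await get_block_timestamp(h)
        await asyncio.gather(*(one(h) for h in hashes))
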
@@ -153,6 +148,7 @@ async def handle_uniswap_swap(swap: EventData):
    addr = pool['address']
    pool_prices[addr] = price
    await ohlcs.update_all(addr, time, price)
+    await update_price_triggers(pool, price)
    log.debug(f'pool {addr} {minutely(time)} {price}')

@@ -198,155 +194,3 @@ async def handle_vault_logic_changed(upgrade: EventData):
    version = await get_logic_version(logic)
    log.debug(f'Vault {addr} upgraded to logic version {version}')
-
-
-async def activate_time_triggers():
-    now = current_clock.get().timestamp
-    # log.debug(f'activating time triggers at {now}')
-    # time triggers
-    for tt in tuple(time_triggers):
-        await maywait(tt(now))
-
-
-async def activate_price_triggers():
-    # log.debug(f'activating price triggers')
-    pools_triggered = set()
-    for pool, price in new_pool_prices.items():
-        pools_triggered.add(pool)
-        for pt in tuple(price_triggers[pool]):
-            await maywait(pt(price))
-    for pool, triggers in new_price_triggers.items():
-        if pool not in pools_triggered:
-            price = pool_prices[pool]
-            for pt in triggers:
-                await maywait(pt(price))
-    for t in tuple(unconstrained_price_triggers):
-        await maywait(t(None))
-
-
-async def process_active_tranches():
-    for tk, proof in active_tranches.items():
-        old_req = execution_requests.get(tk)
-        height = current_fork.get().height
-        if old_req is None or old_req.height <= height:  # '<=' is used so proof is updated with more recent values
-            if await has_funds(tk):
-                log.info(f'execution request for {tk}')
-                execution_requests[tk] = ExecutionRequest(height, proof)
-            # else:
-            #     log.debug(f'underfunded tranche {tk}')
-
-
-async def has_funds(tk: TrancheKey):
-    # log.debug(f'has funds? {tk.vault}')
-    order = Order.of(tk)
-    minimum = order.status.order.minFillAmount if order.amount_is_input else 0
-    balances = vault_balances.get(tk.vault, {})
-    token_addr = order.status.order.tokenIn
-    token_balance = balances.get(token_addr)
-    if token_balance is None:
-        # unknown balance
-        token_balance = balances[token_addr] = await ERC20(token_addr).balanceOf(tk.vault)
-        log.debug(f'queried token balance {token_addr}.balanceOf({tk.vault}) = {token_balance}')
-        await adjust_balance(tk.vault, token_addr, token_balance)
-    # log.debug(f'minimum {minimum} balances {token_addr} {balances}')
-    return token_balance > minimum
-
-
-async def process_execution_requests():
-    height = current_fork.get().height
-    execs = {}  # which requests to act on
-    for tk, er in execution_requests.items():
-        tk: TrancheKey
-        er: ExecutionRequest
-        pending = inflight_execution_requests.get(tk)
-        if pending is None or height-pending >= 30:
-            # todo execution timeout => retry ; should we use timestamps? configure per-chain.
-            # todo check balances
-            log.warning(f're-sending unconfirmed transaction {tk} is pending execution')
-            execs[tk] = er
-        else:
-            log.debug(f'tranche {tk} is pending execution')
-
-    # execute the list
-    # todo batch execution
-    for tk, er in execs.items():
-        job = submit_transaction_request(new_tranche_execution_request(tk, er.proof))
-        inflight_execution_requests[tk] = height
-        log.info(f'created job {job.id} to execute tranche {tk}')
-
-
-def handle_dexorderexecutions(event: EventData):
-    log.debug(f'executions {event}')
-    exe_id = UUID(bytes=event['args']['id'])
-    errors = event['args']['errors']
-    if len(errors) == 0:
-        log.warning(f'No errors found in DexorderExecutions event: {event}')
-        return
-    if len(errors) > 1:
-        log.warning(f'Multiple executions not yet implemented')
-    job: TransactionJob = db.session.get(TransactionJob, exe_id)
-    if job is None:
-        log.warning(f'Job {exe_id} not found!')
-        return
-    finish_execution_request(job.request, errors[0])
-
-
-def finish_execution_request(req: TrancheExecutionRequest, error: str):
-    try:
-        order: Order = Order.of(req.vault, req.order_index)
-    except KeyError:
-        log.error(f'Could not get order {OrderKey(req.vault, req.order_index)}')
-        return
-    tk = TrancheKey(req.vault, req.order_index, req.tranche_index)
-    try:
-        del execution_requests[tk]
-    except KeyError:
-        pass
-    if error != '':
-        log.debug(f'execution request for tranche {tk} had error "{error}"')
-    if error == '':
-        log.debug(f'execution request for tranche {tk} was successful!')
-    elif error == 'IIA':
-        # Insufficient Input Amount or Safe Transfer Failure: suspend execution until new funds are sent
-        # todo vault balance checks
-        token = order.order.tokenIn
-        log.debug(f'insufficient funds {req.vault} {token} ')
-    elif error == 'SPL':
-        # Square-root price limit from Uniswap means we asked for a limit price that isn't met. This is a fault of
-        # vault logic if it happens.
-        log.warning(f'SPL when executing tranche {tk}')
-        close_order_and_disable_triggers(order, SwapOrderState.Error)
-    elif error == 'NO':
-        # order is not open
-        log.warning(f'order {OrderKey(tk.vault,tk.order_index)} was closed, undetected!')
-        close_order_and_disable_triggers(order, SwapOrderState.Error)
-    elif error == 'TF':
-        # Tranche Filled
-        log.warning(f'tranche already filled {tk}')
-        try:
-            triggers = OrderTriggers.instances[order.key]
-            tranche_trigger = triggers.triggers[tk.tranche_index]
-        except KeyError:
-            pass
-        else:
-            tranche_trigger.status = TrancheStatus.Filled
-            tranche_trigger.disable()
-    elif error == 'Too little received':
-        # from UniswapV3 SwapRouter when not even 1 satoshi of output was gained
-        log.debug('warning: de minimis liquidity in pool')
-        # todo dont keep trying
-    else:
-        # todo slash and backoff
-        log.error(f'Unhandled execution error for transaction request {req} ERROR: "{error}"')
-
-
-last_ohlc_rollover = 0
-async def check_ohlc_rollover():
-    global last_ohlc_rollover
-    time = await get_block_timestamp(current_fork.get().head_identifier)
-    dt = from_timestamp(time)
-    diff = time - last_ohlc_rollover
-    if diff >= 60 or dt.minute != from_timestamp(last_ohlc_rollover).minute:
-        for (symbol, period) in recent_ohlcs.keys():
-            await ohlcs.update(symbol, period, dt)
-        last_ohlc_rollover = time
@@ -12,6 +12,6 @@ async def get_logic_version(addr):
    try:
        return logics[addr]
    except KeyError:
-        version = await ContractProxy(addr, abi=get_abi('IVaultLogic', 'IVault')).version()
+        version = await ContractProxy(addr, abi=get_abi('IVaultLogic')).version()
        logics[addr] = version
        return version

@@ -1,37 +1,171 @@
 import logging
+from typing import Optional
 from uuid import UUID

 from web3.exceptions import ContractPanicError, ContractLogicError
+from web3.types import EventData

 from dexorder import db
-from dexorder.base.order import TrancheExecutionRequest, TrancheKey
-from dexorder.transaction import TransactionHandler
+from dexorder.base.order import TrancheKey, OrderKey
+from dexorder.base.orderlib import SwapOrderState, PriceProof
 from dexorder.contract.dexorder import get_dexorder_contract
 from dexorder.database.model.transaction import TransactionJob
-from dexorder.order.triggers import inflight_execution_requests
+from dexorder.order.orderstate import Order
+from dexorder.order.triggers import (inflight_execution_requests, OrderTriggers,
+                                     close_order_and_disable_triggers, TrancheState, active_tranches)
+from dexorder.transactions import TransactionHandler, TrancheExecutionRequest, submit_transaction_request, \
+    new_tranche_execution_request
+from dexorder.util import hexbytes

 log = logging.getLogger(__name__)


 class TrancheExecutionHandler (TransactionHandler):
     def __init__(self):
         super().__init__('te')

     async def build_transaction(self, job_id: UUID, req: TrancheExecutionRequest) -> dict:
-        # noinspection PyBroadException
+        tk = req.tranche_key
         try:
             return await get_dexorder_contract().build.execute(job_id.bytes, (req.vault, req.order_index, req.tranche_index, req.price_proof))
-        except (ContractPanicError, ContractLogicError) as x:
-            # todo if there's a logic error we shouldn't keep trying
-            log.error(f'While executing job {job_id}: {x}')
-            await self.complete_transaction(db.session.get(TransactionJob, job_id))
-        except Exception:
-            log.exception(f'Could not send execution request {req}')
+        except ContractPanicError as x:
+            exception = x
+            errcode = ''
+        except ContractLogicError as x:
+            exception = x
+            errcode = hexbytes(x.args[1]).decode('utf-8')
+        log.error(f'While building execution for tranche {tk}: {errcode}')
+        # if there's a logic error we shouldn't keep trying
+        finish_execution_request(tk, errcode)
+        raise exception

     async def complete_transaction(self, job: TransactionJob) -> None:
+        # noinspection PyTypeChecker
         req: TrancheExecutionRequest = job.request
         tk = TrancheKey(req.vault, req.order_index, req.tranche_index)
         log.debug(f'completing execution request {tk}')
-        del inflight_execution_requests[tk]
+        finish_execution_request(tk)


 TrancheExecutionHandler()  # map 'te' to a TrancheExecutionHandler


+def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
+    order_key = OrderKey(tk.vault, tk.order_index)
+    try:
+        order: Order = Order.of(order_key)
+    except KeyError:
+        log.error(f'Could not get order {order_key}')
+        return
+
+    try:
+        inflight_execution_requests.remove(tk)
+    except KeyError:
+        pass
+
+    def get_trigger():
+        try:
+            return OrderTriggers.instances[order.key].triggers[tk.tranche_index]
+        except KeyError:
+            return None
+
+    def slash():
+        trig = get_trigger()
+        if trig is not None:
+            trig.slash()
+
+    #
+    # execute() error handling
+    #
+    if error is None:
+        log.debug(f'execution request for tranche {tk} was successful!')
+    elif error == 'IIA':
+        # Insufficient Input Amount
+        token = order.order.tokenIn
+        log.debug(f'insufficient funds {tk.vault} {token} ')
+    elif error == 'SPL':
+        # todo tight slippage can cause excessive executions as the backend repeatedly retries the remainder. The symptom is error 'SPL'.
+        # Square-root price limit from Uniswap means we asked for a limit price that isn't met. This is a fault of
+        # vault logic if it happens.
+        log.warning(f'SPL when executing tranche {tk}')
+        close_order_and_disable_triggers(order, SwapOrderState.Error)
+    elif error == 'NO':
+        # order is not open
+        log.warning(f'order {order_key} was closed, undetected!')
+        close_order_and_disable_triggers(order, SwapOrderState.Error)  # We do not know if it was filled or not so only Error status can be given
+    elif error == 'TF':
+        # Tranche Filled
+        log.warning(f'tranche already filled {tk}')
+        tranche_trigger = get_trigger()
+        if tranche_trigger is not None:
+            tranche_trigger.status = TrancheState.Filled
+            tranche_trigger.disable()
+    elif error == 'Too little received':
+        # from UniswapV3 SwapRouter when not even 1 satoshi of output was gained
+        log.debug('warning: de minimis liquidity in pool')
+        slash()
+    elif error == 'RL':
+        log.debug(f'tranche {tk} execution failed due to "RL" rate limit')
+    elif error == 'TE':
+        log.debug(f'tranche {tk} execution failed due to "TE" too early')
+    elif error == 'TL':
+        log.debug(f'tranche {tk} execution failed due to "TL" too late')
+    elif error == 'LL':
+        log.debug(f'tranche {tk} execution failed due to "LL" lower limit')
+    elif error == 'LU':
+        log.debug(f'tranche {tk} execution failed due to "LU" upper limit')
+    elif error == 'OVR':
+        log.warning(f'tranche {tk} execution failed due to "OVR" overfilled')
+        # this should never happen. Shut down the order.
+        close_order_and_disable_triggers(order, SwapOrderState.Error)
+    elif error == 'K':
+        log.error(f'vault killed')
+        close_order_and_disable_triggers(order, SwapOrderState.Error)
+    elif error == 'STF':
+        log.error(f'tranche {tk} execution failed due to "STF" safe transfer failure')
+        close_order_and_disable_triggers(order, SwapOrderState.Error)
+    else:
+        slash()
+        msg = '<unspecified>' if not error else error
+        log.error(f'Unhandled execution error for tranche {tk} ERROR: {msg}')
+
+
+def execute_tranches():
+    new_execution_requests = []
+    for tk, proof in active_tranches.items():
+        if tk not in inflight_execution_requests:
+            new_execution_requests.append((tk, proof))
+    # todo order requests and batch
+    for tk, proof in new_execution_requests:
+        create_execution_request(tk, proof)
+
+
+def create_execution_request(tk: TrancheKey, proof: PriceProof):
+    inflight_execution_requests.add(tk)
+    job = submit_transaction_request(new_tranche_execution_request(tk, proof))
+    log.debug(f'Executing {tk} as job {job.id}')
+    return job
+
+
+def handle_dexorderexecutions(event: EventData):
+    log.debug(f'executions {event}')
+    exe_id = UUID(bytes=event['args']['id'])
+    errors = event['args']['errors']
+    if len(errors) == 0:
+        log.warning(f'No errors found in DexorderExecutions event: {event}')
+        return
+    if len(errors) > 1:
+        log.warning(f'Multiple executions not yet implemented')
+    job: TransactionJob = db.session.get(TransactionJob, exe_id)
+    if job is None:
+        log.warning(f'Job {exe_id} not found!')
+        return
+    # noinspection PyTypeChecker
+    req: TrancheExecutionRequest = job.request
+    tk = TrancheKey(req.vault, req.order_index, req.tranche_index)
+    finish_execution_request(tk, None if errors[0] == '' else errors[0])
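
In summary, the revert codes handled above sort into three buckets ('TF' is the odd one out: it marks the tranche trigger Filled and disables it). The sets below are an editorial summary of the branches, not names defined by the commit:

    FATAL = {'SPL', 'NO', 'OVR', 'K', 'STF'}         # close the order with SwapOrderState.Error
    WAITING = {'IIA', 'RL', 'TE', 'TL', 'LL', 'LU'}  # a constraint is not met yet; log and move on
    SLASHED = {'Too little received'}                # slash(); unrecognized codes also slash and may kill
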

@@ -6,7 +6,7 @@ from typing import overload
 from dexorder import DELETE, db, order_log
 from dexorder.base.chain import current_chain
 from dexorder.base.order import OrderKey, TrancheKey
-from dexorder.base.orderlib import SwapOrderStatus, SwapOrderState, ElaboratedSwapOrderStatus
+from dexorder.base.orderlib import SwapOrderState, ElaboratedSwapOrderStatus
 from dexorder.blockstate import BlockDict, BlockSet
 from dexorder.database.model.orderindex import OrderIndex
 from dexorder.routing import pool_address

@@ -16,6 +16,8 @@ from dexorder.vault_blockdata import vault_owners
 log = logging.getLogger(__name__)


+# We split off the fill information for efficient communication to clients.
+
 @dataclass
 class Filled:
     filled_in: int

@@ -79,16 +81,16 @@ class Order:
        key = OrderKey(vault, order_index)
        if key in Order.instances:
            raise ValueError
-        status = ElaboratedSwapOrderStatus.load_from_tx(tx_id, obj)
+        status = ElaboratedSwapOrderStatus.load_from_chain(tx_id, obj)
        Order.order_statuses[key] = status.copy()  # always copy the struct when setting. values in BlockData must be immutable
        order = Order(key)
        if order.is_open:
            Order.open_orders.add(key)
            Order.vault_open_orders.listappend(key.vault, key.order_index)
            # Start with a filled value of 0 even if the chain says otherwise, because we will process the fill events later and add them in
-            tranche_filled = [Filled(0,0) for _ in range(len(status.trancheFilledIn))]
+            tranche_filled = [Filled(0, 0) for _ in range(len(status.trancheStatus))]
            order_log.debug(f'initialized order_filled[{key}]')
-            Order.order_filled[key] = OrderFilled(Filled(0,0), tranche_filled)
+            Order.order_filled[key] = OrderFilled(Filled(0, 0), tranche_filled)
        order_log.debug(f'order created {key}')
        return order

@@ -103,9 +105,9 @@ class Order:
        key = a if b is None else OrderKey(a, b)
        assert key not in Order.instances
        self.key = key
-        self.status: SwapOrderStatus = Order.order_statuses[key].copy()
+        self.status: ElaboratedSwapOrderStatus = Order.order_statuses[key].copy()
        self.pool_address: str = pool_address(self.status.order)
-        self.tranche_keys = [TrancheKey(key.vault, key.order_index, i) for i in range(len(self.status.trancheFilledIn))]
+        self.tranche_keys = [TrancheKey(key.vault, key.order_index, i) for i in range(len(self.status.trancheStatus))]
        # flattenings of various static data
        self.order = self.status.order
        self.amount = self.status.order.amount

@@ -132,11 +134,11 @@ class Order:

    def tranche_filled_in(self, tranche_index: int):
        return Order.order_filled[self.key].tranche_filled[tranche_index].filled_in if self.is_open \
-            else self.status.trancheFilledIn[tranche_index]
+            else self.status.trancheStatus[tranche_index].filledIn

    def tranche_filled_out(self, tranche_index: int):
        return Order.order_filled[self.key].tranche_filled[tranche_index].filled_out if self.is_open \
-            else self.status.trancheFilledIn[tranche_index]
+            else self.status.trancheStatus[tranche_index].filledOut

    def tranche_filled(self, tranche_index: int):
        return self.tranche_filled_in(tranche_index) if self.amount_is_input \

@@ -146,18 +148,16 @@ class Order:
        return self.tranche_amounts[tranche_index] - self.tranche_filled(tranche_index)

    def activation_time(self, tranche_index: int):
-        return self.status.trancheActivationTime[tranche_index]
+        return self.status.trancheStatus[tranche_index].activationTime

    @property
    def filled(self):
        return self.filled_in if self.amount_is_input else self.filled_out
-

    @property
    def is_open(self):
        return self.state.is_open
-

    def add_fill(self, tranche_index: int, filled_in: int, filled_out: int):
        order_log.debug(f'tranche fill {self.key}|{tranche_index} in:{filled_in} out:{filled_out}')
        try:

@@ -192,8 +192,8 @@ class Order:
        status.filledIn = of.filled.filled_in
        status.filledOut = of.filled.filled_out
        for i, tf in enumerate(of.tranche_filled):
-            status.trancheFilledIn[i] += of.tranche_filled[i].filled_in
-            status.trancheFilledOut[i] += of.tranche_filled[i].filled_out
+            status.trancheStatus[i].filledIn = of.tranche_filled[i].filled_in
+            status.trancheStatus[i].filledOut = of.tranche_filled[i].filled_out
        Order.order_statuses[self.key] = status  # set the status in order to save it
        Order.order_statuses.unload(self.key)  # but then unload from memory after root promotion
        order_log.debug(f'order completed {status}')

@@ -229,7 +229,7 @@ class Order:
        return None

    @staticmethod
-    def save_order_index(key: OrderKey, status: SwapOrderStatus):
+    def save_order_index(key: OrderKey, status: ElaboratedSwapOrderStatus):
        if status is DELETE:
            sess = db.session
            oi = sess.get(OrderIndex, (current_chain.get(), key.vault, key.order_index))

@@ -255,7 +255,7 @@ class Order:
    # this is the main order table.
    # it holds "everything" about an order in the canonical format specified by the contract orderlib, except that
    # the filled amount fields for active orders are maintained in the order_remainings and tranche_remainings series.
-    order_statuses: BlockDict[OrderKey, SwapOrderStatus] = BlockDict(
+    order_statuses: BlockDict[OrderKey, ElaboratedSwapOrderStatus] = BlockDict(
        'o', db='lazy', redis=True, pub=pub_order_status, finalize_cb=save_order_index,
        str2key=OrderKey.str2key, value2str=lambda v: json.dumps(v.dump()),
        str2value=lambda s:ElaboratedSwapOrderStatus.load(json.loads(s)),

@@ -1,229 +1,57 @@
 import asyncio
 import logging
+from abc import abstractmethod
 from collections import defaultdict
 from enum import Enum, auto
-from typing import Callable, Optional, Union, Awaitable
+from typing import Optional, Sequence

-from dexorder.base.orderlib import SwapOrderState, PriceProof, DISTANT_FUTURE, DISTANT_PAST
-from dexorder.blockstate import BlockSet, BlockDict
-from dexorder.util import defaultdictk
+import numpy as np
+from sortedcontainers import SortedList
+
+from dexorder.base.orderlib import SwapOrderState, PriceProof, DISTANT_FUTURE, DISTANT_PAST, Line
+from dexorder.blockstate import BlockDict
 from .orderstate import Order
-from .. import dec, order_log, now, timestamp, from_timestamp
+from .. import dec, order_log, timestamp, from_timestamp, config
 from ..base.chain import current_clock
-from ..base.order import OrderKey, TrancheKey, ExecutionRequest
+from ..base.order import OrderKey, TrancheKey
+from ..contract import ERC20
+from ..database.model.pool import OldPoolDict
 from ..pools import ensure_pool_price, pool_prices, get_pool
 from ..routing import pool_address
+from ..vault_blockdata import vault_balances, adjust_balance

 log = logging.getLogger(__name__)

-# todo time and price triggers should be BlockSortedSets that support range queries for efficient lookup of triggers
-TimeTrigger = Callable[[int], None]  # func(timestamp)
-time_triggers:BlockSet[TimeTrigger] = BlockSet('tt')
-
-PriceTrigger = Callable[[dec], Union[Awaitable[None],None]]  # [async] func(pool_price)
-UnconstrainedPriceTrigger = Callable[[Optional[dec]], Union[Awaitable[None],None]]  # [async] func(pool_price)
-price_triggers:dict[str, BlockSet[PriceTrigger]] = defaultdictk(lambda addr:BlockSet(f'pt|{addr}'))  # different BlockSet per pool address
-new_price_triggers:dict[str, set[PriceTrigger]] = defaultdict(set)  # when price triggers are first set, they must be tested against the current price even if it didnt change this block
-unconstrained_price_triggers: BlockSet[UnconstrainedPriceTrigger] = BlockSet('upt')  # tranches with no price constraints, whose time constraint is fulfilled
-active_tranches: BlockDict[TrancheKey, Optional[PriceProof]] = BlockDict('at')  # tranches which have passed all constraints and should be executed
-execution_requests:BlockDict[TrancheKey, ExecutionRequest] = BlockDict('e')  # generated by the active tranches
-# todo should this really be blockdata?
-inflight_execution_requests:BlockDict[TrancheKey, int] = BlockDict('ei')  # value is block height when the request was sent
+"""
+Each tranche can have up to two time constraints: activation time and expiration time, and two price constraints:
+min line and max line. Line constraints may either be barriers or not.
+
+Additionally, each order can be blocked based on available funds in the vault.
+
+In order to handle chain reorganizations without re-evaluating every trigger for every head, the boolean state of each
+constraint is saved in BlockState as a bitarray. When a time or price is changed, only the triggers sensitive to that
+input are updated, and then checked along with the cached values from unchanged constraints to determine if an
+execution should be attempted on the tranche.
+"""
+
+# tranches which have passed all constraints and should be executed
+active_tranches: BlockDict[TrancheKey, Optional[PriceProof]] = BlockDict('at')

-async def activate_orders():
-    log.debug('activating orders')
-    # this is a state init callback, called only once after the state has been loaded from the db or created fresh
-    keys = list(Order.open_orders)
-    orders = [Order.of(key) for key in keys]
-    for order in orders:
-        # setup triggers
-        await activate_order(order)  # too many to really parallelize, and it's startup anyway
-    log.debug(f'activated {len(keys)} orders')
-
-
-async def activate_order(order: Order):
-    """
-    Call this to enable triggers on an order which is already in the state.
-    """
-    address = pool_address(order.status.order)
-    pool = await get_pool(address)
-    await ensure_pool_price(pool)
-    triggers = OrderTriggers(order)
-    if triggers.closed:
-        log.debug(f'order {order.key} was immediately closed')
-        close_order_and_disable_triggers(order, SwapOrderState.Filled if order.remaining == 0 or order.remaining <= order.min_fill_amount else SwapOrderState.Expired)
-
-
-def intersect_ranges( a_low, a_high, b_low, b_high):
-    low, high = max(a_low,b_low), min(a_high,b_high)
-    if high <= low:
-        low, high = None, None
-    return low, high
-
-
-async def line_passes(lc: tuple[float,float], is_min: bool, price: dec) -> bool:
-    b, m = lc
-    if b == 0 and m == 0:
-        return True
-    limit = m * current_clock.get().timestamp + b
-    # log.debug(f'line passes {limit} {"<" if is_min else ">"} {price}')
-    # todo ratios
-    # prices AT the limit get zero volume, so we only trigger on >, not >=
-    return is_min and limit < price or not is_min and limit > price
-
-
-class TrancheStatus (Enum):
-    Early = auto()  # first time trigger hasnt happened yet
-    Pricing = auto()  # we are inside the time window and checking prices
-    Filled = auto()  # tranche has no more available amount
-    Expired = auto()  # time deadline has past and this tranche cannot be filled
-
-class TrancheTrigger:
-    def __init__(self, order: Order, tranche_key: TrancheKey):
-        assert order.key.vault == tranche_key.vault and order.key.order_index == tranche_key.order_index
-        self.order = order
-        self.tk = tranche_key
-        self.status = TrancheStatus.Early
-
-        tranche = order.order.tranches[self.tk.tranche_index]
-        tranche_amount = tranche.fraction_of(order.amount)
-        tranche_filled = order.tranche_filled(self.tk.tranche_index)
-        tranche_remaining = tranche_amount - tranche_filled
-
-        # time and price constraints
-        self.time_constraint = [tranche.startTime, tranche.endTime]
-        if tranche.startTimeIsRelative:
-            self.time_constraint[0] += self.order.status.start
-        if tranche.endTimeIsRelative:
-            self.time_constraint[1] += self.order.status.start
-        if self.time_constraint[0] <= DISTANT_PAST and self.time_constraint[1] >= DISTANT_FUTURE:
-            self.time_constraint = None
-        self.min_line_constraint = (0.,0.) if tranche.marketOrder else (tranche.minIntercept, tranche.minSlope)
-        self.max_line_constraint = (0.,0.) if tranche.marketOrder else (tranche.maxIntercept, tranche.maxSlope)
-        self.has_line_constraint = any( a or b for a,b in (self.min_line_constraint, self.max_line_constraint))
-        self.has_sloped_line_constraint = any(m!=0 for b,m in (self.min_line_constraint, self.max_line_constraint))
-        self.slippage = tranche.minIntercept if tranche.marketOrder else 0
-        self.pool_price_multiplier = None
-
-        # compute status and set relevant triggers
-        if tranche_remaining == 0 or tranche_remaining < self.order.min_fill_amount:  # min_fill_amount could be 0 (disabled) so we also check for the 0 case separately
-            self.status = TrancheStatus.Filled
-            return
-        timestamp = current_clock.get().timestamp
-        self.status = \
-            TrancheStatus.Pricing if self.time_constraint is None else \
-            TrancheStatus.Early if timestamp < self.time_constraint[0] else \
-            TrancheStatus.Expired if timestamp > self.time_constraint[1] else \
-            TrancheStatus.Pricing
-        self.enable_time_trigger()
-        if self.status == TrancheStatus.Pricing:
-            self.enable_price_trigger()
-
-    def enable_time_trigger(self):
-        if self.time_constraint:
-            log.debug(f'enable_time_trigger')
-            time_triggers.add(self.time_trigger)
-
-    def disable_time_trigger(self):
-        if self.time_constraint:
-            time_triggers.remove(self.time_trigger)
-
-    def time_trigger(self, now):
-        # log.debug(f'time_trigger {now} {self.status} {self.time_constraint}')
-        if self.closed:
-            log.debug(f'price trigger ignored because trigger status is {self.status}')
-            return
-        if not self.check_expired(now) and self.status == TrancheStatus.Early and now >= self.time_constraint[0]:
-            order_log.debug(f'tranche time enabled {self.tk}')
-            self.status = TrancheStatus.Pricing
-            self.enable_price_trigger()
-
-    def enable_price_trigger(self):
-        if self.has_line_constraint and not self.has_sloped_line_constraint:  # sloped constraints must be triggered every tick, not just on pool price changes
-            price_triggers[self.order.pool_address].add(self.price_trigger)
-            new_price_triggers[self.order.pool_address].add(self.price_trigger)
-        else:
-            unconstrained_price_triggers.add(self.price_trigger)
-
-    def disable_price_trigger(self):
-        if self.has_line_constraint and not self.has_sloped_line_constraint:
-            price_triggers[self.order.pool_address].remove(self.price_trigger)
-        else:
-            unconstrained_price_triggers.remove(self.price_trigger)
-
-    async def price_trigger(self, cur: dec):
-        # must be idempotent. could be called twice when first activated: once for the initial price lookup then once again if that price was changed in the current block
-        if self.closed:
-            log.debug(f'price trigger ignored because trigger status is {self.status}')
-            return
-        activation_time = self.order.activation_time(self.tk.tranche_index)
-        if activation_time != 0 and timestamp() < activation_time:
-            log.debug(f'{self.tk} is rate limited until {from_timestamp(activation_time)}')
-            return  # rate limited
-        # log.debug(f'price trigger {cur}')
-        addr = pool_address(self.order.order)
-        pool = await get_pool(addr)
-        if cur is None and self.has_line_constraint:
-            await ensure_pool_price(pool)
-            cur = pool_prices[addr]
-        if cur is not None:
-            if self.pool_price_multiplier is None:
-                self.pool_price_multiplier = dec(10) ** dec(-pool['decimals'])
-            # log.debug(f'adjusted cur price from {cur} => {cur*self.pool_price_multiplier}')
-            cur *= self.pool_price_multiplier
-        if cur is None or not self.has_line_constraint or all(await asyncio.gather(
-                line_passes(self.min_line_constraint, True, cur),
-                line_passes(self.max_line_constraint, False, cur))):
-            # setting active_tranches[] with a PriceProof causes an execute() invocation
-            active_tranches[self.tk] = PriceProof(0)  # todo PriceProof
-
-    def fill(self, _amount_in, _amount_out ):
-        remaining = self.order.tranche_remaining(self.tk.tranche_index)
-        filled = remaining == 0 or remaining < self.order.min_fill_amount
-        if filled:
-            order_log.debug(f'tranche filled {self.tk}')
-            self.status = TrancheStatus.Filled
-            self.disable()
-        else:
-            order_log.debug(f'tranche part-filled {self.tk} in:{_amount_in} out:{_amount_out} remaining:{remaining}')
-        return filled
-
-    def check_expired(self, now):
-        expired = now >= self.time_constraint[1]
-        if expired:
-            self.expire()
-        return expired
-
-    def expire(self):
-        order_log.debug(f'tranche expired {self.tk}')
-        self.status = TrancheStatus.Expired
-        self.disable()
-
-    def disable(self):
-        try:
-            del active_tranches[self.tk]
-        except KeyError:
-            pass
-        self.disable_time_trigger()
-        self.disable_price_trigger()
-
-    @property
-    def closed(self):
-        return self.status in (TrancheStatus.Filled, TrancheStatus.Expired)
-
-    @property
-    def open(self):
-        return not self.closed


 class OrderTriggers:
     instances: dict[OrderKey, 'OrderTriggers'] = {}

-    def __init__(self, order: Order):
+    @staticmethod
+    async def create(order: Order):
+        triggers = await asyncio.gather(*[TrancheTrigger.create(order, tk) for tk in order.tranche_keys])
+        return OrderTriggers(order, triggers)
+
+    def __init__(self, order: Order, triggers: Sequence['TrancheTrigger']):
        assert order.key not in OrderTriggers.instances
        self.order = order
-        self.triggers = [TrancheTrigger(order, tk) for tk in self.order.tranche_keys]
+        self.triggers = triggers
        OrderTriggers.instances[order.key] = self
        log.debug(f'created OrderTriggers for {order.key}')

@@ -256,6 +84,50 @@ class OrderTriggers:
        self.check_complete()


+def start_trigger_updates():
+    """
+    Called near the beginning of block handling to initialize any per-block trigger data structures
+    """
+    TimeTrigger.update_all(current_clock.get().timestamp)
+    PriceLineTrigger.clear_data()
+
+
+#
+# Client Interface
+#
+
+async def update_balance_triggers(vault: str, token: str, balance: int):
+    updates = [bt.update(balance) for bt in BalanceTrigger.by_vault_token.get((vault, token), [])]
+    await asyncio.gather(*updates)
+
+
+async def update_price_triggers(pool: OldPoolDict, price: dec):
+    price = price * dec(10) ** dec(-pool['decimals'])  # adjust for pool decimals to get onchain price
+    price = float(price)  # since we use SIMD operations to evaluate lines, we must convert to float
+    updates = [pt.update(price) for pt in PriceLineTrigger.by_pool.get(pool['address'], [])]
+    await asyncio.gather(*updates)
+
+
+inflight_execution_requests: set[TrancheKey] = set()
+
+
+async def end_trigger_updates():
+    """
+    Call once after all updates have been handled. This updates the active_tranches array based on final trigger state.
+    """
+    PriceLineTrigger.end_updates(current_clock.get().timestamp)
+    for tk in _dirty:
+        if _trigger_state.get(tk, 0) == 0:
+            # all clear for execution. add to active list with any necessary proofs
+            active_tranches[tk] = PriceProof(0)
+        else:
+            # blocked by one or more triggers. delete from active list.
+            try:
+                del active_tranches[tk]
+            except KeyError:
+                pass
+    _dirty.clear()

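Taken together, the new functions replace the old per-trigger scans with a three-phase pass per block. A sketch of the intended call order — the driver loop itself is outside this diff, so on_new_block and handle_block_events are hypothetical names:

    async def on_new_block(block):
        start_trigger_updates()            # fire due TimeTriggers, reset the SIMD line arrays
        await handle_block_events(block)   # hypothetical: event handlers call update_price_triggers
                                           # and update_balance_triggers as Swap/Transfer events arrive
        await end_trigger_updates()        # evaluate all touched lines at once, refresh active_tranches
        execute_tranches()                 # submit jobs for active tranches not already inflight
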
 def close_order_and_disable_triggers(order: Order, final_state: SwapOrderState):
    order.complete(final_state)
    try:

@@ -265,3 +137,387 @@ def close_order_and_disable_triggers(order: Order, final_state: SwapOrderState):
    else:
        triggers.disable()


+# NOTE: we store the INVERSE of each trigger's value! this makes the test for "all True" a comparison with 0
+# instead of a comparison against a set of 1's of the correct size. By storing inverted values, the group does not
+# need to know the number of child triggers, only that no falses have been reported.
+_trigger_state: BlockDict[TrancheKey, int] = BlockDict('trig', str2key=TrancheKey.str2key, db=True)
+_dirty: set[TrancheKey] = set()

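For illustration, the inverted bitfield behaves as follows. Bit positions match the classes below (0 = balance, 1 = activation time, 2 = expiration time, 3 = min line, 4 = max line); the values are made up:

    state = 0           # empty bitfield: no trigger has reported False, i.e. "all True"
    state |= 1 << 3     # the min-line constraint fails, so its inverted bit is set
    assert state != 0   # the tranche is blocked
    state &= ~(1 << 3)  # the constraint passes again, clearing the bit
    assert state == 0   # every constraint passes: eligible for active_tranches
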
+class Trigger:
+    def __init__(self, position: int, tk: TrancheKey, value: bool):
+        """
+        position is the bit position of the boolean result in the tranche's constraint bitfield.
+        """
+        self.position = position
+        self.tk = tk
+        self.value = value
+
+    @property
+    def value(self):
+        return _trigger_state.get(self.tk, 0) & (1 << self.position) == 0  # NOTE: inverted
+
+    @value.setter
+    def value(self, value):
+        if value != self.value:
+            _dirty.add(self.tk)
+            if not value:  # this conditional is inverted
+                _trigger_state[self.tk] |= 1 << self.position  # set
+            else:
+                _trigger_state[self.tk] &= ~(1 << self.position)  # clear
+
+    @abstractmethod
+    def remove(self): ...
+
+
+async def has_funds(tk: TrancheKey):
+    # log.debug(f'has funds? {tk.vault}')
+    order = Order.of(tk)
+    balances = vault_balances.get(tk.vault, {})
+    token_addr = order.status.order.tokenIn
+    token_balance = balances.get(token_addr)
+    if token_balance is None:
+        # unknown balance
+        token_balance = balances[token_addr] = await ERC20(token_addr).balanceOf(tk.vault)
+        log.debug(f'queried token balance {token_addr}.balanceOf({tk.vault}) = {token_balance}')
+        await adjust_balance(tk.vault, token_addr, token_balance)
+    return await input_amount_is_sufficient(order, token_balance)
+
+
+async def input_amount_is_sufficient(order, token_balance):
+    if order.amount_is_input:
+        return token_balance >= order.status.order.minFillAmount
+    # amount is an output amount, so we need to know the price
+    price = pool_prices.get(order.pool_address)
+    if price is None:
+        return token_balance > 0  # we don't know the price so we allow any nonzero amount to be sufficient
+    pool = await get_pool(order.pool_address)
+    inverted = order.order.tokenIn != pool['base']
+    minimum = dec(order.min_fill_amount)*price if inverted else dec(order.min_fill_amount)/price
+    log.debug(f'order minimum amount is {order.min_fill_amount} ' + ("input" if order.amount_is_input else f"output @ {price} = {minimum} ") + f'< {token_balance} balance')
+    return token_balance > minimum

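A worked example of the output-amount branch, with illustrative numbers (which side is multiplied or divided depends on whether tokenIn is the pool's base token):

    min_fill_amount = 3000              # minimum fill, in output-token units
    price = 1500                        # pool price, output units per input unit (non-inverted case)
    minimum = min_fill_amount / price   # = 2.0 units of tokenIn required
    # the check passes only while token_balance > 2.0; the inverted case uses 3000 * price instead
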
+class BalanceTrigger (Trigger):
+    by_vault_token: dict[tuple[str, str], set['BalanceTrigger']] = defaultdict(set)
+
+    @staticmethod
+    async def create(tk: TrancheKey):
+        value = await has_funds(tk)
+        return BalanceTrigger(tk, value)
+
+    def __init__(self, tk: TrancheKey, value: bool):
+        super().__init__(0, tk, value)
+        self.order = Order.of(self.tk)
+        self.vault_token = self.tk.vault, self.order.status.order.tokenIn
+        log.debug(f'adding balance trigger {id(self)}')
+        BalanceTrigger.by_vault_token[self.vault_token].add(self)
+
+    async def update(self, balance):
+        self.value = await input_amount_is_sufficient(self.order, balance)
+
+    def remove(self):
+        log.debug(f'removing balance trigger {id(self)}')
+        try:
+            BalanceTrigger.by_vault_token[self.vault_token].remove(self)
+        except KeyError:
+            pass
+
+
+class TimeTrigger (Trigger):
+
+    all = SortedList(key=lambda t: (t.time, 0 if t.is_start else 1))  # start before end even if the same time
+
+    @staticmethod
+    def create(is_start: bool, tk: TrancheKey, time: int, time_now: Optional[int] = None):
+        if is_start and time == DISTANT_PAST or not is_start and time == DISTANT_FUTURE:
+            return None
+        if time_now is None:
+            time_now = current_clock.get().timestamp
+        return TimeTrigger(is_start, tk, time, time_now)
+
+    def __init__(self, is_start: bool, tk: TrancheKey, time: int, time_now: int):
+        triggered = time_now >= time
+        super().__init__(1 if is_start else 2, tk, triggered is is_start)
+        self.is_start = is_start
+        self._time = time
+        self.active = not triggered
+        if self.active:
+            TimeTrigger.all.add(self)
+
+    @property
+    def time(self):
+        return self._time
+
+    @time.setter
+    def time(self, time: int):
+        self.set_time(time, current_clock.get().timestamp)
+
+    def set_time(self, time: int, time_now: int):
+        if self.active:
+            TimeTrigger.all.remove(self)  # remove before the sort key changes
+        self._time = time
+        self.value = (time_now >= time) is self.is_start  # same rule as __init__
+        self.active = time_now < time
+        if self.active:
+            TimeTrigger.all.add(self)
+
+    def update(self):
+        # called when our self.time has been reached
+        self.value = self.is_start
+        self.active = False
+        # we are popped off the stack by update_all()
+
+    def remove(self):
+        if self.active:
+            TimeTrigger.all.remove(self)
+            self.active = False
+
+    @staticmethod
+    def update_all(time):
+        while TimeTrigger.all and TimeTrigger.all[0].time <= time:
+            # todo this doesn't work across reorgs. we need to keep a BlockState cursor of the last time handled,
+            # then activate any time triggers from that past time through the present. time triggers may only
+            # be popped off the stack after their times are older than the latest finalized block
+            # todo what if an order is placed on a reorg'd branch but never hits main branch? we have triggers going
+            # for a nonexistent order!
+            t = TimeTrigger.all.pop(0)
+            t.update()

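The (time, 0 or 1) sort key guarantees that a start trigger fires before an end trigger carrying the same timestamp, so a zero-length window still opens before it closes. A standalone illustration with stand-in objects:

    from sortedcontainers import SortedList
    from types import SimpleNamespace as NS

    pending = SortedList(key=lambda t: (t.time, 0 if t.is_start else 1))
    pending.add(NS(time=100, is_start=False, name='end'))
    pending.add(NS(time=100, is_start=True, name='start'))
    print([t.name for t in pending])  # ['start', 'end']
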
+class PriceLineTrigger (Trigger):
+    by_pool: dict[str, set['PriceLineTrigger']] = defaultdict(set)
+
+    @staticmethod
+    async def create(tk: TrancheKey, line: Line, is_min: bool, is_barrier: bool):
+        if line.intercept == 0 and line.slope == 0:
+            return None  # no constraint (deactivated)
+        pool = await get_pool(Order.of(tk).pool_address)
+        await ensure_pool_price(pool)
+        price_now = pool_prices[pool['address']]
+        return PriceLineTrigger(tk, line, is_min, is_barrier, price_now)
+
+    def __init__(self, tk: TrancheKey, line: Line, is_min: bool, is_barrier: bool, price_now: dec):
+        if is_barrier:
+            log.warning('Barriers not supported')
+        price_above = price_now > line.intercept + line.slope * current_clock.get().timestamp
+        super().__init__(3 if is_min else 4, tk, is_min is price_above)
+        self.line = line
+        self.is_min = is_min
+        self.is_barrier = is_barrier
+        self.pool_address = Order.of(tk).pool_address
+        self.index: Optional[int] = None
+        PriceLineTrigger.by_pool[self.pool_address].add(self)
+
+    # lines that need evaluating add their data to these arrays, which are then sent to SIMD for evaluation. each
+    # array must always have the same size as the others.
+    y = []
+    m = []
+    b = []
+    triggers = []  # 1-for-1 with the y/m/b line data
+    triggers_set = set()
+
+    @staticmethod
+    def clear_data():
+        PriceLineTrigger.y.clear()
+        PriceLineTrigger.m.clear()
+        PriceLineTrigger.b.clear()
+        PriceLineTrigger.triggers.clear()
+        PriceLineTrigger.triggers_set.clear()
+
+    def update(self, price: float):
+        if self not in PriceLineTrigger.triggers_set:
+            self.index = len(PriceLineTrigger.y)
+            PriceLineTrigger.y.append(price)
+            PriceLineTrigger.m.append(self.line.slope)
+            PriceLineTrigger.b.append(self.line.intercept)
+            PriceLineTrigger.triggers.append(self)
+            PriceLineTrigger.triggers_set.add(self)
+        else:
+            # update an existing equation's price
+            PriceLineTrigger.y[self.index] = price
+
+    @staticmethod
+    def end_updates(time: int):
+        # here we use numpy to compute all dirty lines using SIMD
+        y, m, b = map(np.array, (PriceLineTrigger.y, PriceLineTrigger.m, PriceLineTrigger.b))
+        line_value = m * time + b
+        price_diff = y - line_value
+        for t, pd in zip(PriceLineTrigger.triggers, price_diff):
+            t.handle_result(pd)
+
+    def handle_result(self, price_diff: float):
+        value = self.is_min and price_diff > 0 or not self.is_min and price_diff < 0
+        if not self.is_barrier or value:  # barriers that are False do not update their values to False
+            self.value = value
+
+    def remove(self):
+        PriceLineTrigger.by_pool[self.pool_address].remove(self)

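end_updates evaluates every touched line in a single vectorized pass rather than looping per trigger. The same arithmetic in isolation, with made-up numbers:

    import numpy as np

    t = 1_700_000_000             # block timestamp fed to end_updates
    y = np.array([1.05, 0.90])    # latest pool price per dirty line
    m = np.array([0.0, 0.0])      # slopes (flat limit lines here)
    b = np.array([1.00, 1.00])    # intercepts
    price_diff = y - (m * t + b)  # array([ 0.05, -0.10])
    # a min line passes while its diff > 0; a max line passes while its diff < 0
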
|
async def activate_orders():
|
||||||
|
log.debug('activating orders')
|
||||||
|
# this is a state init callback, called only once after the state has been loaded from the db or created fresh
|
||||||
|
keys = list(Order.open_orders)
|
||||||
|
orders = [Order.of(key) for key in keys]
|
||||||
|
for order in orders:
|
||||||
|
# setup triggers
|
||||||
|
await activate_order(order) # too many to really parallelize, and it's startup anyway
|
||||||
|
log.debug(f'activated {len(keys)} orders')
|
||||||
|
|
||||||
|
|
||||||
|
async def activate_order(order: Order):
|
||||||
|
"""
|
||||||
|
Call this to enable triggers on an order which is already in the state.
|
||||||
|
"""
|
||||||
|
address = pool_address(order.status.order)
|
||||||
|
pool = await get_pool(address)
|
||||||
|
await ensure_pool_price(pool)
|
||||||
|
triggers = await OrderTriggers.create(order)
|
||||||
|
if triggers.closed:
|
||||||
|
log.debug(f'order {order.key} was immediately closed')
|
||||||
|
close_order_and_disable_triggers(order, SwapOrderState.Filled if order.remaining == 0 or order.remaining <= order.min_fill_amount else SwapOrderState.Expired)
|
||||||
|
|
||||||
|
|
||||||
|
class TrancheState (Enum):
|
||||||
|
Early = auto() # first time trigger hasnt happened yet
|
||||||
|
Active = auto() # we are inside the time window and checking prices
|
||||||
|
Filled = auto() # tranche has no more available amount
|
||||||
|
Expired = auto() # time deadline has past and this tranche cannot be filled
|
||||||
|
Error = auto() # the tranche was slashed and killed due to reverts during execute()
|
||||||
|
|
||||||
|
|
||||||
|
class TrancheTrigger:

    @staticmethod
    async def create(order: Order, tk: TrancheKey) -> 'TrancheTrigger':
        time = current_clock.get().timestamp
        tranche = order.order.tranches[tk.tranche_index]
        ts = order.status.trancheStatus[tk.tranche_index]
        balance_trigger = await BalanceTrigger.create(tk)
        activation_trigger = TimeTrigger.create(True, tk, ts.activationTime, time)
        expiration_trigger = TimeTrigger.create(False, tk, ts.endTime, time)
        if tranche.marketOrder:
            min_trigger = max_trigger = None
        else:
            min_trigger, max_trigger = await asyncio.gather(
                PriceLineTrigger.create(tk, tranche.minLine, True, tranche.minIsBarrier),
                PriceLineTrigger.create(tk, tranche.maxLine, True, tranche.maxIsBarrier))
        return TrancheTrigger(order, tk, balance_trigger, activation_trigger, expiration_trigger, min_trigger, max_trigger)

    def __init__(self, order: Order, tk: TrancheKey,
                 balance_trigger: BalanceTrigger,
                 activation_trigger: Optional[TimeTrigger],
                 expiration_trigger: Optional[TimeTrigger],
                 min_trigger: Optional[PriceLineTrigger],
                 max_trigger: Optional[PriceLineTrigger],
                 ):
        assert order.key.vault == tk.vault and order.key.order_index == tk.order_index
        tranche = order.order.tranches[tk.tranche_index]

        self.order = order
        self.tk = tk

        self.balance_trigger = balance_trigger
        self.activation_trigger = activation_trigger
        self.expiration_trigger = expiration_trigger
        self.min_trigger = min_trigger
        self.max_trigger = max_trigger

        self.slippage = tranche.minLine.intercept if tranche.marketOrder else 0
        self.slash_count = 0

        tranche_remaining = tranche.fraction_of(order.amount) - order.tranche_filled(self.tk.tranche_index)
        self.status = \
            TrancheState.Filled if tranche_remaining == 0 or tranche_remaining < self.order.min_fill_amount else \
            TrancheState.Expired if self.expiration_trigger is not None and not self.expiration_trigger else \
            TrancheState.Early if self.activation_trigger is not None and not self.activation_trigger else \
            TrancheState.Active
        _dirty.add(tk)
        log.debug(f'Tranche {tk} initial status {self.status} {self}')

    def fill(self, _amount_in, _amount_out):
        remaining = self.order.tranche_remaining(self.tk.tranche_index)
        filled = remaining == 0 or remaining < self.order.min_fill_amount
        if filled:
            order_log.debug(f'tranche filled {self.tk}')
            self.status = TrancheState.Filled
            self.disable()
        else:
            order_log.debug(f'tranche part-filled {self.tk} in:{_amount_in} out:{_amount_out} remaining:{remaining}')
            self.slash_count = 0  # reset slash count
        return filled

    def expire(self):
        order_log.debug(f'tranche expired {self.tk}')
        self.status = TrancheState.Expired
        self.disable()

    def kill(self):
        order_log.warning(f'tranche KILLED {self.tk}')
        self.status = TrancheState.Error
        self.disable()

    def slash(self):
        # slash() is called when an execute() transaction on this tranche reverts without a recognized reason.
        self.slash_count += 1
        log.debug(f'slashed tranche x{self.slash_count} {self.tk}')
        if self.slash_count >= config.slash_kill_count:
            self.kill()
        else:
            delay = round(config.slash_delay_base * config.slash_delay_mul ** (self.slash_count - 1))
            self.deactivate(timestamp() + delay)

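A worked example of the backoff arithmetic above, using hypothetical config values (not the project defaults):

    # slash_delay_base=60, slash_delay_mul=2.0, slash_kill_count=4
    #   slash 1 -> deactivate for round(60 * 2.0**0) = 60s
    #   slash 2 -> deactivate for round(60 * 2.0**1) = 120s
    #   slash 3 -> deactivate for round(60 * 2.0**2) = 240s
    #   slash 4 -> kill(): tranche status becomes TrancheState.Error
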
    def deactivate(self, until):
        # Temporarily deactivate the tranche due to a rate limit. Use disable() to permanently halt the trigger.
        log.debug(f'deactivating tranche {self.tk} until {from_timestamp(until)}')
        if self.activation_trigger is None:
            self.activation_trigger = TimeTrigger.create(True, self.tk, until)
        else:
            self.activation_trigger.time = until

    def disable(self):
        # permanently stop this trigger and tear down all of its sub-triggers
        self.balance_trigger.remove()
        if self.activation_trigger is not None:
            self.activation_trigger.remove()
        if self.expiration_trigger is not None:
            self.expiration_trigger.remove()
        if self.min_trigger is not None:
            self.min_trigger.remove()
        if self.max_trigger is not None:
            self.max_trigger.remove()
        try:
            del _trigger_state[self.tk]
        except KeyError:
            pass
        try:
            _dirty.remove(self.tk)
        except KeyError:
            pass
        try:
            del active_tranches[self.tk]
        except KeyError:
            pass

    @property
    def closed(self):
        return self.status in (TrancheState.Filled, TrancheState.Expired, TrancheState.Error)

    @property
    def open(self):
        return not self.closed

    def __str__(self):
        trigs = []
        if self.balance_trigger is not None:
            trigs.append(f'balance {self.balance_trigger.value}')
        if self.activation_trigger is not None:
            trigs.append(f'activation {self.activation_trigger.value}')
        if self.expiration_trigger is not None:
            trigs.append(f'expiration {self.expiration_trigger.value}')
        if self.min_trigger is not None:
            trigs.append(f'min line {self.min_trigger.value}')
        if self.max_trigger is not None:
            trigs.append(f'max line {self.max_trigger.value}')
        return f'TrancheTrigger[{",".join(trigs)}]'

@@ -3,7 +3,6 @@ import logging
 from datetime import datetime
 from typing import Optional
 
-from sqlalchemy.exc import NoResultFound
 from web3.exceptions import ContractLogicError
 from web3.types import EventData
 
@@ -11,12 +10,11 @@ from dexorder import dec, ADDRESS_0, from_timestamp, db
 from dexorder.addrmeta import address_metadata
 from dexorder.base.chain import current_chain
 from dexorder.base.orderlib import Exchange
+from dexorder.blocks import get_block_timestamp
 from dexorder.blockstate import BlockDict
 from dexorder.blockstate.blockdata import K, V
-from dexorder.blocks import get_block_timestamp
 from dexorder.database.model import Pool
 from dexorder.database.model.pool import OldPoolDict
-from dexorder.metadata import is_generating_metadata
 from dexorder.tokens import get_token
 from dexorder.uniswap import UniswapV3Pool, uniswapV3_pool_address
 
@@ -26,7 +26,16 @@ class BlockProgressor(metaclass=ABCMeta):
         # items are (callback, event, log_filter). The callback is invoked with web3 EventData for every detected event
         self.events: list[tuple[Callable[[EventData], Maywaitable[None]], ContractEvents, dict]] = []
         # these callbacks are invoked after every block and also every second if there wasn't a block
-        self.postprocess_cbs: list[Callable[[], Maywaitable[None]]] = []
+        self.callbacks: list[tuple[Callable[[], Maywaitable[None]], bool]] = []
+        self.combined = []  # a mix of both event handlers and plain callbacks
+
+    def add_callback(self, callback: Callable[[], Maywaitable[None]], trigger_on_timer=True):
+        """
+        If trigger_on_timer is True, the callback is also invoked on a regular timer if there is a lull in blocks.
+        """
+        item = (callback, trigger_on_timer)
+        self.callbacks.append(item)
+        self.combined.append(item)
+
     def add_event_trigger(self,
                           # callback takes either a single event if multi=False, or if multi=True then a list of all events in the processing range
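A minimal usage sketch for the new registration API (`runner` and `flush_state` are hypothetical names, not from this commit):

    async def flush_state():
        ...  # runs after every block, and again on the idle timer

    runner.add_callback(flush_state)                                        # blocks + timer
    runner.add_callback(lambda: log.debug('tick'), trigger_on_timer=False)  # blocks only
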
@@ -50,7 +59,9 @@ class BlockProgressor(metaclass=ABCMeta):
             for e in events:
                 await maywait(func(e))
         cb = callback if event is None or multi else functools.partial(_map, callback)
-        self.events.append((cb, event, log_filter))
+        item = (cb, event, log_filter)
+        self.events.append(item)
+        self.combined.append(item)
 
     @abstractmethod
     def run(self):
@@ -61,24 +72,29 @@ class BlockProgressor(metaclass=ABCMeta):
         if w3 is None:
             w3 = current_w3.get()
         batches = []
-        for callback, event, log_filter in self.events:
-            if log_filter is None:
-                batches.append((None, callback, event, None))
-            else:
-                lf = dict(log_filter)
-                lf['fromBlock'] = from_height
-                lf['toBlock'] = to_height
-                get_logs = w3.eth.get_logs(lf)
-                if not config.parallel_logevent_queries:
-                    get_logs = await get_logs
-                batches.append((get_logs, callback, event, lf))
-        for callback in self.postprocess_cbs:
-            batches.append((None, callback, None, None))
+        for entry in self.combined:
+            if len(entry) == 2:
+                # plain callback
+                callback, on_timer = entry
+                batches.append((None, callback, None, None))
+            else:
+                # event callback
+                callback, event, log_filter = entry
+                if log_filter is None:
+                    batches.append((None, callback, event, None))
+                else:
+                    lf = dict(log_filter)
+                    lf['fromBlock'] = from_height
+                    lf['toBlock'] = to_height
+                    get_logs = w3.eth.get_logs(lf)
+                    if not config.parallel_logevent_queries:
+                        get_logs = await get_logs
+                    batches.append((get_logs, callback, event, lf))
         return batches
 
     @staticmethod
-    async def invoke_callbacks(batches, chain=None):
+    async def invoke_callback_batches(batches, chain=None):
         if chain is None:
             chain = current_chain.get()
         # logevent callbacks
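A design note on the loop above: `self.combined` mixes 2-tuples (plain callbacks) and 3-tuples (event triggers), discriminated purely by `len(entry)`. The same dispatch with explicit types, sketched with hypothetical NamedTuples rather than what the commit uses:

    from typing import Callable, NamedTuple, Optional

    class PlainCallback(NamedTuple):
        callback: Callable
        on_timer: bool

    class EventTrigger(NamedTuple):
        callback: Callable
        event: object
        log_filter: Optional[dict]

    # isinstance(entry, PlainCallback) would then replace len(entry) == 2
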
@@ -18,7 +18,7 @@ from dexorder.blockstate.branch import Branch
 from dexorder.blockstate.diff import DiffEntryItem
 from dexorder.blockstate.fork import current_fork, Fork
 from dexorder.progressor import BlockProgressor
-from dexorder.transaction import create_and_send_transactions
+from dexorder.transactions import create_and_send_transactions
 from dexorder.util import hexstr, hexbytes
 from dexorder.util.async_util import maywait, Maywaitable
 from dexorder.util.shutdown import fatal
@@ -270,31 +270,34 @@ class BlockStateRunner(BlockProgressor):
             block = await get_block(blockhash)
             current_block.set(block)
             bloom = BloomFilter(int.from_bytes(hexbytes(block.data['logsBloom'])))
-            for callback, event, log_filter in self.events:
-                if log_filter is None:
-                    batches.append((None, callback, event, None))
-                else:
-                    lf = dict(log_filter)
-                    lf['blockHash'] = hexstr(block.hash)
-                    has_logs = any(bytes(hexbytes(topic)) in bloom for topic in lf['topics'])
-                    # log.debug(f'has {event.__class__.__name__}? {has_logs}')
-                    if not has_logs:
-                        get_logs = None
-                    else:
-                        # log.debug(f'has {event.__class__.__name__}')
-                        get_logs = w3.eth.get_logs(lf)
-                        if not config.parallel_logevent_queries:
-                            get_logs = await get_logs
-                    batches.append((get_logs, callback, event, log_filter))
-            for callback in self.postprocess_cbs:
-                batches.append((None, callback, None, None))
+            for item in self.combined:
+                if len(item) == 2:
+                    callback, on_timer = item
+                    batches.append((None, callback, None, None))
+                else:
+                    callback, event, log_filter = item
+                    if log_filter is None:
+                        batches.append((None, callback, event, None))
+                    else:
+                        lf = dict(log_filter)
+                        lf['blockHash'] = hexstr(block.hash)
+                        has_logs = any(bytes(hexbytes(topic)) in bloom for topic in lf['topics'])
+                        # log.debug(f'has {event.__class__.__name__}? {has_logs}')
+                        if not has_logs:
+                            get_logs = None
+                        else:
+                            # log.debug(f'has {event.__class__.__name__}')
+                            get_logs = w3.eth.get_logs(lf)
+                            if not config.parallel_logevent_queries:
+                                get_logs = await get_logs
+                        batches.append((get_logs, callback, event, log_filter))
 
             # set up for callbacks
             current_pub.set(lambda room, evnt, *args: pubs.append((room, evnt, args)))  # used by handle_vault_created
             if not self.state_initialized:
                 await self.do_state_init_cbs()
             # log.debug(f'invoking callbacks with fork {current_fork.get()}')
-            await self.invoke_callbacks(batches)
+            await self.invoke_callback_batches(batches)
 
             # todo
             # IMPORTANT! check for a reorg and generate a reorg diff list. the diff list we need is the union of the set of keys touched by either
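The bloom pre-check above is what lets the runner skip `eth_getLogs` entirely for blocks that cannot contain a subscribed event. The same check in isolation, assuming the `BloomFilter` comes from the eth-bloom package (an assumption; the import is not shown in this hunk):

    from eth_bloom import BloomFilter

    def block_may_contain(logs_bloom: bytes, topic: bytes) -> bool:
        bloom = BloomFilter(int.from_bytes(logs_bloom, 'big'))
        # bloom filters have no false negatives, so a miss is a definitive skip
        return topic in bloom
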
@@ -358,9 +361,10 @@ class BlockStateRunner(BlockProgressor):
         session = db.session
         session.begin()
         try:
-            for callback in self.postprocess_cbs:
-                # noinspection PyCallingNonCallable
-                await maywait(callback())
+            for callback, on_timer in self.callbacks:
+                if on_timer:
+                    # noinspection PyCallingNonCallable
+                    await maywait(callback())
         except BaseException:
             session.rollback()
             raise
@@ -2,7 +2,7 @@ import logging
 from typing import Optional
 
 from eth_abi.exceptions import InsufficientDataBytes
-from web3.exceptions import ContractLogicError, BadFunctionCallOutput
+from web3.exceptions import BadFunctionCallOutput
 
 from dexorder import ADDRESS_0, config, db
 from dexorder.addrmeta import address_metadata
@@ -11,7 +11,6 @@ from dexorder.contract import ERC20, ContractProxy, CONTRACT_ERRORS
 from dexorder.database.model import Token
 from dexorder.database.model.token import OldTokenDict
 from dexorder.metadata import get_metadata
-from dexorder.util import hexstr
 
 log = logging.getLogger(__name__)
 
@@ -1,21 +1,22 @@
 import logging
 from abc import abstractmethod
-from typing import Optional
+from dataclasses import dataclass
+from typing import Union, Optional
 from uuid import uuid4
 
 from sqlalchemy import select
-from web3.exceptions import TransactionNotFound
+from web3.exceptions import TransactionNotFound, ContractPanicError, ContractLogicError
 
 from dexorder import db, current_w3, Account
-from dexorder.base import TransactionReceiptDict
+from dexorder.base import TransactionReceiptDict, TransactionRequest, transaction_request_registry
 from dexorder.base.chain import current_chain
-from dexorder.base.order import TransactionRequest
+from dexorder.base.order import TrancheKey, OrderKey
+from dexorder.base.orderlib import PriceProof
 from dexorder.blockstate import BlockDict
 from dexorder.blockstate.diff import DiffEntryItem
 from dexorder.blockstate.fork import current_fork, Fork
 from dexorder.contract.contract_proxy import ContractTransaction
 from dexorder.database.model.transaction import TransactionJob, TransactionJobState
-from dexorder.util.shutdown import fatal
 
 log = logging.getLogger(__name__)
 
|||||||
async def complete_transaction(self, job: TransactionJob) -> None: ...
|
async def complete_transaction(self, job: TransactionJob) -> None: ...
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TrancheExecutionRequest (TransactionRequest):
|
||||||
|
TYPE = 'te'
|
||||||
|
|
||||||
|
# type='te' for tranche execution
|
||||||
|
vault: str
|
||||||
|
order_index: int
|
||||||
|
tranche_index: int
|
||||||
|
price_proof: Union[None,dict,tuple[int]]
|
||||||
|
|
||||||
|
def __init__(self, vault: str, order_index: int, tranche_index: int, price_proof: Union[None,dict,tuple[int]], **_):
|
||||||
|
super().__init__(TrancheExecutionRequest.TYPE)
|
||||||
|
self.vault = vault
|
||||||
|
self.order_index = order_index
|
||||||
|
self.tranche_index = tranche_index
|
||||||
|
self.price_proof = price_proof
|
||||||
|
|
||||||
|
@property
|
||||||
|
def order_key(self):
|
||||||
|
return OrderKey(self.vault, self.order_index)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def tranche_key(self):
|
||||||
|
return TrancheKey(self.vault, self.order_index, self.tranche_index)
|
||||||
|
|
||||||
|
# Must register the class for deserialization
|
||||||
|
transaction_request_registry[TrancheExecutionRequest.TYPE] = TrancheExecutionRequest
|
||||||
|
|
||||||
|
|
||||||
|
def new_tranche_execution_request(tk: TrancheKey, proof: Optional[PriceProof]=None) -> TrancheExecutionRequest:
|
||||||
|
if proof is None:
|
||||||
|
proof = PriceProof(0)
|
||||||
|
return TrancheExecutionRequest(tk.vault, tk.order_index, tk.tranche_index, proof.dump())
|
||||||
|
|
||||||
|
|
||||||
def submit_transaction_request(tr: TransactionRequest):
|
def submit_transaction_request(tr: TransactionRequest):
|
||||||
|
"""
|
||||||
|
Once a transaction request has been submitted, it is this module's responsibility to see that it gets mined, at
|
||||||
|
which point `tr.complete_transaction()` is called with the transaction receipt.
|
||||||
|
The building of a transaction can also fail,
|
||||||
|
"""
|
||||||
job = TransactionJob(id=uuid4(), chain=current_chain.get(), height=current_fork.get().height,
|
job = TransactionJob(id=uuid4(), chain=current_chain.get(), height=current_fork.get().height,
|
||||||
state=TransactionJobState.Requested, request=tr)
|
state=TransactionJobState.Requested, request=tr)
|
||||||
db.session.add(job)
|
db.session.add(job)
|
||||||
@@ -58,37 +99,46 @@ async def create_and_send_transactions():
|
|||||||
# todo remove bad request?
|
# todo remove bad request?
|
||||||
log.warning('ignoring transaction request with bad type '
|
log.warning('ignoring transaction request with bad type '
|
||||||
f'"{job.request.type}": ' + ",".join(TransactionHandler.instances.keys()))
|
f'"{job.request.type}": ' + ",".join(TransactionHandler.instances.keys()))
|
||||||
else:
|
return
|
||||||
|
try:
|
||||||
ctx: ContractTransaction = await handler.build_transaction(job.id, job.request)
|
ctx: ContractTransaction = await handler.build_transaction(job.id, job.request)
|
||||||
if ctx is None:
|
except (ContractPanicError, ContractLogicError):
|
||||||
log.warning(f'unable to send transaction for job {job.id}')
|
# these errors can be thrown immediately when the tx is tested for gas
|
||||||
return
|
log.warning(f'failed to build transaction request for {job.request.__class__.__name__} {job.id}')
|
||||||
w3 = current_w3.get()
|
job.state = TransactionJobState.Error
|
||||||
account = Account.get_named(handler.tag)
|
db.session.add(job)
|
||||||
if account is None:
|
await handler.complete_transaction(job)
|
||||||
account = Account.get()
|
return
|
||||||
if account is None:
|
except Exception as x:
|
||||||
log.error(f'No account available for transaction request type "{handler.tag}"')
|
log.warning(f'unable to send transaction for job {job.id}', exc_info=x)
|
||||||
continue
|
return
|
||||||
await ctx.sign(account)
|
w3 = current_w3.get()
|
||||||
job.state = TransactionJobState.Signed
|
account = Account.get_named(handler.tag)
|
||||||
job.tx_id = ctx.id_bytes
|
if account is None:
|
||||||
job.tx_data = ctx.data
|
account = Account.get()
|
||||||
|
if account is None:
|
||||||
|
log.error(f'No account available for transaction request type "{handler.tag}"')
|
||||||
|
continue
|
||||||
|
await ctx.sign(account)
|
||||||
|
job.state = TransactionJobState.Signed
|
||||||
|
job.tx_id = ctx.id_bytes
|
||||||
|
job.tx_data = ctx.data
|
||||||
|
db.session.add(job)
|
||||||
|
log.info(f'servicing transaction request {job.request.__class__.__name__} {job.id} with tx {ctx.id}')
|
||||||
|
try:
|
||||||
|
sent = await w3.eth.send_raw_transaction(job.tx_data)
|
||||||
|
except:
|
||||||
|
log.exception(f'Failure sending transaction for job {job.id}')
|
||||||
|
# todo pager
|
||||||
|
# todo send state unknown!
|
||||||
|
else:
|
||||||
|
assert sent == job.tx_id
|
||||||
|
job.state = TransactionJobState.Sent
|
||||||
db.session.add(job)
|
db.session.add(job)
|
||||||
log.info(f'servicing transaction request {job.request.__class__.__name__} {job.id} with tx {ctx.id}')
|
|
||||||
try:
|
|
||||||
sent = await w3.eth.send_raw_transaction(job.tx_data)
|
|
||||||
except:
|
|
||||||
log.exception(f'Failure sending transaction for job {job.id}')
|
|
||||||
# todo pager
|
|
||||||
# todo send state unknown!
|
|
||||||
else:
|
|
||||||
assert sent == job.tx_id
|
|
||||||
job.state = TransactionJobState.Sent
|
|
||||||
db.session.add(job)
|
|
||||||
|
|
||||||
|
|
||||||
async def handle_transaction_receipts():
|
async def handle_transaction_receipts():
|
||||||
|
log.debug('handle_transaction_receipts')
|
||||||
w3 = current_w3.get()
|
w3 = current_w3.get()
|
||||||
for job in db.session.query(TransactionJob).filter(
|
for job in db.session.query(TransactionJob).filter(
|
||||||
TransactionJob.chain == current_chain.get(),
|
TransactionJob.chain == current_chain.get(),
|
||||||
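For reference, the job lifecycle that the function above implements (a summary, not new behavior):

    # Requested --build_transaction--> Signed --send_raw_transaction--> Sent --receipt--> completed
    #     \-- build raises ContractPanicError/ContractLogicError --> Error --> completed
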
@@ -116,6 +166,7 @@ async def handle_transaction_receipts():
 
 
 def finalize_transactions(_fork: Fork, diffs: list[DiffEntryItem]):
+    # noinspection PyTypeChecker
     open_jobs = db.session.scalars(select(TransactionJob).where(
         TransactionJob.chain == current_chain.get(),
         TransactionJob.state == TransactionJobState.Sent
@@ -128,5 +128,5 @@ class BlockWalker (BlockProgressor):
         fork = Fork([branch])
         current_fork.set(fork)
         batches = await self.get_backfill_batches(from_height, to_height, w3=w3)
-        await self.invoke_callbacks(batches, chain)
+        await self.invoke_callback_batches(batches, chain)
         log.info(f'completed through block {to_height}')