Compare commits

...

60 Commits

Author SHA1 Message Date
tim 4936150c3b bugfixes; startall works 2025-12-09 15:11:58 -04:00
tim 88057607d5 put app back on app.dexorder.com and corp site on dexorder.com with www redirecting to apex 2025-05-19 15:19:20 -04:00
tim 36d0a863c6 remove spammy debug logs 2025-05-07 16:02:37 -04:00
tim 89ce46793e dotcom 2025-05-06 13:56:05 -04:00
tim 2bcf5d043c redis pipeline overflow fix 2025-04-23 15:20:00 -04:00
tim 71942d5b8f memcache init doesn't use transaction 2025-04-23 14:13:58 -04:00
tim ef44973646 sharedata 2025-04-23 12:51:14 -04:00
tim ce55609297 examine open orders 2025-04-07 01:32:19 -04:00
tim a27300b5e4 info log for websocket connection drops 2025-04-03 18:15:16 -04:00
tim f3faaa3dd6 tranchestatus tostring touchup 2025-04-01 14:20:58 -04:00
tim 0bb670b356 redis initial state push fix 2025-04-01 13:52:49 -04:00
tim 52b406ba17 ohlc retained length fix 2025-04-01 13:52:39 -04:00
tim 3d0342d19d price line metrics fix 2025-04-01 13:52:29 -04:00
tim dbf960bae9 initial TrancheState fix 2025-04-01 13:52:21 -04:00
tim d49f142fe3 redis pipeline autoflush after 10000 entries 2025-04-01 10:54:25 -04:00
tim 34fa439b3c USD marks 2025-03-29 15:27:13 -04:00
tim 41a1e2d9fe MIN_SLIPPAGE epsilon leeway 2025-03-28 20:05:52 -04:00
tim 66229e67bb bugfix for 0 slippage market orders 2025-03-26 23:48:43 -04:00
tim 31b6ddd314 initial redis state load doesn't use pipeline now, because it overflowed. 2025-03-26 23:25:10 -04:00
tim 07c6423fd5 USDC/USDC.e naming update 2025-03-26 17:17:54 -04:00
tim 4740687167 account release bugfix 2025-03-19 21:05:19 -04:00
tim a06eeeb10d bugfix 2025-03-19 17:31:34 -04:00
tim 4492d23c47 better "addrmeta is None" fix 2025-03-16 21:17:19 -04:00
tim 1c0c2f0e63 "address_meta None" fix 2025-03-15 06:26:01 -04:00
tim f3bdfdf97b trigger fixes 2025-03-10 21:09:40 -04:00
tim be8c8bf019 order pprint touchup 2025-03-10 14:31:55 -04:00
tim ecf1d21d5f bin/examine.py; readonly state; debug logs for Underfunded 2025-03-10 14:18:40 -04:00
tim b7ed91d1c0 start of kraken accounting (unfinished) 2025-03-07 19:00:42 -04:00
tim 646449e456 underfunded state 2025-03-03 21:43:17 -04:00
tim 1bcf73de22 execute refactor for extraconf; accounting fixes 2025-02-28 01:04:12 -04:00
tim af0f35eba5 execute refactor for extraconf; accounting fixes 2025-02-28 01:02:36 -04:00
tim e868ea5a4b composable cli config 2025-02-27 17:51:07 -04:00
tim c132f40164 transfer accounting fix 2025-02-27 14:23:07 -04:00
tim eccf81c3c8 bugfixes; pagerduty client lib change; requirements bump 2025-02-26 16:58:57 -04:00
tim 61ab34a9f7 arb1 accounting config 2025-02-26 14:01:00 -04:00
tim aed6c36746 arb1 accounting config 2025-02-26 13:39:08 -04:00
tim 31a2edd0c6 job cleanup fix 2025-02-26 13:39:00 -04:00
tim 6c76a9efd7 archive rpc fix 2025-02-25 19:38:00 -04:00
tim 5ef92caa51 possible account leak fix 2025-02-25 19:27:31 -04:00
tim 473e0ec516 log tweak 2025-02-25 19:08:13 -04:00
tim 979f31dfe0 roundrobin/archive connection fix; job cleanup fix; mirror fix 2025-02-25 19:00:31 -04:00
tim afb1ee49a4 transaction handling touchups 2025-02-25 09:57:56 -04:00
tim 8b541bd76d metrics fixes 2025-02-24 22:15:05 -04:00
tim 04d7686c30 backend metrics port/svc 2025-02-24 21:31:17 -04:00
tim 603dd64dc4 log tweak 2025-02-24 21:00:53 -04:00
tim 920109ba27 async_yield() tweak 2025-02-24 20:47:46 -04:00
tim 67ab504a40 rpc connections limited to 8 2025-02-24 19:46:42 -04:00
tim f2e7749c7b accounting row fix 2025-02-24 19:29:20 -04:00
tim c9245615cb transaction job cleanup 2025-02-24 19:05:59 -04:00
tim 7d929db304 enable metrics in prod 2025-02-24 16:41:42 -04:00
tim 56e7c32705 reduce log spam 2025-02-24 10:43:11 -04:00
tim 14b19dcc78 initialize_accounting() bugfix 2025-02-24 10:36:29 -04:00
tim f2eb4ea96b initialize_accounting() bugfix 2025-02-24 10:23:57 -04:00
tim 58b17f21a6 initialize_accounting() bugfix 2025-02-24 10:15:57 -04:00
tim d942666e16 volume metric bugfix 2025-02-24 09:47:34 -04:00
tim 44d1c4a920 gas fees handler (incomplete) 2025-02-21 23:40:14 -04:00
tim 9dbc7e0378 fixed default pool. what was I thinking??? 2025-02-19 15:09:37 -04:00
tim 3c7d7f5d57 execution metrics 2025-02-17 14:08:28 -04:00
tim b18eeb5069 mirror.py connection fix 2025-02-12 18:26:51 -04:00
tim 91973304e2 accounting_lock 2025-02-12 13:02:08 -04:00
56 changed files with 1363 additions and 494 deletions

View File

@@ -28,7 +28,7 @@ def upgrade() -> None:
     sa.Column('time', sa.DateTime(), nullable=False),
     sa.Column('account', sa.String(), nullable=False),
     sa.Column('category', sa.Enum('Transfer', 'Income', 'Expense', 'Trade', 'Special', name='accountingcategory'), nullable=False),
-    sa.Column('subcategory', sa.Enum('OrderFee', 'GasFee', 'FillFee', 'VaultCreation', 'Execution', 'InitialBalance', name='accountingsubcategory'), nullable=True),
+    sa.Column('subcategory', sa.Enum('OrderFee', 'GasFee', 'FillFee', 'Admin', 'TransactionGas', 'VaultCreation', 'Execution', 'FeeAdjustment', 'InitialBalance', name='accountingsubcategory'), nullable=True),
     sa.Column('token', sa.String(), nullable=False),
     sa.Column('amount', dexorder.database.column_types.DecimalNumeric(), nullable=False),
     sa.Column('value', dexorder.database.column_types.DecimalNumeric(), nullable=True),

View File

@@ -0,0 +1,30 @@
"""sharedata
Revision ID: e47d1bca4b3d
Revises: 509010f13e8b
Create Date: 2025-04-23 11:23:10.809341
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = 'e47d1bca4b3d'
down_revision: Union[str, None] = '509010f13e8b'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.create_table('sharedata',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
sa.PrimaryKeyConstraint('id')
)
def downgrade() -> None:
op.drop_table('sharedata')
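
A minimal sketch of how a single-table JSONB store like the new `sharedata` table is typically read and written with SQLAlchemy Core. The table definition mirrors the migration above; the access functions and their names are assumptions, not code from this changeset.

import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB

metadata = sa.MetaData()
sharedata = sa.Table(
    'sharedata', metadata,
    sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
    sa.Column('data', JSONB, nullable=False),
)

def save_share(conn, payload: dict) -> int:
    # insert one JSONB document and return its generated id
    result = conn.execute(sa.insert(sharedata).values(data=payload))
    return result.inserted_primary_key[0]

def load_share(conn, share_id: int) -> dict | None:
    row = conn.execute(
        sa.select(sharedata.c.data).where(sharedata.c.id == share_id)
    ).first()
    return row[0] if row else None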

bin/examine Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
kubectl port-forward postgres-0 5431:5432 &
PF_PID=$!
shutdown () {
kill $PF_PID
wait
}
trap shutdown INT TERM
PYTHONPATH=src python -m dexorder.bin.examine rpc_url=arbitrum_dxod db_url=postgres://dexorder@localhost:5431/dexorder "$@"
shutdown

View File

@@ -1,5 +1,20 @@
+rpc_url = 'arbitrum_dxod'
+ws_url = 'arbitrum_dxod_ws'
+archive_url = 'arbitrum_alchemy'
+concurrent_rpc_connections=8
+metrics_port=9090
 metadata = '' # this setting approves no tokens
-account = '${accounts.gas}'
-rpc_url = '${rpc_urls.arbitrum_alchemy}'
-ws_url = '${rpc_urls.arbitrum_alchemy_ws}'
-concurrent_rpc_connections=100
+stablecoins = [
+    # in order of preference
+    '0xaf88d065e77c8cC2239327C5EDb3A432268e5831', # USDC
+    '0xFd086bC7CD5C481DCC9C85ebE478A1C0b69FCbb9', # USDT
+    '0xFF970A61A04b1cA14834A43f5dE4533eBDDB5CC8', # USDC.e
+]
+quotecoins = [
+    # in order of preference
+    '0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f', # WBTC
+    '0x82aF49447D8a07e3bd95BD0d56f35241523fBab1', # WETH
+]
+nativecoin = '0x82aF49447D8a07e3bd95BD0d56f35241523fBab1' # WETH
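
These ordered lists feed the mark-to-market quote preference (the accounting diff further down does `quotes.extend(config.stablecoins)` then `quotes.extend(config.quotecoins)`). A hedged illustration of what "in order of preference" implies when choosing a marking quote for a pool; the function is hypothetical:

def pick_quote(pool_tokens: set[str], stablecoins: list[str], quotecoins: list[str]) -> str | None:
    # stablecoins are preferred over quotecoins; within each list, order matters
    for candidate in [*stablecoins, *quotecoins]:
        if candidate in pool_tokens:
            return candidate
    return None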

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
 metadata='metadata.json' # the Dockerfile will move metadata-finaldata.json into positon
 accounts = [
-    # dev account #6
-    '0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e', # 0x976EA74026E726554dB657fA54763abd0C3a0aa9
+    # dev account #4
+    '0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6', # 0x90F79bf6EB2c4f870365E785982E1f101E93b906
 ]
 rpc_url = '${rpc_urls.arbsep_alchemy}'
 mirror_source_rpc_url='${rpc_urls.arbsep_alchemy}'

View File

@@ -1,21 +1,24 @@
 aiohappyeyeballs==2.4.3
-aiohttp==3.11.12
+aiohttp==3.11.13
 aiosignal==1.3.1
-alembic==1.14.1
+alembic==1.15.1
 annotated-types==0.7.0
 antlr4-python3-runtime==4.9.3
 asn1crypto==1.5.1
 async-lru==2.0.4
 attrs==23.2.0
 bip-utils==2.9.3
-bitarray==3.0.0
+bitarray==3.1.1
-cachetools==5.5.1
+cachetools==5.5.2
+cattrs==24.1.2
 cbor2==5.6.4
 certifi==2024.2.2
 cffi==1.16.0
 charset-normalizer==3.4.1
 ckzg==1.0.2
+click==8.1.8
 coincurve==20.0.0
+coremltools==8.2
 crcmod==1.7
 cytoolz==0.12.3
 defaultlist==1.0.0
@@ -31,39 +34,73 @@ eth-rlp==1.0.1
 eth-typing==4.4.0
 eth-utils==4.1.1
 eth_abi==5.2.0
+filelock==3.17.0
 frozenlist==1.4.1
+fsspec==2025.2.0
 google-auth==2.35.0
 greenlet==3.0.3
 hexbytes==0.3.1
 hiredis==3.0.0
 idna==3.7
+imageio==2.37.0
+importlib_resources==6.5.2
+Jinja2==3.1.6
+joblib==1.4.2
 jsonschema==4.21.1
 jsonschema-specifications==2023.12.1
+kraken==5.3.0
 kubernetes==31.0.0
+lazy_loader==0.4
+lightning==2.4.0
+lightning-utilities==0.14.0
 lru-dict==1.2.0
+lxml==5.3.1
 Mako==1.3.3
+markdown-it-py==3.0.0
 MarkupSafe==2.1.5
+mdurl==0.1.2
+mpmath==1.3.0
 msgpack-python==0.5.6
 multidict==6.0.5
-numpy==2.2.2
+networkx==3.4.2
+numpy==2.0.2
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==9.1.0.70
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.20.5
+nvidia-nvjitlink-cu12==12.8.93
+nvidia-nvtx-cu12==12.1.105
 oauthlib==3.2.2
 omegaconf==2.3.0
 orjson==3.10.15
+packaging==24.2
+pagerduty==1.0.0
 parsimonious==0.10.0
-pdpyras==5.4.0
+pillow==11.1.0
 prometheus_client==0.21.1
 propcache==0.2.0
 protobuf==5.26.1
 psycopg2-binary==2.9.10
 py-sr25519-bindings==0.2.0
+pyaml==25.1.0
+pyarrow==19.0.1
 pyasn1==0.6.1
 pyasn1_modules==0.4.1
 pycparser==2.22
 pycryptodome==3.20.0
 pydantic==2.9.2
 pydantic_core==2.23.4
+Pygments==2.19.1
 PyNaCl==1.5.0
+python-bidi==0.6.6
 python-dateutil==2.9.0.post0
+pytorch-lightning==2.5.0.post0
 pytz==2025.1
 pyunormalize==15.1.0
 PyYAML==6.0.1
@@ -72,18 +109,32 @@ referencing==0.35.0
 regex==2024.4.28
 requests==2.32.3
 requests-oauthlib==2.0.0
+rich==13.9.4
 rlp==4.0.1
 rpds-py==0.18.0
 rsa==4.9
+scikit-image==0.24.0
+scikit-learn==1.5.2
+scipy==1.13.1
+setuptools==75.8.2
+shapely==2.0.7
 six==1.16.0
 socket.io-emitter==0.1.5.1
 sortedcontainers==2.4.0
 SQLAlchemy==2.0.38
+sympy==1.13.3
+threadpoolctl==3.5.0
+tifffile==2025.2.18
 toolz==0.12.1
+torch==2.4.1
+torchmetrics==1.6.2
+torchvision==0.19.1
+tqdm==4.67.1
+triton==3.0.0
 types-requests==2.32.0.20240914
 typing_extensions==4.12.2
 urllib3==2.2.1
-web3==6.20.3
+web3==6.20.4
 websocket-client==1.8.0
-websockets==14.2
+websockets==13.1
 yarl==1.17.2

View File

@@ -21,7 +21,7 @@ eth-keys
 eth-account
 eth-utils
 eth-typing
-pdpyras # pagerduty
+pagerduty
 numpy
 bitarray
 typing_extensions
@@ -30,3 +30,4 @@ aiohttp
 charset-normalizer
 pytz
 prometheus_client
+krakenex

View File

@@ -35,14 +35,16 @@ class _Token:
     def __repr__(self): return self.__token_name
     def __str__(self): return self.__token_name

-class _FalseToken (_Token):
+class _FalseyToken (_Token):
     def __bool__(self): return False

-NARG = _FalseToken('NARG')
-DELETE = _FalseToken('DELETE') # used as a value token to indicate removal of the key
+NARG = _FalseyToken('NARG')
+DELETE = _FalseyToken('DELETE') # used as a value token to indicate removal of the key

 ADDRESS_0 = '0x0000000000000000000000000000000000000000'
 NATIVE_TOKEN = '0x0000000000000000000000000000000000000001' # We use 0x01 to indicate the use of native ETH wherever a token address is normally required
+USD_FIAT = '0x0000000000000000000000000000000000000055' # We use 0x55 (ASCII 'U') to indicate the use of fiat USD
+CHAIN_ID_OFFCHAIN = -1

 WEI = 1
 GWEI = 1_000_000_000
 ETH = 1_000_000_000_000_000_000
@@ -57,7 +59,7 @@ _cwd() # do this first so that config has the right current working directory
 # ordering here is important!
 from .base.chain import Blockchain # the singletons are loaded into the dexorder.blockchain.* namespace
-from .util import async_yield
+from .util import async_yield, json
 from .base.fixed import Fixed2, FixedDecimals, Dec18
 from .configuration import config
 from .base.account import Account

View File

@@ -0,0 +1 @@
from .accounting import *

View File

@@ -11,6 +11,7 @@ from dexorder import db, dec, NATIVE_TOKEN, from_timestamp, config, ADDRESS_0, n
 from dexorder.base import TransactionReceiptDict
 from dexorder.base.chain import current_chain
 from dexorder.blocks import get_block_timestamp, get_block, current_block
+from dexorder.contract import ContractProxy
 from dexorder.contract.dexorder import get_factory_contract, get_mirrorenv, get_mockenv
 from dexorder.database.model.accounting import AccountingSubcategory, Accounting, AccountingCategory, AccountKind, \
     DbAccount, Reconciliation
@@ -33,27 +34,35 @@ class ReconciliationException(Exception):
     pass

-async def initialize_accounting():
+def initialize_accounting():
     global accounting_initialized
     if not accounting_initialized:
-        await initialize_mark_to_market() # set up mark-to-market first, so accounts can value their initial balances
-        await initialize_accounts()
+        load_accounts_cache()
+        accounting_initialized = True
+
+async def initialize_accounting_runner():
+    global accounting_initialized
+    if not accounting_initialized:
+        await _initialize_mark_to_market() # set up mark-to-market first, so accounts can value their initial balances
+        await _initialize_accounts()
+        load_accounts_cache()
         accounting_initialized = True
         log.info(f'accounting initialized\n\tstablecoins: {config.stablecoins}\n\tquotecoins: {config.quotecoins}\n\tnativecoin: {config.nativecoin}')

-async def initialize_accounts():
+async def _initialize_accounts():
     # Since this is called by top-level main functions outside the Runner, we trigger an explicit db commit/rollback
     try:
         # noinspection PyStatementEffect
-        await initialize_accounts_2()
+        await _initialize_accounts_2()
         db.session.commit()
     except:
         db.session.rollback()
         raise

-async def initialize_accounts_2():
+async def _initialize_accounts_2():
     fm = await FeeManager.get()
     of_account = ensure_account(fm.order_fee_account_addr, AccountKind.OrderFee)
     gf_account = ensure_account(fm.gas_fee_account_addr, AccountKind.GasFee)
@@ -64,11 +73,17 @@ async def initialize_accounts_2():
     await asyncio.gather(
         *map(adjust_balance, (of_account, gf_account, ff_account, *exe_accounts))
     )
-    for db_account in db.session.execute(select(DbAccount)).scalars():
+
+def load_accounts_cache(*, chain=None):
+    if chain is None:
+        chain = current_chain.get()
+    for db_account in db.session.execute(select(DbAccount).where(DbAccount.chain==chain)).scalars():
         _tracked_addrs.add(db_account.address)
+        log.info(f'tracking account {db_account.chain.id} {db_account.address}')

-async def initialize_mark_to_market():
+async def _initialize_mark_to_market():
     quotes.clear()
     quotes.extend(config.stablecoins)
     quotes.extend(config.quotecoins)
@@ -101,9 +116,11 @@ async def initialize_mark_to_market():
         if not config.nativecoin:
             config.nativecoin = weth if weth is not None else meh if meh is not None else None
     elif not config.nativecoin:
-        factory = await get_factory_contract()
-        wrapper = await factory.wrapper()
+        factory = get_factory_contract()
+        impl_addr = await factory.implementation()
+        wrapper = await ContractProxy(impl_addr, 'Vault').wrapper()
         if wrapper != ADDRESS_0:
+            log.info(f'Detected native coin wrapper {wrapper}')
             config.nativecoin = wrapper
     quotes.clear()
     quotes.extend(config.stablecoins)
@@ -113,6 +130,23 @@ async def initialize_mark_to_market():
         add_mark_pool(addr, pool['base'], pool['quote'], pool['fee'])

+def ensure_account(addr: str, kind: AccountKind, *, chain=None) -> DbAccount:
+    if chain is None:
+        chain = current_chain.get()
+    found = db.session.get(DbAccount, (chain, addr))
+    if found:
+        if found.kind != kind:
+            log.warning(f'Account {addr} has wrong kind {found.kind} != {kind}')
+            # found.kind = kind
+        db.session.add(found)
+        _tracked_addrs.add(found.address)
+    else:
+        found = DbAccount(chain=chain, address=addr, kind=kind, balances={})
+        db.session.add(found)
+        _tracked_addrs.add(found.address)
+    return found
+
 async def handle_feeaccountschanged(fee_accounts: EventData):
     try:
         order_fee_account_addr = fee_accounts['args']['orderFeeAccount']
@@ -125,23 +159,7 @@ async def handle_feeaccountschanged(fee_accounts: EventData):
     fm.order_fee_account_addr = order_fee_account_addr
     fm.gas_fee_account_addr = gas_fee_account_addr
     fm.fill_fee_account_addr = fill_fee_account_addr
-    await initialize_accounts_2()
+    await _initialize_accounts_2()

-def ensure_account(addr: str, kind: AccountKind) -> DbAccount:
-    chain = current_chain.get()
-    found = db.session.get(DbAccount, (chain, addr))
-    if found:
-        if found.kind != kind:
-            log.warning(f'Account {addr} has wrong kind {found.kind} != {kind}')
-            found.kind = kind
-        db.session.add(found)
-        _tracked_addrs.add(found.address)
-    else:
-        found = DbAccount(chain=chain, address=addr, kind=kind, balances={})
-        db.session.add(found)
-        _tracked_addrs.add(found.address)
-    return found

 async def accounting_transfer(receipt: TransactionReceiptDict, token: str,
@@ -149,20 +167,21 @@ async def accounting_transfer(receipt: TransactionReceiptDict, token: str,
     block_hash = hexstr(receipt['blockHash'])
     tx_id = hexstr(receipt['transactionHash'])
     await asyncio.gather(
-        add_accounting_row( sender, block_hash, tx_id, AccountingCategory.Transfer, None,
-                            token, -amount, receiver, adjust_decimals=adjust_decimals),
-        add_accounting_row( receiver, block_hash, tx_id, AccountingCategory.Transfer, None,
-                            token, amount, sender, adjust_decimals=adjust_decimals),
+        accounting_transaction_gas(receipt),
+        add_accounting_entry_m2m(sender, block_hash, tx_id, AccountingCategory.Transfer, None,
+                                 token, -amount, receiver, adjust_decimals=adjust_decimals),
+        add_accounting_entry_m2m(receiver, block_hash, tx_id, AccountingCategory.Transfer, None,
+                                 token, amount, sender, adjust_decimals=adjust_decimals),
     )

-async def accounting_transaction_gas(receipt: TransactionReceiptDict, subcategory: AccountingSubcategory):
+async def accounting_transaction_gas(receipt: TransactionReceiptDict, subcategory: AccountingSubcategory = AccountingSubcategory.TransactionGas):
     """ Accounts for the gas spent on the given transaction """
     amount = dec(receipt['gasUsed']) * dec(receipt['effectiveGasPrice'])
-    await add_accounting_row( receipt['from'],
+    await add_accounting_entry_m2m(receipt['from'],
         hexstr(receipt['blockHash']), hexstr(receipt['transactionHash']),
         AccountingCategory.Expense, subcategory, NATIVE_TOKEN, -amount
     )

 async def accounting_placement(order_placed: EventData):
@@ -175,25 +194,34 @@ async def accounting_placement(order_placed: EventData):
         log.warning(f'Rogue DexorderPlacedEvent in tx {hexstr(tx_id)}')
         return
     fm = await FeeManager.get()
-    await add_accounting_row( fm.order_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
-                              AccountingSubcategory.OrderFee, NATIVE_TOKEN, order_fee)
-    await add_accounting_row( fm.gas_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
-                              AccountingSubcategory.GasFee, NATIVE_TOKEN, gas_fee)
+    await add_accounting_entry_m2m(fm.order_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
+                                   AccountingSubcategory.OrderFee, NATIVE_TOKEN, order_fee)
+    await add_accounting_entry_m2m(fm.gas_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
+                                   AccountingSubcategory.GasFee, NATIVE_TOKEN, gas_fee)

-async def accounting_fill(fill: EventData, out_token: str):
+async def accounting_fill(fill: EventData, out_token: str) -> dec:
+    """
+    Returns the mark-to-market USD value of the transaction.
+    """
     block_hash = hexstr(fill['blockHash'])
     tx_id = hexstr(fill['transactionHash'])
     fee = int(fill['args']['fillFee'])
     fm = await FeeManager.get()
-    await add_accounting_row(fm.fill_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
-                             AccountingSubcategory.FillFee, out_token, fee)
+    return await add_accounting_entry_m2m(fm.fill_fee_account_addr, block_hash, tx_id, AccountingCategory.Income,
+                                          AccountingSubcategory.FillFee, out_token, fee)

-async def add_accounting_row(account: str, block_hash: Optional[str], tx_id: Optional[str], category, subcategory, token, amount, note=None,
-                             *, adjust_decimals=True):
+async def add_accounting_entry_m2m(account: str, block_hash: Optional[str], tx_id: Optional[str], category, subcategory, token, amount, note=None,
+                                   *, adjust_decimals=True) -> dec:
+    """
+    Returns the mark-to-market USD value of the entry.
+    """
     if amount == 0:
-        return
+        return dec(0)
+    # Adjust database account if it exists
+    if not is_tracked_address(account):
+        return dec(0)
     if adjust_decimals:
         amount = await adj_dec(token, amount)
     # noinspection PyTypeChecker
@@ -201,33 +229,35 @@ async def add_accounting_entry_m2m(account: str, block_hash: Optional[str], tx_id: Opt
     value = mark_to_market(token, amount)
     log.debug(f'accounting row {time} {account} {category} {subcategory} {token} {amount} ${value}')
     chain_id = current_chain.get().id
+    add_accounting_entry(chain_id, account, time, category, subcategory, token, amount, value, tx_id, note)
+    return value
+
+def add_accounting_entry(chain_id, account, time, category, subcategory, token, amount, value=None, tx_id=None, note=None):
+    if not is_tracked_address(account):
+        return
     db.session.add(Accounting(account=account,
                               time=time, category=category, subcategory=subcategory,
                               token=token, amount=amount, value=value, note=note,
                               chain_id=chain_id, tx_id=tx_id,
                               ))
-    # Adjust database account if it exists
-    if is_tracked_address(account):
-        account_db = db.session.get(DbAccount, (current_chain.get(), account))
-        new_amount = account_db.balances.get(token, dec(0)) + amount
-        if new_amount < 0:
-            log.error(f'negative balance for account {account} when applying accounting row {time} {category} {subcategory} {token} {amount} ${value}')
-        account_db.balances[token] = new_amount
-        metric.account_balance.labels(address=account, token=token).set(new_amount)
-        db.session.add(account_db) # deep changes would not be detected by the ORM
-    else:
-        log.warning(f'No db account found for {account}')
+    account_db = db.session.get(DbAccount, (current_chain.get(), account))
+    new_amount = account_db.balances.get(token, dec(0)) + amount
+    if new_amount < 0:
+        log.error(
+            f'negative balance for account {account} when applying accounting row {time} {category} {subcategory} {token} {amount} ${value}')
+    account_db.balances[token] = new_amount
+    db.session.add(account_db) # deep changes would not be detected by the ORM
+    db.session.flush()

 async def adjust_balance(account: DbAccount, token=NATIVE_TOKEN, subcategory=AccountingSubcategory.InitialBalance, note=None):
     true_balance = await get_balance(account.address, token)
     amount = true_balance - account.balances.get(token, dec(0))
-    await add_accounting_row(account.address, None, None, AccountingCategory.Special, subcategory, NATIVE_TOKEN, amount, note, adjust_decimals=False)
+    await add_accounting_entry_m2m(account.address, None, None, AccountingCategory.Special, subcategory, NATIVE_TOKEN, amount, note, adjust_decimals=False)

-async def reconcile(account: DbAccount, block_id: Optional[str] = None, last_accounting_row_id: Optional[int] = None):
+async def accounting_reconcile(account: DbAccount, block_id: Optional[str] = None, last_accounting_row_id: Optional[int] = None):
+    # First we lock all the relevant tables to ensure consistency
+    db.session.execute(text("LOCK TABLE account, accounting, reconciliation IN EXCLUSIVE MODE"))
     # Fetch the latest reconciliation for the account
     latest_recon = db.session.execute(
         select(Reconciliation).where(
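
The refactor above splits initialization in two: a cheap synchronous initialize_accounting() that only loads the tracked-address cache, and an async initialize_accounting_runner() that also performs mark-to-market and account setup (the backend main() later in this diff awaits the latter). A sketch of the intended call sites; the batch-tool function is an assumption:

def batch_tool_setup():
    # offline jobs that only need is_tracked_address() to work (assumed usage)
    initialize_accounting()

async def backend_setup():
    # the long-running runner does the full async init, as bin main() does below
    await initialize_accounting_runner()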

View File

@@ -0,0 +1,65 @@
import logging
import tempfile
from dataclasses import dataclass
from typing import Optional
import krakenex
from dexorder import timestamp
from dexorder.bin.executable import execute
log = logging.getLogger(__name__)
kraken_api_key=r'HqPHnGsAHunFtaP8YZTFsyh+LauVrcgFHi/US+RseR/4DiT+NG/JpONV'
kraken_api_secret=r'4hvdMdaN5TlNlyk2PShdRCsOE/T4sFzeBrR7ZjC+LUGuAXhBehY8vvWDZSUSyna2OFeOJ9GntPvyXOhrpx70Bg=='
kraken = krakenex.API()
# start and end should be timestamps or datetimes. inclusiveness is [start,end) as usual
def kraken_get_ledger(start=None, end=None):
entries = []
offset=1 # 1-based ffs
if start:
start = timestamp(start) - 1 # kraken start is EXCLUSIVE for some reason
if end:
end = timestamp(end) - 1 # kraken end is INCLUSIVE. :/
while True:
kl = kraken.query_private('Ledgers', {'start':start, 'end':end, 'ofs':offset})
print(repr(kl))
break
if kl.empty:
break
for t in kl.itertuples():
print(t)
# noinspection PyShadowingBuiltins
offset += len(kl)
return entries
@dataclass
class KrakenConfig:
kraken_api_key: Optional[str] = None
kraken_api_secret: Optional[str] = None
kraken_start: Optional[str]= None # timestamp or date
kraken_end: Optional[str] = None # timestamp or date
async def main(kconfig: KrakenConfig):
load_kraken_key(kconfig)
kraken_get_ledger()
def load_kraken_key(kconfig):
temp = tempfile.NamedTemporaryFile()
if not kconfig.kraken_api_key or not kconfig.kraken_api_secret:
log.error("Must set kraken_api_key= and kraken_api_secret= on the command line")
exit(1)
temp.write(kconfig.kraken_api_key.encode())
temp.write(b'\n')
temp.write(kconfig.kraken_api_secret.encode())
temp.write(b'\n')
kraken.load_key(temp.name)
if __name__ == '__main__':
execute(main, parse_args=KrakenConfig)
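
The paging loop above is stubbed, consistent with the "start of kraken accounting (unfinished)" commit: it prints the first response and breaks, and `kl.empty`/`kl.itertuples()` are DataFrame calls that a krakenex dict response does not have. A hedged sketch of a completed loop, assuming Kraken's documented Ledgers response shape {'error': [...], 'result': {'ledger': {...}, 'count': N}}; this is not project code:

def kraken_get_ledger_sketch(start=None, end=None):
    entries = {}
    offset = 1  # the original code treats 'ofs' as 1-based
    while True:
        kl = kraken.query_private('Ledgers', {'start': start, 'end': end, 'ofs': offset})
        if kl.get('error'):
            raise RuntimeError(f'Kraken error: {kl["error"]}')
        page = kl['result'].get('ledger', {})
        if not page:
            break
        entries.update(page)    # keyed by ledger entry id
        offset += len(page)
    return entries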

View File

@@ -19,7 +19,9 @@ class AddressMetadata (TypedDict):

 def save_addrmeta(address: str, meta: AddressMetadata):
-    if meta['type'] == 'Token':
+    if meta is None:
+        pass
+    elif meta['type'] == 'Token':
         meta: OldTokenDict
         updated = Token.load(meta)
         token = db.session.get(Token, (current_chain.get().id, address))

View File

@@ -1,30 +1,33 @@
 import logging
 import socket

-import pdpyras
+import pagerduty

 from dexorder import NARG, config

 log = logging.getLogger(__name__)

-def alert(title, message, dedup_key=NARG, log_level=logging.ERROR, do_log=True):
+def alert(title, message, dedup_key=NARG, log_level=logging.ERROR, do_log=True, severity='critical'):
     if dedup_key is NARG:
         dedup_key = str(hash(title))
     if do_log:
         msg = f'{title}: {message}'
         log.log(log_level, msg) # if log_level=CRITICAL for example, make sure this does not re-alert!
-    alert_pagerduty(title, message, dedup_key, log_level)
+    alert_pagerduty(title, message, dedup_key, severity)

 def warningAlert(title, message, dedup_key=NARG, log_level=logging.WARNING):
-    return alert(title, message, dedup_key, log_level)
+    return alert(title, message, dedup_key, log_level, severity='warning')
+
+def infoAlert(title, message, dedup_key=NARG, log_level=logging.INFO):
+    return alert(title, message, dedup_key, log_level, severity='info')

 pagerduty_session = None
 hostname = None

-def alert_pagerduty(title, message, dedup_key, log_level):
+def alert_pagerduty(title, message, dedup_key, severity):
     if not config.pagerduty:
         return
     # noinspection PyBroadException
@@ -32,10 +35,9 @@ def alert_pagerduty(title, message, dedup_key, log_level):
         global pagerduty_session
         global hostname
         if pagerduty_session is None:
-            pagerduty_session = pdpyras.EventsAPISession(config.pagerduty)
+            pagerduty_session = pagerduty.EventsApiV2Client(config.pagerduty)
             hostname = socket.gethostname()
-        sev = 'error' if log_level >= logging.ERROR else 'warning'
-        pagerduty_session.trigger(title, hostname, severity=sev, custom_details={'message': message}, dedup_key=dedup_key)
+        pagerduty_session.trigger(title, hostname, severity=severity, custom_details={'message': message}, dedup_key=dedup_key, payload=dict(severity=severity))
     except Exception:
         log.warning('Could not notify PagerDuty!', exc_info=True)
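
Call-site view of the reworked plumbing above: severity now reaches PagerDuty explicitly instead of being derived from the Python log level. The first two titles are made-up examples; the third mirrors the actual call in the backend main() diff below.

alert('DbDown', 'cannot reach postgres')                             # defaults to severity='critical'
warningAlert('SlowRPC', 'rpc latency above threshold')               # severity='warning'
infoAlert('Started', 'backend has started', log_level=logging.INFO)  # severity='info'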

View File

@@ -15,6 +15,9 @@ class TransactionRequest:
     type: str

     def __init__(self, type: str, key: Any):
+        """
+        key is used to deduplicate requests
+        """
         self.type = type
         self.key = key
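
The new docstring says `key` deduplicates requests. A sketch of what that typically means; the registry and helper below are assumptions, not taken from this changeset:

pending: dict = {}   # hypothetical registry keyed by TransactionRequest.key

def submit_request(req: "TransactionRequest") -> "TransactionRequest":
    existing = pending.get(req.key)
    if existing is not None:
        return existing   # duplicate key: reuse the in-flight request
    pending[req.key] = req
    return req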

View File

@@ -39,13 +39,15 @@ class Account (LocalAccount):
         MUST call account.release() after the transaction has completed, to return this Account to the available pool.
         """
         Account._init_pool()
-        log.debug(f'available accounts: {Account._pool.qsize()}')
+        # log.debug(f'available accounts: {Account._pool.qsize()}')
         try:
             async with asyncio.timeout(1):
-                result = await Account._pool.get()
+                result: "Account" = await Account._pool.get()
         except asyncio.TimeoutError:
             log.error('waiting for an available account')
             result = await Account._pool.get()
+        # mark as out of pool
+        result._in_pool = False
         metric.account_available.set(Account._pool.qsize())
         return result
@@ -59,16 +61,20 @@ class Account (LocalAccount):
         if Account._main_account is None:
             Account._main_account = account
         Account._pool.put_nowait(account)
+        account._in_pool = True # this account is now in the pool
         Account._all.append(account)
         metric.account_available.set(Account._pool.qsize())
         metric.account_total.set(len(Account._all))
+        log.info(f'Account pool {[a.address for a in Account._all]}')

     def __init__(self, local_account: LocalAccount): # todo chain_id?
         super().__init__(local_account._key_obj, local_account._publicapi) # from digging into the source code
         self.chain_id = current_chain.get().id
         self.signing_middleware = construct_sign_and_send_raw_middleware(self)
         self._nonce: Optional[int] = None
         self.tx_id: Optional[str] = None # current transaction id
+        # release() idempotency tracking
+        self._in_pool: bool = False

     async def next_nonce(self):
         if self._nonce is None:
@@ -85,8 +91,21 @@ class Account (LocalAccount):
         return current_w3.get().eth.get_balance(self.address)

     def release(self):
-        metric.account_available.set(Account._pool.qsize() + 1)
+        """
+        Return this Account to the pool.
+        Idempotent: calling release() multiple times without a new acquire()
+        will only enqueue the account once.
+        """
+        # If we're already in the pool, do nothing.
+        if self._in_pool:
+            # Optional debug log; comment out if too noisy.
+            # log.debug(f'Account {self.address} already in pool; ignoring extra release()')
+            return
         Account._pool.put_nowait(self)
+        self._in_pool = True
+        metric.account_available.set(Account._pool.qsize())

     def __str__(self):
         return self.address
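
The _in_pool flag pairs with the "account release bugfix" and "possible account leak fix" commits above: a double release() used to enqueue the same Account twice, letting two callers sign with one nonce sequence. A minimal sketch of the failure mode this guards against; acquire() and send_with() are stand-ins, since the pool's public acquire method is not shown in this hunk:

async def submit(request):
    account = await Account.acquire()      # hypothetical name; sets _in_pool = False
    try:
        await send_with(account, request)  # hypothetical helper
    finally:
        account.release()   # enqueues once, sets _in_pool = True
        account.release()   # second call is now a no-op instead of a duplicate pool entry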

View File

@@ -4,7 +4,7 @@ from dataclasses import dataclass
 from enum import Enum
 from typing import Optional

-from dexorder import timestamp
+from dexorder import timestamp, from_timestamp
 from dexorder.util import hexbytes
 from dexorder.util.convert import decode_IEEE754
@@ -250,6 +250,26 @@ class ElaboratedSwapOrderStatus:
     def copy(self):
         return copy.deepcopy(self)

+    def __str__(self):
+        msg = f'''
+SwapOrder
+  status: {self.state.name}
+  in: {self.order.tokenIn}
+  out: {self.order.tokenOut}
+  exchange: {self.order.route.exchange.name, self.order.route.fee}
+  amount: {"input" if self.order.amountIsInput else "output"} {self.filledIn if self.order.amountIsInput else self.filledOut}/{self.order.amount}{" to owner" if self.order.outputDirectlyToOwner else ""}
+  minFill: {self.order.minFillAmount}
+  inverted: {self.order.inverted}
+  tranches:
+'''
+        for i in range(len(self.trancheStatus)):
+            tranche = self.order.tranches[i]
+            ts = self.trancheStatus[i]
+            msg += f'    {tranche}\n'
+            for fill in ts.fills:
+                msg += f'        {fill}\n'
+        return msg

 NO_OCO = 18446744073709551615 # max uint64
@@ -263,6 +283,9 @@ DISTANT_FUTURE = 4294967295 # max uint32
 MAX_FRACTION = 65535 # max uint16

+MIN_SLIPPAGE = 0.0001 # one bip
+MIN_SLIPPAGE_EPSILON = 0.000000000003

 @dataclass
 class Tranche:
@@ -344,7 +367,7 @@ class Tranche:
     )

     def __str__(self):
-        msg = f'{self.fraction/MAX_FRACTION:.1%} {"start+" if self.startTimeIsRelative else ""}{self.startTime} to {"start+" if self.startTimeIsRelative else ""}{self.endTime}'
+        msg = f'{self.fraction/MAX_FRACTION:.1%} {"start+" if self.startTimeIsRelative else ""}{from_timestamp(self.startTime)} to {"start+" if self.startTimeIsRelative else ""}{from_timestamp(self.endTime)}'
         if self.marketOrder:
             # for marketOrders, minLine.intercept is the slippage
             msg += f' market order slippage {self.minLine.intercept:.2%}'
@@ -352,11 +375,11 @@ class Tranche:
         if self.minLine.intercept or self.minLine.slope:
             msg += f' >{self.minLine.intercept:.5g}'
             if self.minLine.slope:
-                msg += f'{self.minLine.slope:+.5g}/s({self.minLine.value():5g})'
+                msg += f'{self.minLine.slope:+.5g}/s={self.minLine.value():5g}'
         if self.maxLine.intercept or self.maxLine.slope:
             msg += f' <{self.maxLine.intercept:.5g}'
             if self.maxLine.slope:
-                msg += f'{self.maxLine.slope:+.5g}/s({self.maxLine.value():5g})'
+                msg += f'{self.maxLine.slope:+.5g}/s={self.maxLine.value():5g}'
         if self.rateLimitPeriod:
             msg += f' {self.rateLimitFraction/MAX_FRACTION:.1%} every {self.rateLimitPeriod/60:.0} minutes'
         return msg
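
A hedged illustration (not from this diff) of why MIN_SLIPPAGE_EPSILON exists: a slippage nominally equal to MIN_SLIPPAGE can round-trip through the IEEE-754 line encoding used here and come back a hair below the threshold, so validation should allow an epsilon of leeway. This pairs with the "MIN_SLIPPAGE epsilon leeway" and "bugfix for 0 slippage market orders" commits above; the check itself is an assumed usage.

MIN_SLIPPAGE = 0.0001           # one bip
MIN_SLIPPAGE_EPSILON = 0.000000000003

def slippage_ok(slippage: float) -> bool:
    # accept values that are equal to the minimum up to float round-off
    return slippage >= MIN_SLIPPAGE - MIN_SLIPPAGE_EPSILON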

View File

@@ -74,4 +74,4 @@ async def main():

 if __name__ == '__main__':
-    execute(main())
+    execute(main)

View File

@@ -37,4 +37,4 @@ if __name__ == '__main__':
     time = parse_date(sys.argv[1], ignoretz=True).replace(tzinfo=timezone.utc)
     seconds_per_block = float(sys.argv[2])
     sys.argv = [sys.argv[0], *sys.argv[3:]]
-    execute(main())
+    execute(main)

View File

@@ -0,0 +1,95 @@
import argparse
import logging
from dexorder import db, blockchain
from dexorder.base.order import OrderKey
from dexorder.blocks import current_block, get_block
from dexorder.blockstate import current_blockstate
from dexorder.blockstate.blockdata import BlockData
from dexorder.blockstate.db_state import DbState
from dexorder.blockstate.fork import current_fork
from dexorder.contract.dexorder import VaultContract
from dexorder.order.orderstate import Order
from dexorder.tokens import adjust_decimals
from dexorder.util import json
from dexorder.vault_blockdata import vault_balances, pretty_balances
from dexorder.bin.executable import execute
log = logging.getLogger(__name__)
async def dump_orders(orders, args):
if args.json:
print(json.dumps([order.status.dump() for order in orders]))
else:
first = True
for order in orders:
if first:
first = False
else:
print()
print(await order.pprint())
def command_vault_argparse(subparsers):
parser = subparsers.add_parser('vault', help='show the vault\'s balances and orders')
parser.add_argument('address', help='address of the vault')
parser.add_argument('--all', help='show all orders including closed ones', action='store_true')
parser.add_argument('--json', help='output in JSON format', action='store_true')
async def command_vault(args):
balances = vault_balances.get(args.address, {})
print(f'Vault {args.address} v{await VaultContract(args.address).version()}')
print(f'Balances:')
print(pretty_balances({k: (await adjust_decimals(k, v)) for k, v in balances.items()}))
print(f'Orders:')
i = 0
orders = []
while True:
key = OrderKey(args.address, i)
try:
order = Order.of(key)
except KeyError:
break
if args.all or order.is_open:
orders.append(order)
i += 1
await dump_orders(orders, args)
def command_open_argparse(subparsers):
parser = subparsers.add_parser('open', help='show all open orders')
parser.add_argument('--json', help='output in JSON format', action='store_true')
async def command_open(args):
await dump_orders([Order.of(key) for key in Order.open_orders], args)
async def main(args: list):
parser = argparse.ArgumentParser()
parser.add_argument('--chain-id', default=None)
subparsers = parser.add_subparsers(dest='command')
for name in globals():
if name.startswith('command_') and name.endswith('_argparse'):
globals()[name](subparsers)
parsed = parser.parse_args(args)
print(parsed)
try:
subcommand = globals()[f'command_{parsed.command}']
except KeyError:
parser.print_help()
exit(1)
await blockchain.connect()
db.connect()
db_state = DbState(BlockData.by_opt('db'))
with db.transaction():
state = await db_state.load()
# state.readonly = True
current_blockstate.set(state)
block = await get_block(state.root_hash)
current_block.set(block)
current_fork.set(state.root_fork)
await subcommand(parsed)
if __name__ == '__main__':
execute(main, parse_args=True)
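
Taken together with the bin/examine wrapper earlier in this diff, the new tool is invoked as, e.g., `bin/examine open --json` or `bin/examine vault <address> --all`: the wrapper port-forwards postgres, supplies rpc_url= and db_url=, and passes everything else through to this argparse CLI.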

View File

@@ -7,10 +7,13 @@ import tomllib
 from asyncio import CancelledError
 from signal import Signals
 from traceback import print_exception
-from typing import Coroutine
+from typing import Coroutine, Callable, Union, Any
+
+from omegaconf import OmegaConf

 from dexorder import configuration, config
 from dexorder.alert import init_alerts
+from dexorder.configuration.schema import Config
 from dexorder.metric.metric_startup import start_metrics_server

 if __name__ == '__main__':
@@ -25,7 +28,27 @@ async def _shutdown_coro(_sig, _loop):
         if task is not this_task:
             task.cancel()

-def execute(main:Coroutine, shutdown=None, *, parse_logging=True, parse_args=True):
+def split_args():
+    omegaconf_args = []
+    regular_args = []
+    for arg in sys.argv[1:]:
+        if '=' in arg and not arg.startswith('--'):
+            key, value = arg.split('=', 1)
+            if hasattr(Config, key):
+                omegaconf_args.append(arg)
+                continue
+        regular_args.append(arg)
+    return omegaconf_args, regular_args
+
+def execute(main:Callable[...,Coroutine[Any,Any,Any]], shutdown=None, *, parse_logging=True,
+            parse_args: Union[Callable[[list[str]],Any], type, bool]=True):
+    """
+    if parse_args is a function, then the command-line arguments are given to OmegaConf first, and any args parsed by
+    OmegaConf are stripped from the args list. The remaining args are then passed to parse_args(args)
+    if parse_args is a type, then the type is used to parse the extra command-line arguments using OmegaConf.
+    """
     # config
     configured = False
     if parse_logging:
@@ -42,10 +65,23 @@ def execute(main:Coroutine, shutdown=None, *, parse_logging=True, parse_args=Tru
             logging.basicConfig(level=logging.INFO, stream=sys.stdout)
             log.setLevel(logging.DEBUG)
             log.info('Logging configured to default')
+    xconf = None
     if parse_args:
         # NOTE: there is special command-line argument handling in config/load.py to get a config filename.
         # The -c/--config flag MUST BE FIRST if present.
-        configuration.parse_args()
+        # The rest of the arguments are split by format into key=value for omegaconf and anything else is "regular args"
+        omegaconf_args, regular_args = split_args()
+        configuration.parse_args(omegaconf_args)
+        # must check for `type` before `callable`, because types are also callables
+        if isinstance(parse_args, type):
+            # noinspection PyUnboundLocalVariable
+            xconf = OmegaConf.merge(OmegaConf.structured(parse_args), OmegaConf.from_cli(regular_args))
+        elif callable(parse_args):
+            # noinspection PyUnboundLocalVariable
+            xconf = parse_args(regular_args)
+        else:
+            # just pass the regular args to main
+            xconf = regular_args
     init_alerts()
@@ -59,7 +95,14 @@ def execute(main:Coroutine, shutdown=None, *, parse_logging=True, parse_args=Tru
         loop.add_signal_handler(s, lambda sig=s: asyncio.create_task(_shutdown_coro(sig, loop), name=f'{s.name} handler'))

     # main
-    task = loop.create_task(main, name='main')
+    num_args = len(inspect.signature(main).parameters)
+    if num_args == 0:
+        coro = main()
+    elif num_args == 1:
+        coro = main(xconf)
+    else:
+        raise Exception(f'main() must accept 0 or 1 arguments, not {num_args}')
+    task = loop.create_task(coro, name='main')
     try:
         loop.run_until_complete(task)
     except CancelledError:
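
The new dispatch gives execute() three parse_args modes, plus arity-based invocation of main (zero-parameter mains are called with no arguments, one-parameter mains receive xconf). A sketch of each mode; ToolConfig and my_parser are illustrative, while the dataclass form matches the real execute(main, parse_args=KrakenConfig) call in the kraken module above.

from dataclasses import dataclass

@dataclass
class ToolConfig:            # illustrative structured config
    dry_run: bool = False

async def main(xconf):       # one-parameter main receives xconf
    ...

# execute(main, parse_args=ToolConfig)   # type: OmegaConf.structured merged with from_cli(regular_args)
# execute(main, parse_args=my_parser)    # callable: my_parser(regular_args) builds xconf
execute(main)                            # True: leftover args are passed through to main as a list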

View File

@@ -62,4 +62,4 @@ async def main():

 if __name__ == '__main__':
-    execute(main())
+    execute(main)

View File

@@ -2,7 +2,8 @@ import logging
 from asyncio import CancelledError

 from dexorder import db, blockchain
-from dexorder.alert import warningAlert
+from dexorder.accounting import initialize_accounting_runner
+from dexorder.alert import infoAlert
 from dexorder.base.chain import current_chain
 from dexorder.bin.executable import execute
 from dexorder.blockstate import current_blockstate
@@ -14,13 +15,13 @@ from dexorder.contract.dexorder import get_dexorder_contract
 from dexorder.event_handler import (init, dump_log, handle_vault_created, handle_order_placed,
                                     handle_transfer, handle_swap_filled, handle_order_canceled, handle_order_cancel_all,
                                     handle_uniswap_swaps, handle_vault_impl_changed, update_metrics)
+from dexorder.marks import publish_marks
 from dexorder.memcache import memcache
 from dexorder.memcache.memcache_state import RedisState, publish_all
 from dexorder.order.executionhandler import handle_dexorderexecutions, execute_tranches
 from dexorder.order.triggers import activate_orders, end_trigger_updates
-from dexorder.accounting import initialize_accounting
 from dexorder.runner import BlockStateRunner
-from dexorder.transactions import handle_transaction_receipts, finalize_transactions
+from dexorder.transactions import handle_transaction_receipts, cleanup_jobs
 from dexorder.vaultcreationhandler import handle_vault_creation_requests

 log = logging.getLogger('dexorder')
@@ -60,6 +61,8 @@ def setup_logevent_triggers(runner):
     runner.add_callback(check_activate_orders)
     runner.add_callback(init)

+    runner.add_event_trigger(handle_transaction_receipts)
+
     runner.add_event_trigger(handle_vault_created, get_contract_event('Vault', 'VaultCreated'))
     runner.add_event_trigger(handle_vault_impl_changed, get_contract_event('Vault', 'VaultImplChanged'))
     runner.add_event_trigger(handle_order_placed, get_contract_event('VaultImpl', 'DexorderSwapPlaced'))
@@ -69,18 +72,25 @@ def setup_logevent_triggers(runner):
     runner.add_event_trigger(handle_order_canceled, get_contract_event('VaultImpl', 'DexorderSwapCanceled'))
     runner.add_event_trigger(handle_order_cancel_all, get_contract_event('VaultImpl', 'DexorderCancelAll'))
-    runner.add_event_trigger(handle_transaction_receipts) # todo handle only the transactions that were posted to this block
     runner.add_event_trigger(handle_dexorderexecutions, executions)
     runner.add_event_trigger(handle_vault_creation_requests)
     runner.add_callback(end_trigger_updates)
     runner.add_callback(execute_tranches)
+    # fee adjustments are handled offline by batch jobs
+    # runner.add_event_trigger(handle_fee_limits_changed, get_contract_event('IFeeManager', 'FeeLimitsChanged'))
+    # runner.add_event_trigger(handle_fees_changed, get_contract_event('IFeeManager', 'FeesChanged'))
+    # runner.add_callback(adjust_gas)
+    runner.add_callback(cleanup_jobs)
+    runner.add_callback(publish_marks)
     runner.add_callback(update_metrics)

 # noinspection DuplicatedCode
 async def main():
-    warningAlert('Started', 'backend has started', log_level=logging.INFO)
+    infoAlert('Started', 'backend has started', log_level=logging.INFO)
     await blockchain.connect(autosign=False) # the transaction manager checks out accounts and releases them.
     redis_state = None
     state = None
@@ -107,9 +117,9 @@ async def main():
         if redis_state:
             # load initial state
             log.info('initializing redis with root state')
-            await redis_state.save(state.root_fork, state.diffs_by_branch[state.root_branch.id])
+            await redis_state.init(state, state.root_fork)

-        await initialize_accounting()
+        await initialize_accounting_runner()

         runner = BlockStateRunner(state, publish_all=publish_all if redis_state else None)
         setup_logevent_triggers(runner)
@@ -121,7 +131,6 @@ async def main():
         runner.on_promotion.append(db_state.finalize)
         if redis_state:
             runner.on_head_update.append(redis_state.save)
-            runner.on_promotion.append(finalize_transactions)

     try:
         await runner.run()
@@ -131,4 +140,4 @@ async def main():

 if __name__ == '__main__':
-    execute(main())
+    execute(main)

View File

@@ -95,7 +95,7 @@ async def write_metadata( pools, mirror_pools ):
     pool_dicts = [get_pool(addr) for (addr,_inverted) in mirror_pools]
     pool_dicts = await asyncio.gather(*pool_dicts)
     for data, addr, (_,inverted) in zip(pool_dicts, pools, mirror_pools):
-        data['x'] = dict(data=dict(uri=f'https://app.dexorder.trade/ohlc/', chain=42161, symbol=addr, inverted=inverted))
+        data['x'] = dict(data=dict(uri=f'https://app.dexorder.com/ohlc/', chain=42161, symbol=addr, inverted=inverted))
     tokens = set(p['base'] for p in pool_dicts)
     tokens.update(p['quote'] for p in pool_dicts)
     tokens = await asyncio.gather(*[get_token(t) for t in tokens])
@@ -119,7 +119,7 @@ async def main():
     delay = max(0.010, config.polling)
     update_once = config.polling <= 0
     global source_w3
-    source_w3 = await create_w3(config.mirror_source_rpc_url, name='source', autosign=False)
+    source_w3 = await create_w3(config.mirror_source_rpc_url, name='source', autosign=False, archive_url=[])
     pools = (config.mirror_pools or [])
     if not pools:
         log.error('must configure mirror_pools')
@@ -144,35 +144,28 @@ async def main():
     tokens = set(i[1] for i in pool_infos).union(i[2] for i in pool_infos)
     log.debug(f'Mirroring tokens')
-    txs = []
     for t in tokens:
         # noinspection PyBroadException
         try:
             info = await get_token_info(t)
             # anvil had trouble estimating the gas, so we hardcode it.
             tx = await mirrorenv.transact.mirrorToken(info, gas=1_000_000)
-            txs.append(tx.wait())
+            await tx.wait()
         except Exception:
             log.exception(f'Failed to mirror token {t}')
             exit(1)
-    results = await asyncio.gather(*txs)
-    if any(result['status'] != 1 for result in results):
-        log.error('Mirroring a token reverted.')
-        exit(1)
     log.info(f'Tokens deployed')

     log.debug(f'Mirroring pools {", ".join(pools)}')
-    txs = []
     for pool, info in zip(pools, pool_infos):
         # noinspection PyBroadException
         try:
             # anvil had trouble estimating the gas, so we hardcode it.
             tx = await mirrorenv.transact.mirrorPool(info, gas=5_500_000)
+            await tx.wait()
         except Exception:
             log.exception(f'Failed to mirror pool {pool}')
             exit(1)
-        txs.append(tx.wait())
-    await asyncio.gather(*txs)
     log.info('Pools deployed')

     mirror_pool_list = []
@@ -197,6 +190,7 @@ async def main():
     while True:
         wake_up = now() + delay
         # log.debug(f'querying {pool}')
+        tx = None
         try:
             price = await get_pool_price(pool)
             if price != last_prices.get(pool):
@@ -207,7 +201,10 @@ async def main():
             addr, inverted = mirror_pools[pool]
             log.debug(f'Mirrored {addr} {price}')
         except Exception as x:
-            log.debug(f'Could not update {pool}: {x}')
+            log.debug(f'Could not update {pool}: {x} {tx}')
+            if tx is not None:
+                tx.account.reset_nonce()
+                tx.account.release()
             continue
         try:
             pool = next(pool_iter)
@@ -223,4 +220,4 @@ async def main():

 if __name__ == '__main__':
-    execute(main())
+    execute(main)


@@ -1,9 +1,9 @@
 import logging
-from sqlalchemy import select
+from sqlalchemy import select, text
 from dexorder import db, blockchain
-from dexorder.accounting import reconcile
+from dexorder.accounting import accounting_reconcile
 from dexorder.bin.executable import execute
 from dexorder.blocks import fetch_latest_block, current_block
 from dexorder.database.model import DbAccount
@@ -15,10 +15,11 @@ async def main():
     db.connect()
     block = await fetch_latest_block()
     current_block.set(block)
+    db.session.execute(text("LOCK TABLE account, accounting, reconciliation IN EXCLUSIVE MODE"))
     try:
         accounts = db.session.execute(select(DbAccount)).scalars().all()
         for account in accounts:
-            await reconcile(account)
+            await accounting_reconcile(account)
         db.session.commit()
         log.info('Reconciliation complete')
     except:
@@ -27,5 +28,4 @@ async def main():
 if __name__ == '__main__':
-    execute(main())
+    execute(main)
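The new LOCK TABLE statement serializes concurrent reconciliation runs against the live accounting writer. A minimal sketch of the pattern (table names taken from the diff; the engine setup is illustrative):

import sqlalchemy
from sqlalchemy import text
from sqlalchemy.orm import Session

engine = sqlalchemy.create_engine('postgresql://dexorder:redroxed@localhost/dexorder')
with Session(engine) as session:
    # EXCLUSIVE MODE blocks concurrent writers (but not plain readers) until this
    # transaction commits or rolls back, so reconciliation sees a stable snapshot.
    session.execute(text("LOCK TABLE account, accounting, reconciliation IN EXCLUSIVE MODE"))
    ...  # read balances, insert reconciliation rows
    session.commit()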


@@ -1,14 +1,23 @@
 import logging
+from dataclasses import dataclass
-from dexorder import blockchain, db
+from dexorder import blockchain, db, dec
 from dexorder.bin.executable import execute
 log = logging.getLogger(__name__)
-async def main():
-    await blockchain.connect()
-    db.connect()
+@dataclass
+class RefillConfig:
+    refill_level: dec
+    refill_accounts: list[str]
+async def main(refill_config: RefillConfig):
+    # await blockchain.connect()
+    # db.connect()
+    log.info(f'Refilling to {refill_config.refill_level:.18f} ETH')
+    log.info(f'Refilling accounts: {refill_config.refill_accounts}')
 if __name__ == '__main__':
-    execute(main())
+    execute(main, parse_args=RefillConfig)


@@ -2,22 +2,21 @@ import asyncio
 import itertools
 import logging
 from random import random
-from typing import Any, Optional, Union, Callable
+from typing import Any, Optional, Union
 # noinspection PyPackageRequirements
 from aiohttp import ClientResponseError, ClientSession, ClientTimeout, TCPConnector
 from eth_typing import URI
 from hexbytes import HexBytes
 from web3 import WebsocketProviderV2, AsyncWeb3, AsyncHTTPProvider
-from web3.exceptions import Web3Exception
 from web3.middleware.signing import async_construct_sign_and_send_raw_middleware
+from web3.providers.async_base import AsyncJSONBaseProvider
 from web3.types import RPCEndpoint, RPCResponse
 from .. import current_w3, Blockchain, config, Account, NARG
 from ..base.chain import current_chain
 from ..contract import get_contract_data
 log = logging.getLogger(__name__)
@@ -56,39 +55,22 @@ async def create_w3(rpc_url: Union[str,list[str]]=None, account=NARG, autosign=T
     if not rpc_urls:
         raise ValueError("No rpc_url configured")
-    w3_instances = []
-    archive_instances = []
-    for (url, archive) in itertools.chain(((url, False) for url in rpc_urls), ((url, True) for url in archive_urls)):
-        connector = TCPConnector(limit=config.concurrent_rpc_connections)
-        session = ClientSession(connector=connector, timeout=ClientTimeout(config.rpc_timeout))
-        http_provider = RetryHTTPProvider(url)
-        await http_provider.cache_async_session(session)
-        w3 = AsyncWeb3(http_provider)
-        w3.middleware_onion.add(archive_intercept_middleware, 'block_number_intercept_middleware')
-        w3.middleware_onion.remove('attrdict')
-        w3.middleware_onion.add(clean_input_async, 'clean_input')
-        w3.eth.Contract = _make_contract(w3.eth)
-        # Highest block number that has reported a -32000 error indicating a lack of history that far back
-        w3.archive_fault_height = -1
-        if autosign:
-            if account is NARG:
-                account = Account.get()
-            if account is not None:
-                # noinspection PyTypeChecker
-                w3.middleware_onion.add(await async_construct_sign_and_send_raw_middleware(account))
-                w3.eth.default_account = account.address
-        if archive:
-            archive_instances.append(w3)
-        else:
-            w3_instances.append(w3)
-    # Ensure all instances share the same chain ID
-    chain_ids = await asyncio.gather(*(w3.eth.chain_id for w3 in itertools.chain(w3_instances, archive_instances)))
-    if len(set(chain_ids)) != 1:
-        raise RuntimeError("All RPC URLs must belong to the same blockchain")
-    # noinspection PyTypeChecker
-    return RoundRobinWebProxy(w3_instances, archive_instances) if len(w3_instances) > 1 or archive_instances else w3_instances[0]
+    provider = await RoundRobinHTTPProvider.construct(rpc_urls, archive_urls) if len(rpc_urls) > 1 or archive_urls else await RetryHTTPProvider.construct(rpc_urls[0])
+    w3 = AsyncWeb3(provider)
+    if archive_urls:
+        w3.middleware_onion.add(archive_intercept_middleware, 'block_number_intercept_middleware')
+    w3.middleware_onion.remove('attrdict')
+    w3.middleware_onion.add(clean_input_async, 'clean_input')
+    w3.eth.Contract = _make_contract(w3.eth)
+    if autosign:
+        if account is NARG:
+            account = Account.get()
+        if account is not None:
+            # noinspection PyTypeChecker
+            w3.middleware_onion.add(await async_construct_sign_and_send_raw_middleware(account))
+            w3.eth.default_account = account.address
+    return w3
 async def create_w3_ws(ws_url=None) -> AsyncWeb3:
@@ -199,31 +181,29 @@ ARCHIVE_ERRORS = {
     'state recreation l2 gas depth limit exceeded',
 }
+def is_archive_method(method, params):
+    expected_args = ARCHIVE_METHODS.get(method, -1)
+    return len(params) == expected_args
 async def archive_intercept_middleware(make_request, w3):
     """
     Middleware to intercept any call with `block_number` and manage marking archive_fault_height
     """
     async def middleware(method, params):
         # Only intercept relevant methods
-        expected_args = ARCHIVE_METHODS.get(method,-1)
-        is_archive_method = len(params) == expected_args
+        is_archival = is_archive_method(method, params)
         block_height = None
-        if is_archive_method:
-            block_identifier = params[-1]
-            if block_identifier != 'latest':
-                block_height = int(block_identifier, 16) if type(block_identifier) is str else int(params[-1])
-                if block_height <= w3.archive_fault_height:
-                    # this block is at least as old as another block that already failed to fetch history from this RPC
-                    raise ArchiveException(method, block_height)
         resp = await make_request(method, params)
-        if is_archive_method and 'error' in resp and resp['error']['message'] in ARCHIVE_ERRORS:
-            if block_height is None:
+        if is_archival and 'error' in resp and resp['error']['message'] in ARCHIVE_ERRORS:
+            block_identifier = params[-1]
+            if block_identifier not in ('latest', 'pending',):
+                block_height = int(block_identifier, 16) if type(block_identifier) is str else int(params[-1])
+                raise ArchiveException(method, block_height)
+            else:
                 # noinspection PyUnboundLocalVariable
                 raise Exception(f'Got an archive fault using a block_identifier of {block_identifier}: {w3.provider.endpoint_uri} {method} {params}\n{resp}')
-            # noinspection PyTypeChecker
-            w3.archive_fault_height = max(w3.archive_fault_height, block_height)
-            raise ArchiveException(method, block_height)
-        resp = await make_request(method, params)
         return resp
     return middleware
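The extracted is_archive_method helper works because each archival RPC method takes its optional block identifier as the final positional parameter, so supplying it changes the call's arity. A hedged illustration (the ARCHIVE_METHODS entry shown is an assumption; only the shape matters):

ARCHIVE_METHODS = {'eth_call': 2}  # assumed entry: (call_object, block_identifier)

def is_archive_method(method, params):
    # archival only when the caller supplied the optional block identifier
    return len(params) == ARCHIVE_METHODS.get(method, -1)

assert is_archive_method('eth_call', ({'to': '0x...'}, '0x10'))  # explicit historical block
assert not is_archive_method('eth_call', ({'to': '0x...'},))     # implicit 'latest'
assert not is_archive_method('eth_blockNumber', ())              # never archival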
@@ -236,77 +216,27 @@ class ArchiveException (Exception):
         self.block_number = block_number
-class RoundRobinWebProxy:
-    def __init__(self, w3_instances, archive_instances):
-        if not w3_instances:
-            raise ValueError("At least one w3 instance is required")
-        self._w3_instances = w3_instances
-        self._archive_instances = archive_instances
-        self._index = 0
-        self._archive_index = 0
-        for w3 in self._w3_instances:
-            w3.manager.coro_request = self.make_coro_request_function(w3)
-    def __getattr__(self, name):
-        # proxy in a round-robin fashion
-        return getattr(self._current(), name)
-    def _current(self):
-        if self._index >= len(self._w3_instances):
-            self._index = 0
-        current_instance = self._w3_instances[self._index]
-        self._index += 1
-        return current_instance
-    def _current_archive(self):
-        if self._archive_index >= len(self._archive_instances):
-            self._archive_index = 0
-        current_instance = self._archive_instances[self._archive_index]
-        self._archive_index += 1
-        return current_instance
-def make_coro_request_function(rrwp, w3):
-    # This replaces w3.manager.coro_request with our own version that catches ArchiveExceptions and retries them
-    # on an archive w3
-    ### NOTE!!! ###
-    # we use `self` to mean the RequestManager so we can copy that code directly over here.
-    # instead we rename the RoundRobinWebProxy rrwp and name the w3 instance too
-    self = w3.manager
-    async def RequestManager__coro_request(
-        method: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
-        params: Any,
-        error_formatters: Optional[Callable[..., Any]] = None,
-        null_result_formatters: Optional[Callable[..., Any]] = None,
-    ) -> Any:
-        """
-        Coroutine for making a request using the provider
-        """
-        try:
-            response = await self._coro_make_request(method, params)
-            return self.formatted_response(
-                response, params, error_formatters, null_result_formatters
-            )
-        except ArchiveException as e:
-            w3.archive_fault_height = max(w3.archive_fault_height, e.block_number)
-            return await rrwp._current_archive().manager.coro_request(method, params, error_formatters, null_result_formatters)
-    return RequestManager__coro_request
 class RetryHTTPProvider (AsyncHTTPProvider):
+    @staticmethod
+    async def construct(endpoint_uri: Optional[Union[URI, str]] = None, request_kwargs: Optional[Any] = None, retries: int = 10):
+        result = RetryHTTPProvider(endpoint_uri, request_kwargs, retries)
+        connector = TCPConnector(limit=config.concurrent_rpc_connections)
+        session = ClientSession(connector=connector, timeout=ClientTimeout(config.rpc_timeout))
+        await result.cache_async_session(session)
+        return result
-    def __init__(self, endpoint_uri: Optional[Union[URI, str]] = None, request_kwargs: Optional[Any] = None) -> None:
+    def __init__(self, endpoint_uri: Optional[Union[URI, str]] = None, request_kwargs: Optional[Any] = None, retries: int = 10) -> None:
         super().__init__(endpoint_uri, request_kwargs)
         self.in_flight = asyncio.Semaphore(config.concurrent_rpc_connections)
         self.rate_allowed = asyncio.Event()
         self.rate_allowed.set()
+        self.retries = retries
     async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
-        MAX_TRIES = 10
         wait = 0
-        for _ in range(MAX_TRIES):
+        for _ in range(self.retries):
             try:
                 async with self.in_flight:
                     await self.rate_allowed.wait()
@@ -325,6 +255,63 @@ class RetryHTTPProvider (AsyncHTTPProvider):
                 await asyncio.sleep(wait)
             finally:
                 self.rate_allowed.set()
-        # finally:
-        #     log.debug(f'Ended request of RPC call {method}')
-        raise IOError(f'Could not query rpc server after {MAX_TRIES} tries: {method} {params}')
+        raise IOError(f'Could not query rpc server after {self.retries} tries: {method} {params}')
+class RoundRobinHTTPProvider (AsyncJSONBaseProvider):
+    @staticmethod
+    async def construct(endpoint_uris: list[str], archive_uris: list[str]):
+        providers = [RetryHTTPProvider.construct(uri, retries=1) for uri in endpoint_uris]
+        archive_providers = [RetryHTTPProvider.construct(uri, retries=1) for uri in archive_uris]
+        providers = await asyncio.gather(*providers)
+        archive_providers = await asyncio.gather(*archive_providers)
+        # Ensure all instances share the same chain ID
+        chain_ids = await asyncio.gather(*(AsyncWeb3(provider).eth.chain_id for provider in itertools.chain(providers, archive_providers)))
+        if len(set(chain_ids)) != 1:
+            raise RuntimeError("All RPC URLs must belong to the same blockchain")
+        return RoundRobinHTTPProvider(providers, archive_providers)
+    def __init__(self, providers: list[RetryHTTPProvider], archive_providers: list[RetryHTTPProvider]):
+        super().__init__()
+        self.providers = providers
+        self.archive_providers = archive_providers
+        for provider in self.providers:
+            provider.archive_fault_height = 0
+        self.index = 0
+        self.archive_index = 0
+    async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
+        provider = self._current()
+        is_archival = is_archive_method(method, params)
+        try:
+            if is_archival:
+                block_identifier = params[-1]
+                if block_identifier not in ('latest', 'pending',):
+                    block_height = int(block_identifier, 16) if type(block_identifier) is str else int(params[-1])
+                    if block_height <= provider.archive_fault_height:
+                        # this block is at least as old as another block that already failed to fetch history from this RPC
+                        raise ArchiveException(method, block_height)
+            return await provider.make_request(method, params)
+        except ArchiveException as e:
+            provider.archive_fault_height = max(provider.archive_fault_height, e.block_number)
+            if not self.archive_providers:
+                raise
+            return await self._current_archive().make_request(method, params)
+    def _current(self) -> RetryHTTPProvider:
+        if self.index >= len(self.providers):
+            self.index = 0
+        current_provider = self.providers[self.index]
+        self.index += 1
+        return current_provider
+    def _current_archive(self) -> RetryHTTPProvider:
+        if self.archive_index >= len(self.archive_providers):
+            self.archive_index = 0
+        current_provider = self.archive_providers[self.archive_index]
+        self.archive_index += 1
+        return current_provider
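Taken together, the new stack replaces the old per-w3 proxy with provider-level rotation: each request goes to the next primary endpoint, and an ArchiveException reroutes that one request to a rotating archive endpoint. A condensed sketch of the control flow (stub classes, not the real providers):

class ArchiveException(Exception): ...

class Rotation:
    def __init__(self, items):
        self.items, self.index = items, 0
    def next(self):
        item = self.items[self.index % len(self.items)]
        self.index += 1
        return item

async def routed_request(primaries: Rotation, archives: Rotation, method, params):
    try:
        return await primaries.next().make_request(method, params)  # normal path
    except ArchiveException:
        if not archives.items:
            raise                                                   # no fallback configured
        return await archives.next().make_request(method, params)   # historical fallback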


@@ -22,6 +22,11 @@ from dexorder.util import hexbytes
 log = logging.getLogger(__name__)
+def blocktime():
+    """ timestamp of the most recent block seen in real-time, NOT the current block being worked on """
+    return latest_block[current_chain.get().id].timestamp
 async def get_block_timestamp(block_id: Union[bytes,int]) -> int:
     block = await get_block(block_id)
     if block is None:


@@ -64,7 +64,7 @@ class BlockData (Generic[T]):
         if self.lazy_getitem:
             lazy = self.lazy_getitem(self, item)
             if lazy is not NARG:
-                state.set(state.root_fork, self.series, item, lazy)
+                state.set(state.root_fork, self.series, item, lazy, readonly_override=True)
                 result = lazy
         if result is NARG:
             raise KeyError


@@ -53,7 +53,10 @@ class BlockState:
     with a diff height of the root branch or older is always part of the finalized blockchain.
     """
+    class ReadOnlyError(Exception): ...
     def __init__(self):
+        self.readonly = False
         self._root_branch: Optional[Branch] = None
         self._root_fork: Optional[Fork] = None
         self.height: int = 0  # highest branch seen
@@ -80,6 +83,8 @@ class BlockState:
     @root_branch.setter
     def root_branch(self, value: Branch):
+        if self.readonly:
+            raise self.ReadOnlyError()
         self._root_branch = value
         self._root_fork = Fork([value])
@@ -92,6 +97,8 @@ class BlockState:
         return self._root_branch.head
     def init_root_block(self, root_block: Block) -> Fork:
+        if self.readonly:
+            raise self.ReadOnlyError()
         assert self.root_branch is None
         return self.add_branch(Branch.from_block(root_block))
@@ -113,6 +120,8 @@ class BlockState:
         should only be set to False when it is assured that the branch may be joined by height alone, because
         the branch join is known to be at a live-blockchain-finalized height.
         """
+        if self.readonly:
+            raise self.ReadOnlyError()
         assert branch.id not in self.branches_by_id
         if self.root_branch is None:
@@ -150,11 +159,13 @@ class BlockState:
         self.branches_by_height[branch.height].append(branch)
         self.branches_by_id[branch.id] = branch
         self.height = max(self.height, branch.height)
-        state_log.info(f'added branch {fork}')
+        # state_log.debug(f'added branch {fork}')
         return fork
     def remove_branch(self, branch: Branch, *, remove_series_diffs=True):
+        if self.readonly:
+            raise self.ReadOnlyError()
         if branch.height == self.height and len(self.branches_by_height[branch.height]) == 1:
             # this is the only branch at this height: compute the new lower height
             other_heights = [b.height for b in self.branches_by_id.values() if b is not branch]
@@ -210,7 +221,9 @@ class BlockState:
         return DELETE
-    def set(self, fork: Fork, series, key, value, overwrite=True):
+    def set(self, fork: Fork, series, key, value, overwrite=True, *, readonly_override=False):
+        if not readonly_override and self.readonly:
+            raise self.ReadOnlyError()
         # first look for an existing value
         branch = fork.branch
         diffs = self.diffs_by_series.get(series,{}).get(key)
@@ -236,6 +249,8 @@ class BlockState:
         return old_value
     def unload(self, fork: Optional[Fork], series, key):
+        if self.readonly:
+            raise self.ReadOnlyError()
         self.unloads[fork.branch_id].append((series, key))
     def iteritems(self, fork: Optional[Fork], series):
@@ -285,6 +300,8 @@ class BlockState:
         Returns the set of diffs for the promoted fork.
         """
+        if self.readonly:
+            raise self.ReadOnlyError()
         found_root = False
         promotion_branches = []
         for branch in reversed(fork.branches):
@@ -350,6 +367,7 @@ class FinalizedBlockState:
     """
     def __init__(self):
+        self.readonly = False
         self.data = {}
         self.by_hash = {}
@@ -361,6 +379,8 @@ class FinalizedBlockState:
     def set(self, _fork: Optional[Fork], series, key, value, overwrite=True):
         assert overwrite
+        if self.readonly:
+            raise BlockState.ReadOnlyError()
         self.data.setdefault(series, {})[key] = value
     def iteritems(self, _fork: Optional[Fork], series):
@@ -373,6 +393,8 @@ class FinalizedBlockState:
         return self.data.get(series,{}).values()
     def delete_series(self, _fork: Optional[Fork], series: str):
+        if self.readonly:
+            raise BlockState.ReadOnlyError()
         del self.data[series]
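All of the readonly guards above follow one pattern: mutators raise unless the state is writable, with an explicit escape hatch for cache fills (the readonly_override=True used by BlockData's lazy_getitem). A distilled sketch:

class GuardedState:
    class ReadOnlyError(Exception): ...

    def __init__(self):
        self.readonly = False
        self.data = {}

    def set(self, key, value, *, readonly_override=False):
        # writes are rejected once the state is frozen, except for explicitly
        # whitelisted writes such as caching a lazily fetched value
        if self.readonly and not readonly_override:
            raise self.ReadOnlyError()
        self.data[key] = value

state = GuardedState()
state.readonly = True
state.set('cached', 42, readonly_override=True)  # allowed: cache fill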


@@ -8,7 +8,7 @@ from omegaconf.errors import OmegaConfBaseException
 from .schema import Config
-schema = OmegaConf.structured(Config())
+schema = OmegaConf.structured(Config(), flags={'struct': False})
 _config_file = 'dexorder.toml'


@@ -16,6 +16,7 @@ class Config:
     ws_url: Optional[str] = 'ws://localhost:8545'
     rpc_urls: Optional[dict[str,str]] = field(default_factory=dict)
     db_url: Optional[str] = 'postgresql://dexorder:redroxed@localhost/dexorder'
+    db_readonly: bool = False
     dump_sql: bool = False
     redis_url: Optional[str] = 'redis://localhost:6379'
@@ -33,14 +34,25 @@ class Config:
     backfill: int = 0  # if not 0, then runner will initialize an empty database by backfilling from the given block height. Use negative numbers to indicate a number of blocks before the present.
     accounts: list[str] = field(default_factory=list)  # the pool of accounts is used round-robin
+    adjuster: Optional[str] = None  # special account allowed to adjust fees. must NOT be listed in accounts.
+    order_gas: int = 425000  # cost to place a conditional order
+    execution_gas: int = 275000  # cost to perform a successful execution
+    order_gas_multiplier: float = 2.0  # multiply the gas amount by this to get the fee
+    execution_gas_multiplier: float = 2.0  # multiply the gas amount by this to get the fee
+    fee_leeway: float = 0.1  # do not adjust fees if they are within this proportion
     min_gas: str = '0'
+    mark_publish_seconds: float = 60  # publish mark prices every this number of seconds
     # Order slashing
     slash_kill_count: int = 5
     slash_delay_base: float = 60  # one minute
     slash_delay_mul: float = 2  # double the delay each time
     slash_delay_max: int = 15 * 60
+    # Tranches are paused for this long after they trigger a slippage control
+    slippage_control_delay: float = 10  # matches the 10-second TWAP used by our uniswap router
     walker_name: str = 'default'
     walker_flush_interval: float = 300
     walker_stop: Optional[int] = None  # block number of the last block the walker should process
@@ -54,6 +66,3 @@ class Config:
     stablecoins: list[str] = field(default_factory=list)  # primary stablecoins which are marked to $1
     quotecoins: list[str] = field(default_factory=list)  # quote tokens like WETH that have stablecoin markets
     nativecoin: Optional[str] = None  # used for accounting of native values. e.g. address of WETH
-    # account: target_balance
-    refill: dict[str,str] = field(default_factory=dict)


@@ -8,7 +8,6 @@ from web3.types import TxReceipt, TxData
 from dexorder import current_w3, Account
 from dexorder.blocks import current_block
-from dexorder.blockstate.fork import current_fork
 from dexorder.util import hexstr
 log = logging.getLogger(__name__)
@@ -19,10 +18,11 @@ class ContractTransaction:
         # This is the standard RPC transaction dictionary
         self.tx = tx
-        # These three fields are populated only after signing
+        # These fields are populated only after signing
         self.id_bytes: Optional[bytes] = None
         self.id: Optional[str] = None
         self.data: Optional[bytes] = None
+        self.account: Optional[Account] = None
         # This field is populated only after the transaction has been mined
         self.receipt: Optional[TxReceipt] = None  # todo could be multiple receipts for different branches!
@@ -33,6 +33,8 @@ class ContractTransaction:
     async def wait(self) -> TxReceipt:
         if self.receipt is None:
             self.receipt = await current_w3.get().eth.wait_for_transaction_receipt(self.id)
+            if self.account is not None:
+                self.account.release()
         return self.receipt
     async def sign(self, account: Account):
@@ -42,6 +44,7 @@ class ContractTransaction:
         self.data = signed['rawTransaction']
         self.id_bytes = signed['hash']
         self.id = hexstr(self.id_bytes)
+        self.account = account
 class DeployTransaction (ContractTransaction):
@@ -74,19 +77,22 @@ def call_wrapper(addr, name, func):
 def transact_wrapper(addr, name, func):
     async def f(*args, **kwargs):
+        tx = await func(*args).build_transaction(kwargs)
+        ct = ContractTransaction(tx)
+        account = await Account.acquire()
+        if account is None:
+            raise ValueError(f'No account to sign transaction {addr}.{name}()')
         try:
-            tx = await func(*args).build_transaction(kwargs)
-            ct = ContractTransaction(tx)
-            account = Account.get()
-            if account is None:
-                raise ValueError(f'No account to sign transaction {addr}.{name}()')
             await ct.sign(account)
-            tx_id = await current_w3.get().eth.send_raw_transaction(ct.data)
-            assert tx_id == ct.id_bytes
-            return ct
-        except Web3Exception as e:
-            e.args += addr, name
-            raise e
+            try:
+                tx_id = await current_w3.get().eth.send_raw_transaction(ct.data)
+                assert tx_id == ct.id_bytes
+                return ct
+            except Web3Exception as e:
+                e.args += addr, name
+                raise e
+        finally:
+            account.release()
     return f
@@ -148,10 +154,14 @@ class ContractProxy:
     def __getattr__(self, item):
         if item == 'constructor':
             found = self.contract.constructor
-        elif item in self.contract.functions:
-            found = self.contract.functions[item]
         else:
-            raise AttributeError(item)
+            funcs = self.contract.functions
+            # In web3.py v6+, contract functions are exposed as attributes, not via __getitem__.
+            # Using getattr ensures we obtain the callable factory for the function; indexing may return None.
+            # Additionally, guard against unexpected None to fail fast with a clear error.
+            found = getattr(funcs, item, None)
+            if not callable(found):
+                raise AttributeError(f"Function '{item}' not found on contract {self._interface_name} at {self.address}")
         return self._wrapper(self.address, item, found)
     def __repr__(self):


@@ -1,24 +0,0 @@
-import logging
-from dexorder import db
-from dexorder.contract import ERC20, CONTRACT_ERRORS
-log = logging.getLogger(__name__)
-async def token_decimals(addr):
-    key = f'td|{addr}'
-    try:
-        return db.kv[key]
-    except KeyError:
-        # noinspection PyBroadException
-        try:
-            decimals = await ERC20(addr).decimals()
-        except CONTRACT_ERRORS:
-            log.warning(f'token {addr} has no decimals()')
-            decimals = 0
-        except Exception:
-            log.debug(f'could not get token decimals for {addr}')
-            return None
-        db.kv[key] = decimals
-        return decimals


@@ -3,7 +3,7 @@ import logging
 from contextvars import ContextVar
 import sqlalchemy
-from sqlalchemy import Engine
+from sqlalchemy import Engine, event
 from sqlalchemy.orm import Session, SessionTransaction
 from .migrate import migrate_database
@@ -99,7 +99,7 @@ class Db:
         _session.set(None)
     # noinspection PyShadowingNames
-    def connect(self, url=None, migrate=True, reconnect=False, dump_sql=None):
+    def connect(self, url=None, migrate=True, reconnect=False, dump_sql=None, readonly: bool = None):
         if _engine.get() is not None and not reconnect:
             return None
         if url is None:
@@ -114,6 +114,19 @@ class Db:
         if dump_sql is None:
             dump_sql = config.dump_sql
         engine = sqlalchemy.create_engine(url, echo=dump_sql, json_serializer=json.dumps, json_deserializer=json.loads)
+        if readonly is None:
+            readonly = config.db_readonly
+        if readonly:
+            @event.listens_for(engine, "connect")
+            def set_readonly(dbapi_connection, _connection_record):
+                cursor = dbapi_connection.cursor()
+                try:
+                    cursor.execute("SET default_transaction_read_only = on;")
+                    log.info('database connection set to READ ONLY')
+                finally:
+                    cursor.close()
         if migrate:
             migrate_database(url)
         with engine.connect() as connection:
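With db_readonly set, every pooled connection is flipped to read-only at the Postgres level, so an accidental write fails loudly instead of mutating shared state. A usage sketch of what the listener produces (session names follow the Db class above; the error shown assumes a psycopg2 DBAPI):

from sqlalchemy import text

db.connect(readonly=True)
db.session.execute(text('select 1'))   # reads work normally
db.session.execute(text("insert into sharedata (data) values ('{}')"))
# -> ReadOnlySqlTransaction: cannot execute INSERT in a read-only transaction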


@@ -10,3 +10,4 @@ from .ofac import OFAC, OFACAlerts
 from .accounting import Accounting, DbAccount
 from .vaultcreationrequest import VaultCreationRequest
 from .tos import TOSAcceptance
+from .sharedata import ShareData


@@ -1,7 +1,7 @@
 import logging
 from datetime import datetime
 from decimal import Decimal as dec
-from enum import Enum
+from enum import Enum, auto
 from sqlalchemy import ForeignKeyConstraint
 from sqlalchemy.ext.mutable import MutableDict
@@ -17,34 +17,37 @@ log = logging.getLogger(__name__)
 class AccountingCategory (Enum):
-    Transfer = 0
-    Income = 1
-    Expense = 2
-    Trade = 3
-    Special = 4
+    Transfer = auto()
+    Income = auto()
+    Expense = auto()
+    Trade = auto()
+    Special = auto()
 class AccountingSubcategory (Enum):
     # Income
-    OrderFee = 0
-    GasFee = 1
-    FillFee = 2
+    OrderFee = auto()
+    GasFee = auto()
+    FillFee = auto()
     # Expense
-    VaultCreation = 3
-    Execution = 4
+    Admin = auto()  # contract deployments and upgrades, changing adjuster address, etc.
+    TransactionGas = auto()
+    VaultCreation = auto()
+    Execution = auto()
+    FeeAdjustment = auto()  # includes adjusting fee limits
     # Transfer
     # Transfers have no subcategories, but the note field will be the address of the other account. Both a debit and a
     # credit entry will be created, one for each account participating in the transfer.
     # Special Codes
-    InitialBalance = 5
+    InitialBalance = auto()
 class Accounting (Base):
     id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
     time: Mapped[datetime] = mapped_column(default=now(), index=True)
-    chain_id: Mapped[int] = mapped_column(index=True)
+    chain_id: Mapped[int] = mapped_column(index=True)  # chain_id
     account: Mapped[str] = mapped_column(index=True)
     category: Mapped[AccountingCategory] = mapped_column(index=True)
     subcategory: Mapped[Optional[AccountingSubcategory]] = mapped_column(index=True)
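Renumbering these members with auto() (and inserting Admin/TransactionGas ahead of VaultCreation) is only safe because SQLAlchemy's Enum column type persists member names rather than integer values, so rows written as 'VaultCreation' still map back correctly after the shuffle. A quick check of that assumption:

from enum import Enum, auto
import sqlalchemy as sa

class AccountingSubcategory(Enum):
    Admin = auto()          # new member inserted before VaultCreation
    VaultCreation = auto()  # integer value shifted, name unchanged

col_type = sa.Enum(AccountingSubcategory)
# sqlalchemy.Enum stores the *name*, so the shifted value is harmless:
assert 'VaultCreation' in col_type.enums
assert AccountingSubcategory['VaultCreation'].value == 2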


@@ -0,0 +1,12 @@
+import logging
+from sqlalchemy.dialects.postgresql import JSONB
+from sqlalchemy.orm import Mapped, mapped_column
+from dexorder.database.model import Base
+log = logging.getLogger(__name__)
+class ShareData (Base):
+    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
+    data: Mapped[dict] = mapped_column(JSONB)


@@ -3,8 +3,8 @@ import logging
 from web3.types import EventData
-from dexorder import db, metric
+from dexorder import db, metric, current_w3, timestamp
-from dexorder.accounting import accounting_fill, accounting_placement, accounting_transfer, is_tracked_address
+from dexorder.accounting import accounting_fill, accounting_placement
 from dexorder.base.chain import current_chain
 from dexorder.base.order import TrancheKey, OrderKey
 from dexorder.base.orderlib import SwapOrderState
@@ -42,11 +42,12 @@ async def handle_order_placed(event: EventData):
     except KeyError:
         log.warning(f'Rogue DexorderSwapPlaced in tx {hexstr(event["transactionHash"])}')
         return
-    await accounting_placement(event)
     log.debug(f'DexorderSwapPlaced {addr} {start_index} {num_orders}')
     if not await verify_vault(addr):
         log.warning(f'Discarding order from rogue vault {addr}.')
         return
+    await accounting_placement(event)
+    metric.orders.inc()
     contract = None
     for index in range(start_index, start_index+num_orders):
         key = OrderKey(addr, index)
@@ -57,7 +58,7 @@ async def handle_order_placed(event: EventData):
         log.debug(f'raw order status {obj}')
         order = Order.create(addr, index, event['transactionHash'], obj)
         await activate_order(order)
-        log.debug(f'new order {order.key}{order}')
+        log.debug(f'new order {order.key} {await order.pprint()}')
 async def handle_swap_filled(event: EventData):
@@ -80,7 +81,9 @@ async def handle_swap_filled(event: EventData):
     except KeyError:
         log.warning(f'DexorderSwapFilled IGNORED due to missing order {vault} {order_index}')
         return
-    await accounting_fill(event, order.order.tokenOut)
+    value = await accounting_fill(event, order.order.tokenOut)
+    if value is not None:
+        metric.volume.inc(float(value))
     order.status.trancheStatus[tranche_index].activationTime = next_execution_time  # update rate limit
     try:
         triggers = OrderTriggers.instances[order.key]
@@ -134,10 +137,11 @@ async def handle_transfer(transfer: EventData):
         vault = None
     if vault is not None:
         await adjust_balance(vault, token_address, amount)
-        await update_balance_triggers(vault, token_address, amount)
+        await update_balance_triggers(vault, token_address)
-    if is_tracked_address(to_address):
-        # noinspection PyTypeChecker
-        await accounting_transfer(transfer, token_address, from_address, to_address, amount, adjust_decimals=True)
+    # This would double-count fill fees. Instead, we book the transfer when sending money to the account as part of a refill.
+    # if is_tracked_address(to_address):
+    #     # noinspection PyTypeChecker
+    #     await accounting_transfer(transfer, token_address, from_address, to_address, amount, adjust_decimals=True)
 async def handle_uniswap_swaps(swaps: list[EventData]):
     # asynchronously prefetch the block timestamps we'll need
@@ -209,8 +213,21 @@ async def handle_vault_impl_changed(upgrade: EventData):
     log.debug(f'Vault {addr} upgraded to impl version {version}')
-def update_metrics():
-    metric.vaults.set_function(vault_owners.upper_len)
-    metric.open_orders.set_function(Order.open_orders.upper_len)
-    metric.triggers_time.set_function(lambda: len(TimeTrigger.all))
-    metric.triggers_line.set_function(lambda: len(PriceLineTrigger.triggers_set))
+slow_metric_update = 0
+async def update_metrics():
+    # called at the end of the runloop in the worker context
+    metric.vaults.set(vault_owners.upper_len())
+    metric.open_orders.set(Order.open_orders.upper_len())
+    metric.triggers_time.set(len(TimeTrigger.all))
+    metric.triggers_line.set(sum(len(s) for s in PriceLineTrigger.by_pool.values()))
+    # slow updates
+    global slow_metric_update
+    now = timestamp()
+    if now - slow_metric_update >= 60:
+        slow_metric_update = now
+        # put slow updates here
+        price = await current_w3.get().eth.gas_price
+        metric.gas_price.observe(price)

src/dexorder/gas_fees.py (new file, 163 lines)

@@ -0,0 +1,163 @@
+import logging
+import math
+from dataclasses import dataclass
+from typing import Optional
+import eth_account
+from web3.types import EventData
+from dexorder import current_w3, config, Account
+from dexorder.accounting import accounting_transaction_gas
+from dexorder.alert import warningAlert
+from dexorder.base import TransactionReceiptDict, TransactionRequest
+from dexorder.contract.contract_proxy import ContractTransaction
+from dexorder.contract.dexorder import get_fee_manager_contract
+from dexorder.database.model import TransactionJob
+from dexorder.database.model.accounting import AccountingSubcategory
+from dexorder.transactions import TransactionHandler, submit_transaction_request
+from dexorder.util.convert import to_base_exp
+log = logging.getLogger(__name__)
+order_fee: Optional[int] = None
+gas_fee: Optional[int] = None
+fill_fee_half_bps: Optional[int] = None
+order_fee_limit: Optional[int] = None
+gas_fee_limit: Optional[int] = None
+fill_fee_half_bps_limit: Optional[int] = None
+adjuster_account: Optional[Account] = None
+adjuster_locked = False
+@dataclass
+class AdjustFeeTransactionRequest (TransactionRequest):
+    TYPE = 'adjust'
+    order_fee: int
+    order_exp: int
+    gas_fee: int
+    gas_exp: int
+    fill_fee_half_bps: int
+    # noinspection PyShadowingNames
+    def __init__(self, order_fee: int, order_exp: int, gas_fee: int, gas_exp: int, fill_fee_half_bps: int):
+        super().__init__(AdjustFeeTransactionRequest.TYPE, (order_fee, order_exp, gas_fee, gas_exp, fill_fee_half_bps))
+        self.order_fee = order_fee
+        self.order_exp = order_exp
+        self.gas_fee = gas_fee
+        self.gas_exp = gas_exp
+        self.fill_fee_half_bps = fill_fee_half_bps
+    @property
+    def schedule(self):
+        return self.order_fee, self.order_exp, self.gas_fee, self.gas_exp, self.fill_fee_half_bps
+class AdjustFeeTransactionHandler (TransactionHandler):
+    async def build_transaction(self, job_id: int, tr: TransactionRequest) -> Optional[ContractTransaction]:
+        tr: AdjustFeeTransactionRequest
+        fee_manager = await get_fee_manager_contract()
+        return await fee_manager.build.setFees(tr.schedule)
+    async def complete_transaction(self, job: TransactionJob, receipt: TransactionReceiptDict) -> None:
+        await accounting_transaction_gas(receipt, AccountingSubcategory.FeeAdjustment)  # fee adjustment gas
+    async def transaction_exception(self, job: TransactionJob, e: Exception) -> None:
+        pass
+    async def acquire_account(self) -> Optional[Account]:
+        global adjuster_account, adjuster_locked
+        if adjuster_locked:
+            return None
+        if config.adjuster is None:
+            return None
+        if adjuster_account is None:
+            local_account = eth_account.Account.from_key(config.adjuster)
+            adjuster_account = Account(local_account)
+        adjuster_locked = True
+        return adjuster_account
+    async def release_account(self, account: Account):
+        global adjuster_locked
+        adjuster_locked = False
+async def ensure_gas_fee_data():
+    global order_fee, gas_fee, fill_fee_half_bps, order_fee_limit, gas_fee_limit, fill_fee_half_bps_limit
+    if order_fee is None or gas_fee is None or order_fee_limit is None or gas_fee_limit is None:
+        fee_manager = await get_fee_manager_contract()
+        order_fee_base, order_fee_exp, gas_fee_base, gas_fee_exp, fill_fee_half_bps = await fee_manager.fees()
+        order_fee = order_fee_base << order_fee_exp
+        gas_fee = gas_fee_base << gas_fee_exp
+        order_fee_base_limit, order_fee_exp_limit, gas_fee_base_limit, gas_fee_exp_limit, fill_fee_half_bps_limit = await fee_manager.fee_limits()
+        order_fee_limit = order_fee_base_limit << order_fee_exp_limit
+        gas_fee_limit = gas_fee_base_limit << gas_fee_exp_limit
+        return fee_manager
+    return None
+async def adjust_gas():
+    if not config.adjuster:
+        return
+    w3 = current_w3.get()
+    price = await w3.eth.gas_price
+    new_order_fee = round(config.order_gas * config.order_gas_multiplier * price)
+    new_gas_fee = round(config.execution_gas * config.execution_gas_multiplier * price)
+    log.debug(f'avg gas price: {price/10**18}')
+    await ensure_gas_fee_data()
+    global order_fee, gas_fee
+    if abs(1 - new_order_fee / order_fee) >= config.fee_leeway or abs(1 - new_gas_fee / gas_fee) >= config.fee_leeway:
+        if new_order_fee > order_fee_limit or new_gas_fee > gas_fee_limit:
+            warningAlert('Fees Hit Limits', 'Adjusting fees would exceed existing fee limits.')
+            new_order_fee = min(order_fee_limit, new_order_fee)
+            new_gas_fee = min(gas_fee_limit, new_gas_fee)
+        # TODO check if the new fee is adjusting upwards too fast and cap it
+        # TODO check if the new fees are already proposed and pending
+        # if new_order_fee/order_fee - 1 >
+        if new_order_fee != order_fee or new_gas_fee != gas_fee:
+            log.info(f'adjusting gas fees: orderFee={new_order_fee/10**18}, gasFee={new_gas_fee/10**18}')
+            new_order_fee_base, new_order_fee_exp = to_base_exp(new_order_fee, math.floor)
+            new_gas_fee_base, new_gas_fee_exp = to_base_exp(new_gas_fee, math.floor)
+            req = AdjustFeeTransactionRequest(new_order_fee_base, new_order_fee_exp,
+                                              new_gas_fee_base, new_gas_fee_exp, fill_fee_half_bps)
+            submit_transaction_request(req)
+# noinspection DuplicatedCode
+async def handle_fee_limits_changed(event: EventData):
+    try:
+        fees = event['args']['fees']
+        new_order_fee_limit = fees['orderFee']
+        new_order_exp_limit = fees['orderExp']
+        new_gas_fee_limit = fees['gasFee']
+        new_gas_exp_limit = fees['gasExp']
+        new_fill_fee_half_bps_limit = fees['fillFeeHalfBps']
+    except KeyError:
+        return
+    global order_fee_limit, gas_fee_limit
+    order_fee_limit = new_order_fee_limit << new_order_exp_limit
+    gas_fee_limit = new_gas_fee_limit << new_gas_exp_limit
+    fill_fee_limit = new_fill_fee_half_bps_limit / 200
+    log.info(f'gas fee limits updated: orderFeeLimit={new_order_fee_limit/10**18}, gasFeeLimit={new_gas_fee_limit/10**18}, fillFeeLimit={fill_fee_limit:.3%}')
+# noinspection DuplicatedCode
+async def handle_fees_changed(event: EventData):
+    try:
+        fees = event['args']['fees']
+        new_order_fee = fees['orderFee']
+        new_order_exp = fees['orderExp']
+        new_gas_fee = fees['gasFee']
+        new_gas_exp = fees['gasExp']
+        new_fill_fee_half_bps = fees['fillFeeHalfBps']
+    except KeyError:
+        return
+    global order_fee, gas_fee
+    order_fee = new_order_fee << new_order_exp
+    gas_fee = new_gas_fee << new_gas_exp
+    fill_fee = new_fill_fee_half_bps / 200
+    log.info(f'gas fees updated: orderFee={new_order_fee/10**18}, gasFee={new_gas_fee/10**18}, fillFee={fill_fee:.3%}')
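adjust_gas converts a target wei fee into the (base, exp) pair the on-chain schedule stores. A sketch of the round trip, under the assumption that to_base_exp returns base and exp with value ≈ base << exp (the 16-bit base width below is illustrative, not the real field size):

import math

def to_base_exp(value: int, rounding=math.floor) -> tuple[int, int]:
    # illustrative stand-in for dexorder.util.convert.to_base_exp:
    # pack value as (base, exp) with value ~= base << exp
    exp = max(0, value.bit_length() - 16)   # assumed 16-bit base field
    return rounding(value / (1 << exp)), exp

# order_gas * order_gas_multiplier * gas price, per the config defaults above
target = int(425_000 * 2.0 * 20_000_000_000)   # 20 gwei, illustrative
base, exp = to_base_exp(target, math.floor)
assert abs((base << exp) / target - 1) < 0.001  # packs within 0.1%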

src/dexorder/marks.py (new file, 44 lines)

@@ -0,0 +1,44 @@
+"""
+"marks" are mark-to-market USD values of a selected set of tokens called quote tokens. Publishing a set of USD marks
+for the quote tokens allows almost any token to be marked to USD via one hop.
+"""
+import logging
+import time
+from dexorder import dec, NATIVE_TOKEN, config
+from dexorder.base.chain import current_chain
+from dexorder.blockstate import BlockDict
+from dexorder.pools import quotes, mark_to_market
+log = logging.getLogger(__name__)
+def pub_marks(_s, k, v):
+    chain_id = current_chain.get().id
+    return str(chain_id), 'marks.usd', (chain_id, k, str(v))
+marks: BlockDict[str, dec] = BlockDict('mark.usd', db=False, redis=True, pub=pub_marks, value2str=str)
+class RateLimiter:
+    def __init__(self, rate: float):
+        self.rate = rate
+        self.last_update = 0.0
+    def ready(self):
+        now = time.monotonic()
+        if now - self.last_update < self.rate:
+            return False
+        self.last_update = now
+        return True
+mark_publish_rate = RateLimiter(config.mark_publish_seconds)
+def publish_marks():
+    if mark_publish_rate.ready():
+        for token_addr in [NATIVE_TOKEN]+quotes:
+            # overwrite=False checks the previous value and does not generate a diff if the values match.
+            # This prevents excessive updates to Redis.
+            value = mark_to_market(token_addr)
+            if value is not None:
+                marks.setitem(token_addr, value, overwrite=False)
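RateLimiter is a simple monotonic-clock gate: the first ready() in each window passes and re-arms the timer, later calls are suppressed. Since publish_marks runs on every block, this caps Redis publishes at one per mark_publish_seconds. In isolation:

import time

limiter = RateLimiter(0.2)                   # 200ms window for the demo
hits = [limiter.ready() for _ in range(3)]   # only the first call passes
time.sleep(0.25)
hits.append(limiter.ready())                 # a new window has opened
print(hits)                                  # e.g. [True, False, False, True]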


@@ -1,3 +1,4 @@
+import itertools
 import logging
 from contextlib import asynccontextmanager
 from contextvars import ContextVar
@@ -10,16 +11,70 @@ from dexorder import config
 log = logging.getLogger(__name__)
+BATCH_SIZE = 1_000
+class PipelineProxy:
+    def __init__(self, pipe: Pipeline):
+        self.pipe = pipe
+        self.ops = 0
+    async def push(self, num=1):
+        self.ops += num
+        if self.ops >= BATCH_SIZE:
+            self.ops = 0
+            await self.pipe.execute()
+    async def sadd(self, series, *keys):
+        while keys:
+            most = min(BATCH_SIZE - self.ops, len(keys))
+            assert most > 0
+            send = keys[:most]
+            keys = keys[most:]
+            await self.pipe.sadd(series, *send)
+            await self.push(len(send))
+    async def srem(self, series, *keys):
+        while keys:
+            most = min(BATCH_SIZE - self.ops, len(keys))
+            assert most > 0
+            send = keys[:most]
+            keys = keys[most:]
+            await self.pipe.srem(series, *send)
+            await self.push(len(send))
+    async def hset(self, series, *, mapping):
+        items = list(mapping.items())
+        while items:
+            most = min(BATCH_SIZE - self.ops, len(items))
+            assert most > 0
+            send = items[:most]
+            items = items[most:]
+            await self.pipe.hset(series, mapping={k: v for k, v in send})
+            await self.push(len(send))
+    async def hdel(self, series, *keys):
+        while keys:
+            most = min(BATCH_SIZE - self.ops, len(keys))
+            assert most > 0
+            send = keys[:most]
+            keys = keys[most:]
+            await self.pipe.hdel(series, *send)
+            await self.push(len(send))
+    def __getattr__(self, item):
+        return getattr(self.pipe, item)
 class Memcache:
     @staticmethod
     @asynccontextmanager
-    async def batch():
+    async def batch(transaction=True):
         old_redis: Redis = current_redis.get()
-        pipe: Pipeline = old_redis.pipeline()
+        pipe = old_redis.pipeline(transaction=transaction)
+        # noinspection PyTypeChecker
         current_redis.set(pipe)
         try:
-            yield pipe
+            yield PipelineProxy(pipe)
             await pipe.execute()
         finally:
             current_redis.set(old_redis)
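The proxy exists because queueing an unbounded number of commands on one pipeline is exactly what overflowed during the initial state push; chunking at BATCH_SIZE keeps the buffer bounded. A usage sketch (redis.asyncio assumed; the key name is illustrative):

import asyncio
import redis.asyncio as aioredis

async def demo():
    r = aioredis.Redis.from_url('redis://localhost:6379')
    pipe = r.pipeline(transaction=False)
    proxy = PipelineProxy(pipe)
    # 25,000 members go out as 1,000-op batches, each flushed with execute(),
    # so the pipeline never buffers more than BATCH_SIZE commands
    await proxy.sadd('bigset', *range(25_000))
    await pipe.execute()    # flush the final partial batch
    await r.aclose()

asyncio.run(demo())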


@@ -12,7 +12,7 @@ from dexorder.blockstate.blockdata import SeriesCollection, BlockData
 from dexorder.blockstate.diff import DiffEntryItem
 from dexorder.blockstate.fork import Fork
 from dexorder.blockstate.state import compress_diffs
-from dexorder.memcache import current_redis, memcache
+from dexorder.memcache import current_redis, memcache, PipelineProxy
 from dexorder.util import hexstr
 from dexorder.util.async_util import maywait
 from dexorder.util.json import json_encoder
@@ -40,11 +40,11 @@ class RedisState (SeriesCollection):
         for series in self.datas.keys():
             for k, v in state.iteritems(fork, series):
                 diffs.append(DiffItem(series, k, v))
-        await self.save(fork, diffs)
+        await self.save(fork, diffs, use_transaction=False, skip_pubs=True)  # use_transaction=False because the initial state can be too big for one transaction
     # noinspection PyAsyncCall
-    async def save(self, fork: Fork, diffs: Reversible[Union[DiffItem, DiffEntryItem]]):
+    async def save(self, fork: Fork, diffs: Reversible[Union[DiffItem, DiffEntryItem]], *, use_transaction=True, skip_pubs=False):
         # the diffs must be already compressed such that there is only one action per key
         chain = current_chain.get()
         chain_id = chain.id
@@ -91,22 +91,23 @@ class RedisState (SeriesCollection):
                     hsets[series][key] = value
             else:
                 raise NotImplementedError
-        async with memcache.batch() as r:
-            r: Pipeline
+        async with memcache.batch(use_transaction) as r:
+            r: PipelineProxy
             for series, keys in sadds.items():
-                r.sadd(series, *keys)
+                await r.sadd(series, *keys)
             for series, keys in sdels.items():
-                r.srem(series, *keys)
+                await r.srem(series, *keys)
            for series, kvs in hsets.items():
-                r.hset(series, mapping=kvs)
+                await r.hset(series, mapping=kvs)
             for series, keys in hdels.items():
-                r.hdel(series, *keys)
+                await r.hdel(series, *keys)
             block_series = f'{chain_id}|head'
             headstr = hexstr(fork.head)
             r.json(json_encoder).set(block_series, '$', [fork.height, headstr])
             pubs.append((str(chain_id), 'head', [fork.height, headstr]))
         # separate batch for pubs
-        if pubs:
+        if pubs and not skip_pubs:
             await publish_all(pubs)


@@ -11,10 +11,15 @@ runner_loops = Counter("runner_loops", "Number of times the runner loop has been
 runner_latency = Summary("runner_latency", "How old the current block being processed is, in seconds")
 vaults = Gauge("vaults", "Total vault count", )
+orders = Counter("orders", "Orders placed", )
 open_orders = Gauge("open_orders", "Total active orders", )
 triggers_time = Gauge("triggers_time", "Total active time triggers", )
 triggers_line = Gauge("triggers_line", "Total active line triggers", )
+executions = Counter("executions", "Total executions attempted")
+executions_failed = Counter("executions_failed", "Number of failed execution attempts")
+volume = Counter("volume", "Total volume of successful executions in USD")
+account_balance = Gauge("account_balance", "Account balance", ["address", "token"])
 account_total = Gauge('account_total', 'Total number of accounts configured')
 account_available = Gauge('account_available', 'Number of accounts that do not have any pending transactions')
+gas_price = Summary('gas_price', 'Gas price in wei')


@@ -359,7 +359,7 @@ class OHLCRepository:
         if price is None, then bars are advanced based on the time but no new price is added to the series.
         """
         if OHLC_LIMIT_POOLS_DEBUG is not None and (symbol,period) not in OHLC_LIMIT_POOLS_DEBUG:
-            return
+            return None
         # logname = f'{symbol} {period_name(period)}'
         # log.debug(f'Updating OHLC {logname} {minutely(time)} {price}')
         if price is not None:
@@ -371,33 +371,31 @@ class OHLCRepository:
         # log.debug(f'got recent {historical}')
         if not historical:
             if create is False or price is None:
-                return  # do not track symbols which have not been explicity set up
+                return None  # do not track symbols which have not been explicity set up
+            historical = []
             updated = [NativeOHLC(ohlc_start_time(time, period), price, price, price, price)]
             # log.debug(f'\tcreated new bars {updated}')
         else:
             updated = update_ohlc(historical[-1], period, time, price)
-            # drop any historical bars that are older than we need
-            # oldest_needed = cover the root block time plus one period prior
-            root_branch = current_blockstate.get().root_branch
-            root_hash = root_branch.head
-            if root_hash is not None:
-                root_timestamp = await get_block_timestamp(root_hash)
-                oldest_needed = from_timestamp(root_timestamp) - period
-                # noinspection PyTypeChecker
-                trim = (oldest_needed - historical[0].start) // period
-                if trim > 0:
-                    historical = historical[trim:]
-            # now overlap the updated data on top of the historical data
+        # overlap the updated OHLC's on top of the historical ones
+        if not historical or not updated:
+            updated = historical + updated
+        else:
             last_bar = historical[-1].start
             first_updated = updated[0].start
             overlap = (first_updated - last_bar) // period + 1
             updated = historical[:-overlap] + updated if overlap > 0 else historical + updated
-            # log.debug(f'\tnew recents: {updated}')
+        # drop any bars that are older than we need
+        # oldest_needed = cover the root block time plus one period prior
+        root_branch = current_blockstate.get().root_branch
+        root_hash = root_branch.head
+        if root_hash is not None:
+            root_timestamp = await get_block_timestamp(root_hash)
+            oldest_needed = from_timestamp(root_timestamp) - period
+            # noinspection PyTypeChecker
+            trim = (oldest_needed - updated[0].start) // period
+            if trim > 0:
+                updated = updated[trim:]
+        # if len(updated) > 3:
+        #     log.debug(f'\tnew recents ({len(updated)}): {updated}')
         recent_ohlcs.setitem(key, updated)
         return updated
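
A worked example of the overlap splice above, using hypothetical bar start times rather than repo data: update_ohlc() re-emits the last historical bar, so the stale copy must be dropped before concatenating.

period = 60
historical = [0, 60, 120]          # start times of existing bars
updated = [120, 180]               # last bar rebuilt plus one new bar
overlap = (updated[0] - historical[-1]) // period + 1   # (120 - 120) // 60 + 1 == 1
merged = historical[:-overlap] + updated if overlap > 0 else historical + updated
assert merged == [0, 60, 120, 180]  # bar 120 replaced, not duplicated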

View File

@@ -6,7 +6,7 @@ from uuid import UUID
 from web3.exceptions import ContractPanicError, ContractLogicError
 from web3.types import EventData

-from dexorder import db
+from dexorder import db, metric, config
 from dexorder.accounting import accounting_transaction_gas
 from dexorder.base import TransactionReceiptDict, TransactionRequest, transaction_request_deserializers
 from dexorder.base.order import TrancheKey, OrderKey
@@ -15,10 +15,11 @@ from dexorder.contract.dexorder import get_dexorder_contract
 from dexorder.database.model.accounting import AccountingSubcategory
 from dexorder.database.model.transaction import TransactionJob
 from dexorder.order.orderstate import Order
-from dexorder.order.triggers import (inflight_execution_requests, OrderTriggers,
+from dexorder.order.triggers import (OrderTriggers,
                                      TrancheState, active_tranches, order_error)
 from dexorder.transactions import TransactionHandler, submit_transaction_request
 from dexorder.util import hexbytes
+from dexorder.vault_blockdata import refresh_vault_balances

 log = logging.getLogger(__name__)
@@ -79,7 +80,7 @@ class TrancheExecutionHandler (TransactionHandler):
                 errcode = hexbytes(x.args[1]).decode('utf-8')
                 log.error(f'While building execution for tranche {tk}: {errcode}')
                 # if there's a logic error we shouldn't keep trying
-                finish_execution_request(tk, errcode)
+                await finish_execution_request(tk, errcode)
             raise exception

     async def complete_transaction(self, job: TransactionJob, receipt: TransactionReceiptDict) -> None:
@@ -90,13 +91,13 @@ class TrancheExecutionHandler (TransactionHandler):
             log.error('Could not build execution transaction due to exception', exc_info=e)
         # noinspection PyTypeChecker
         req: TrancheExecutionRequest = job.request
-        finish_execution_request(req.tranche_key, '')
+        await finish_execution_request(req.tranche_key, '')

 TrancheExecutionHandler()  # map 'te' to a TrancheExecutionHandler

-def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
+async def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
     order_key = OrderKey(tk.vault, tk.order_index)
     try:
         order: Order = Order.of(order_key)
@@ -104,11 +105,6 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
         log.error(f'Could not get order {order_key}')
         return

-    try:
-        inflight_execution_requests.remove(tk)
-    except KeyError:
-        pass
-
     def get_trigger():
         try:
             return OrderTriggers.instances[order_key].triggers[tk.tranche_index]
@@ -125,6 +121,16 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
         if trig is not None:
             trig.touch()

+    def delay(secs=None):
+        trig = get_trigger()
+        if trig is not None:
+            trig.deactivate(secs if secs is not None else config.slippage_control_delay)
+
+    if error is None:
+        metric.executions.inc()
+    else:
+        metric.executions_failed.inc()
+
     #
     # execute() error handling
     #
@@ -134,7 +140,9 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
         # Insufficient Input Amount
         token = order.order.tokenIn
         log.debug(f'insufficient funds {tk.vault} {token} ')
+        slash()
         retry()
+        await refresh_vault_balances(tk.vault, order.order.tokenIn, order.order.tokenOut)
     elif error == 'SPL':
         # todo tight slippage can cause excessive executions as the backend repeatedly retries the remainder. The symptom is error 'SPL'.
         # Square-root price limit from Uniswap means we asked for a limit price that isn't met. This is a fault of
@@ -147,7 +155,7 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
         order_error(order, error)  # We do not know if it was filled or not so only Error status can be given
     elif error == 'TF':
         # Tranche Filled
-        log.warning(f'tranche already filled {tk}')
+        log.debug(f'tranche already filled {tk}')
         tranche_trigger = get_trigger()
         if tranche_trigger is not None:
             tranche_trigger.status = TrancheState.Filled
@@ -159,6 +167,7 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):
         retry()
     elif error == 'RL':
         log.debug(f'tranche {tk} execution failed due to "RL" rate limit')
+        delay()
         retry()
     elif error == 'TE':
         log.debug(f'tranche {tk} execution failed due to "TE" too early')
@@ -194,27 +203,27 @@ def finish_execution_request(tk: TrancheKey, error: Optional[str]=None):

 def execute_tranches():
     new_execution_requests = []
     for tk, proof in active_tranches.items():
-        if tk not in inflight_execution_requests:
-            new_execution_requests.append((tk, proof))
-        else:
-            log.debug(f'execute {tk} already in flight')
+        new_execution_requests.append((tk, proof))
     # todo order requests and batch
     for tk, proof in new_execution_requests:
         create_execution_request(tk, proof)

 def create_execution_request(tk: TrancheKey, proof: PriceProof):
-    inflight_execution_requests.add(tk)
     job = submit_transaction_request(new_tranche_execution_request(tk, proof))
     if job is not None:
         log.debug(f'Executing {tk} as job {job.id}')
     return job

-def handle_dexorderexecutions(event: EventData):
+async def handle_dexorderexecutions(event: EventData):
     log.debug(f'executions {event}')
     exe_id = UUID(bytes=event['args']['id'])
-    errors = event['args']['errors']
+    try:
+        errors = event['args']['errors']
+    except KeyError:
+        log.warning(f'Rogue DexorderExecutions event {event}')
+        return
     if len(errors) == 0:
         log.warning(f'No errors found in DexorderExecutions event: {event}')
         return
@@ -227,4 +236,4 @@ def handle_dexorderexecutions(event: EventData):
     # noinspection PyTypeChecker
     req: TrancheExecutionRequest = job.request
     tk = TrancheKey(req.vault, req.order_index, req.tranche_index)
-    finish_execution_request(tk, None if errors[0] == '' else errors[0])
+    await finish_execution_request(tk, None if errors[0] == '' else errors[0])
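
The try/except around event['args']['errors'] guards against malformed ("rogue") events. A standalone sketch of the same guard, with made-up payloads:

def errors_of(event):
    try:
        return event['args']['errors']
    except KeyError:
        return None  # rogue event: caller logs a warning and skips it

assert errors_of({'args': {'id': b'\x00' * 16, 'errors': ['']}}) == ['']
assert errors_of({'args': {'id': b'\x00' * 16}}) is None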

View File

@@ -3,13 +3,14 @@ import logging
 from dataclasses import dataclass
 from typing import overload

-from dexorder import DELETE, db, order_log
+from dexorder import DELETE, db, order_log, from_timestamp
 from dexorder.base.chain import current_chain
 from dexorder.base.order import OrderKey, TrancheKey
 from dexorder.base.orderlib import SwapOrderState, ElaboratedSwapOrderStatus, Fill
 from dexorder.blockstate import BlockDict, BlockSet
 from dexorder.database.model.orderindex import OrderIndex
 from dexorder.routing import pool_address
+from dexorder.tokens import adjust_decimals
 from dexorder.util import json
 from dexorder.vault_blockdata import vault_owners
@@ -127,7 +128,7 @@ class Order:
         key = a if b is None else OrderKey(a, b)
         assert key not in Order.instances
         self.key = key
-        self.status: ElaboratedSwapOrderStatus = Order.order_statuses[key].copy()
+        self._status: ElaboratedSwapOrderStatus = Order.order_statuses[key].copy()
         self.pool_address: str = pool_address(self.status.order)
         self.tranche_keys = [TrancheKey(key.vault, key.order_index, i) for i in range(len(self.status.trancheStatus))]
         # flattenings of various static data
@@ -138,6 +139,14 @@ class Order:
         self.tranche_amounts = [t.fraction_of(self.amount) for t in self.order.tranches]
         Order.instances[self.key] = self

+    @property
+    def status(self):
+        return self._status
+
+    @status.setter
+    def status(self, v):
+        self._status = Order.order_statuses[self.key] = v
+
     @property
     def state(self):
         return self.status.state
@@ -279,6 +288,33 @@ class Order:
             Order.vault_recently_closed_orders.listremove(key.vault, key.order_index)

+    def __str__(self):
+        return str(self.key)
+
+    async def pprint(self):
+        amount_token = self.order.tokenIn if self.order.amountIsInput else self.order.tokenOut
+        msg = f'''
+SwapOrder {self.key}
+    status: {self.state.name}
+    placed: {from_timestamp(self.status.startTime)}
+        in: {self.order.tokenIn}
+       out: {self.order.tokenOut}
+  exchange: {self.order.route.exchange.name, self.order.route.fee}
+    amount: {"input" if self.order.amountIsInput else "output"} {await adjust_decimals(amount_token, self.filled):f}/{await adjust_decimals(amount_token, self.amount):f}{" to owner" if self.order.outputDirectlyToOwner else ""}
+   minFill: {await adjust_decimals(amount_token, self.min_fill_amount):f}
+  inverted: {self.order.inverted}
+  tranches:
+'''
+        for i in range(len(self.order.tranches)):
+            tranche = self.order.tranches[i]
+            msg += f'    {tranche}'
+            filled_amount = self.tranche_filled(i)
+            if filled_amount:
+                msg += f' filled {await adjust_decimals(amount_token, filled_amount)}'
+            msg += '\n'
+        return msg
+
     # ORDER STATE
     # various blockstate fields hold different aspects of an order's state.
@@ -310,8 +346,6 @@ class Order:
         'of', db=True, redis=True, pub=pub_order_fills,
         str2key=OrderKey.str2key, value2str=lambda v: json.dumps(v.dump()), str2value=lambda s:OrderFilled.load(json.loads(s)))

-    def __str__(self):
-        return str(self.order)
-
     # "active" means the order wants to be executed now.  this is not BlockData because it's cleared every block
     active_orders: dict[OrderKey,Order] = {}
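
The new status property is a write-through: assigning order.status also updates the shared Order.order_statuses map, which the Underfunded transitions elsewhere in this changeset rely on. A generic sketch of the pattern with standalone names (nothing here is the repo's code):

class WriteThrough:
    registry = {}  # stand-in for Order.order_statuses

    def __init__(self, key, status):
        self.key = key
        self._status = status

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, v):
        # one assignment keeps the instance and the shared map in sync
        self._status = WriteThrough.registry[self.key] = v

o = WriteThrough('k', 'Open')
o.status = 'Underfunded'
assert WriteThrough.registry['k'] == 'Underfunded'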

View File

@@ -2,13 +2,15 @@ import asyncio
 import logging
 from abc import abstractmethod
 from collections import defaultdict
+from datetime import timedelta
 from enum import Enum, auto
-from typing import Optional, Sequence
+from typing import Optional, Sequence, Union

 import numpy as np
 from sortedcontainers import SortedList

-from dexorder.base.orderlib import SwapOrderState, PriceProof, DISTANT_FUTURE, DISTANT_PAST, Line
+from dexorder.base.orderlib import SwapOrderState, PriceProof, DISTANT_FUTURE, DISTANT_PAST, Line, MIN_SLIPPAGE, \
+    MIN_SLIPPAGE_EPSILON
 from dexorder.blockstate import BlockDict
 from .orderstate import Order
 from .. import dec, order_log, timestamp, from_timestamp, config
@@ -39,9 +41,6 @@ execution should be attempted on the tranche.
 # tranches which have passed all constraints and should be executed
 active_tranches: BlockDict[TrancheKey, Optional[PriceProof]] = BlockDict('at')

-# tranches which have an execute() transaction sent but not completed
-inflight_execution_requests: set[TrancheKey] = set()
-
 class OrderTriggers:
     instances: dict[OrderKey, 'OrderTriggers'] = {}
@@ -58,13 +57,13 @@ class OrderTriggers:
         self.order = order
         self.triggers = triggers
         OrderTriggers.instances[order.key] = self
-        log.debug(f'created OrderTriggers for {order.key}')
+        # log.debug(f'created OrderTriggers for {order.key}')

     def disable(self):
         for t in self.triggers:
             t.disable()
         del OrderTriggers.instances[self.order.key]
-        log.debug(f'disabled OrderTriggers for {self.order.key}')
+        # log.debug(f'disabled OrderTriggers for {self.order.key}')

     @property
     def closed(self):
@@ -74,6 +73,10 @@ class OrderTriggers:
     def open(self):
         return not self.closed

+    @property
+    def error(self):
+        return any(t.error for t in self.triggers)
+
     def check_complete(self):
         if self.closed:
             final_state = SwapOrderState.Filled if self.order.remaining == 0 or self.order.remaining < self.order.min_fill_amount else SwapOrderState.Expired
@@ -102,7 +105,8 @@ def start_trigger_updates():
     PriceLineTrigger.clear_data()

-async def update_balance_triggers(vault: str, token: str, balance: int):
+async def update_balance_triggers(vault: str, token: str):
+    balance = vault_balances.get(vault, {}).get(token)
     updates = [bt.update(balance) for bt in BalanceTrigger.by_vault_token.get((vault, token), [])]
     await asyncio.gather(*updates)
@@ -121,15 +125,15 @@ async def end_trigger_updates():
     PriceLineTrigger.end_updates(current_clock.get().timestamp)
     while _dirty:
         tk = _dirty.pop()
-        log.debug(f'check dirty tranche {tk}')
+        # log.debug(f'check dirty tranche {tk}')
         if _trigger_state.get(tk,0) == 0:
             # all clear for execution.  add to active list with any necessary proofs
             active_tranches[tk] = PriceProof(0)
-            log.debug(f'active tranche {tk}')
+            # log.debug(f'active tranche {tk}')
         else:
             # blocked by one or more triggers being False (nonzero mask)
             reason = ', '.join(t.name for t in TrancheTrigger.all[tk].blocking_triggers)
-            log.debug(f'tranche {tk} blocked by {reason}')
+            # log.debug(f'tranche {tk} blocked by {reason}')
         # check expiry constraint
         try:
             TrancheTrigger.all[tk].check_expire()
@@ -213,13 +217,13 @@ class Trigger:

 async def has_funds(tk: TrancheKey):
-    log.debug(f'has funds? {tk.vault}')
+    # log.debug(f'has funds? {tk.vault}')
     order = Order.of(tk)
     balances = vault_balances.get(tk.vault, {})
-    log.debug(f'balances {balances}')
+    # log.debug(f'balances {balances}')
     token_addr = order.status.order.tokenIn
     token_balance = balances.get(token_addr)
-    log.debug(f'amount of {token_addr} = {token_balance}')
+    # log.debug(f'amount of {token_addr} = {token_balance}')
     if token_balance is None:
         # unknown balance
         token_balance = balances[token_addr] = await ERC20(token_addr).balanceOf(tk.vault)
@@ -259,11 +263,12 @@ class BalanceTrigger (Trigger):
         self.order = Order.of(self.tk)
         self.vault_token = self.tk.vault, self.order.status.order.tokenIn
         BalanceTrigger.by_vault_token[self.vault_token].add(self)
+        self._value_changed()
         # log.debug(f'initializing Balance Trigger {id(self)} {tk} {value} {self.value}')

     async def update(self, balance):
         self.value = await input_amount_is_sufficient(self.order, balance)
-        # log.debug(f'update balance {balance} was sufficient? {self.value}')
+        # log.debug(f'update balance {balance} was sufficient? {self.value} {self.order.key}')

     def remove(self):
         try:
@@ -271,6 +276,17 @@ class BalanceTrigger (Trigger):
         except (KeyError, ValueError):
             pass

+    def _value_changed(self):
+        ok = self.value
+        order = Order.of(self.tk)
+        old_state = order.status.state
+        if not ok and old_state == SwapOrderState.Open:
+            order.status = order.status.copy()
+            order.status.state = SwapOrderState.Underfunded
+        elif ok and old_state == SwapOrderState.Underfunded:
+            order.status = order.status.copy()
+            order.status.state = SwapOrderState.Open
+
 class TimeTrigger (Trigger):
@@ -307,11 +323,8 @@ class TimeTrigger (Trigger):
         if time == self._time:
             return
         self._time = time
-        if self.active:
-            # remove old trigger
-            TimeTrigger.all.remove(self)
-            self.active = False
-        self.update_active(time_now)
+        in_future = time_now >= time
+        self.value = in_future is self.is_start

     def update_active(self, time_now: int = None, time: int = None):
         if time_now is None:
@@ -380,7 +393,7 @@ class PriceLineTrigger (Trigger):
         if inverted:
             price_now = 1/price_now
         activated = value_now < price_now if is_min else value_now > price_now
-        log.debug(f'initial price line {value_now} {"<" if is_min else ">"} {price_now} {activated}')
+        # log.debug(f'initial price line {value_now} {"<" if is_min else ">"} {price_now} {activated}')
         trigger_type = Trigger.TriggerType.MinLine if is_min else Trigger.TriggerType.MaxLine
         super().__init__(trigger_type, tk, activated)
         self.inverted = inverted
@@ -418,7 +431,7 @@ class PriceLineTrigger (Trigger):
         if self.inverted:
             price = 1/price
         self.last_price = price
-        log.debug(f'price trigger {price}')
+        # log.debug(f'price trigger {price}')
         if self not in PriceLineTrigger.triggers_set:
             self.add_computation(price)
         else:
@@ -449,8 +462,8 @@ class PriceLineTrigger (Trigger):
         line_value = m * time + b
         price_diff = sign * (y - line_value)
         activated = price_diff > 0
-        for price, line, s, a, diff in zip(y, line_value, sign, activated, price_diff):
-            log.debug(f'price: {line} {"<" if s == 1 else ">"} {price} {a} ({diff:+})')
+        # for price, line, s, a, diff in zip(y, line_value, sign, activated, price_diff):
+        #     log.debug(f'price: {line} {"<" if s == 1 else ">"} {price} {a} ({diff:+})')
         for t, activated in zip(PriceLineTrigger.triggers, activated):
             t.handle_result(activated)
         PriceLineTrigger.clear_data()
@@ -495,7 +508,8 @@ async def activate_order(order: Order):
     triggers = await OrderTriggers.create(order)
     if triggers.closed:
         log.debug(f'order {order.key} was immediately closed')
-        final_state = SwapOrderState.Filled if order.remaining == 0 or order.remaining < order.min_fill_amount \
+        final_state = SwapOrderState.Error if triggers.error \
+            else SwapOrderState.Filled if order.remaining == 0 or order.remaining < order.min_fill_amount \
             else SwapOrderState.Expired
         order.complete(final_state)
@@ -556,13 +570,14 @@ class TrancheTrigger:
         tranche_remaining = tranche.fraction_of(order.amount) - order.tranche_filled(self.tk.tranche_index)
         self.status = \
+            TrancheState.Error if self.market_order and self.slippage < MIN_SLIPPAGE - MIN_SLIPPAGE_EPSILON else \
             TrancheState.Filled if tranche_remaining == 0 or tranche_remaining < self.order.min_fill_amount else \
             TrancheState.Expired if self.expiration_trigger is not None and not self.expiration_trigger else \
-            TrancheState.Early if self.activation_trigger is None and not self.activation_trigger else \
+            TrancheState.Early if self.activation_trigger is not None and not self.activation_trigger else \
             TrancheState.Active
         _dirty.add(tk)
         TrancheTrigger.all[tk] = self
-        log.debug(f'Tranche {tk} initial status {self.status} {self}')
+        # log.debug(f'Tranche {tk} initial status {self.status} {self}')

     @property
@@ -593,7 +608,8 @@ class TrancheTrigger:
         else:
             order_log.debug(f'tranche part-filled {self.tk} in:{_amount_in} out:{_amount_out} remaining:{remaining}')
         if self.market_order:
-            self.expire()
+            order_log.debug(f'tranche {self.tk} delayed {config.slippage_control_delay} seconds due to slippage control')
+            self.deactivate(config.slippage_control_delay)
         self.slash_count = 0  # reset slash count

     def touch(self):
@@ -605,11 +621,11 @@ class TrancheTrigger:
         self.order_trigger.expire_tranche(self.tk.tranche_index)

     def expire(self):
-        self.disable()
         if self.closed:
             return
         order_log.debug(f'tranche expired {self.tk}')
         self.status = TrancheState.Expired
+        self.disable()

     def kill(self):
         order_log.warning(f'tranche KILLED {self.tk}')
@@ -625,15 +641,26 @@ class TrancheTrigger:
             self.kill()
         else:
             delay = round(config.slash_delay_base * config.slash_delay_mul ** (self.slash_count-1))
-            self.deactivate(timestamp()+delay)
+            self.deactivate(delay)

-    def deactivate(self, until):
+    def deactivate(self, interval: Union[timedelta, int, float]):
+        # todo this timestamp should be consistent with the trigger time which is blockchain
+        now = current_clock.get().timestamp
+        self.deactivate_until(now + (interval.total_seconds() if isinstance(interval, timedelta) else interval))
+
+    def deactivate_until(self, until):
         # Temporarily deactivate the tranche due to a rate limit.  Use disable() to permanently halt the trigger.
-        log.debug(f'deactivating tranche {self.tk} until {from_timestamp(until)}')
+        now = current_clock.get().timestamp
+        if until < now:
+            return
         if self.activation_trigger is None:
             self.activation_trigger = TimeTrigger.create(True, self.tk, until)
         else:
-            self.activation_trigger.time = until
+            self.activation_trigger.time = max(until, self.activation_trigger.time)
+        try:
+            del active_tranches[self.tk]
+        except KeyError:
+            pass

     def disable(self):
         # permanently stop this trigger and deconstruct
@@ -671,6 +698,10 @@ class TrancheTrigger:
     def open(self):
         return not self.closed

+    @property
+    def error(self):
+        return self.status == TrancheState.Error
+
     def __str__(self):
         trigs = []
         if self.balance_trigger is not None:
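
The slash backoff above grows geometrically before deactivate() converts the interval into an absolute deadline. A worked example with assumed config values (the real slash_delay_base/slash_delay_mul live in the repo's config, not here):

slash_delay_base = 10   # seconds; assumption for illustration
slash_delay_mul = 2.0   # assumption for illustration

for slash_count in (1, 2, 3, 4):
    delay = round(slash_delay_base * slash_delay_mul ** (slash_count - 1))
    print(slash_count, delay)   # -> 10, 20, 40, 80 seconds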

View File

@@ -148,7 +148,7 @@ class MarkPool:

 mark_pools: dict[str, MarkPool] = {}
-quotes = []  # ordered list of preferred quote tokens
+quotes = []  # ordered list of preferred quote token addresses

 def add_mark_pool(addr: str, base: str, quote: str, fee: int):
@@ -200,7 +200,7 @@ async def mark_to_market_adj_dec(token: str, amount: dec, adjust_decimals=True)
     return mark_to_market(token, amount)

-def mark_to_market(token: str, amount: dec) -> Optional[dec]:
+def mark_to_market(token: str, amount: dec = dec(1)) -> Optional[dec]:
     """
     amount must already be adjusted for decimals
     """

View File

@@ -5,7 +5,6 @@ from datetime import timedelta
 from typing import Any, Iterable, Callable, Optional

 from eth_bloom import BloomFilter
-# noinspection PyPackageRequirements
 from websockets.exceptions import ConnectionClosedError

 from dexorder import Blockchain, db, current_pub, async_yield, current_w3, config, now, timestamp, metric
@@ -81,8 +80,7 @@ class BlockStateRunner(BlockProgressor):
                 async with w3ws as w3ws:
                     log.debug('connecting to ws provider')
                     await w3ws.provider.connect()
-                    subscription = await w3ws.eth.subscribe('newHeads')  # the return value of this call is not consistent between anvil/hardhat/rpc.
-                    log.debug(f'subscribed to newHeads {subscription}')
+                    await w3ws.eth.subscribe('newHeads')  # the return value of this call is not consistent between anvil/hardhat/rpc.
                     while self.running:
                         async for message in w3ws.ws.process_subscriptions():
                             block = Block(chain_id, message['result'])
@@ -90,15 +88,19 @@ class BlockStateRunner(BlockProgressor):
                             self.new_head_event.set()
                             log.debug(f'new head {block}')
                             if abs(block.timestamp-timestamp()) > 3:
-                                log.warning(f'Blockchain {chain_id} time is off by {block.timestamp-timestamp():.1f}s')
+                                log.info(f'Blockchain {chain_id} time is off by {block.timestamp-timestamp():.1f}s')
                             if not self.running:
                                 break
                             await async_yield()
-            except (ConnectionClosedError, TimeoutError, asyncio.TimeoutError) as e:
+            except (TimeoutError, asyncio.TimeoutError) as e:
                 log.debug(f'runner timeout {e}')
+            except ConnectionClosedError as e:
+                log.info(f'websocket connection closed {e}')
             except ConnectionRefusedError:
                 log.warning(f'Could not connect to websocket {config.ws_url}')
                 await asyncio.sleep(1)
+            except StopAsyncIteration:
+                log.info(f'websocket stream ended')
             except Exception:
                 log.exception(f'Unhandled exception during run_ws()')
             finally:
@@ -397,21 +399,21 @@ class BlockStateRunner(BlockProgressor):
             # propragate to the DB or Redis.
             # TIME TICKS ARE DISABLED FOR THIS REASON
             return
-        current_fork.set(fork)
-        session = db.session
-        session.begin()
-        try:
-            for callback, on_timer in self.callbacks:
-                if on_timer:
-                    # noinspection PyCallingNonCallable
-                    await maywait(callback())
-        except BaseException:
-            session.rollback()
-            raise
-        else:
-            session.commit()
-        finally:
-            db.close_session()
+        # current_fork.set(fork)
+        # session = db.session
+        # session.begin()
+        # try:
+        #     for callback, on_timer in self.callbacks:
+        #         if on_timer:
+        #             # noinspection PyCallingNonCallable
+        #             await maywait(callback())
+        # except BaseException:
+        #     session.rollback()
+        #     raise
+        # else:
+        #     session.commit()
+        # finally:
+        #     db.close_session()

     async def do_state_init_cbs(self):
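
The reshuffled except clauses separate expected churn from real faults: timeouts stay at debug, closed websockets and ended streams are now informational, and only unexpected errors escalate. A reduced sketch of the same layering (connect() is a stub; ConnectionClosedError is the real websockets exception):

import asyncio
from websockets.exceptions import ConnectionClosedError

async def run_once(connect):
    try:
        await connect()
    except (TimeoutError, asyncio.TimeoutError):
        pass                      # routine polling timeout: just loop again
    except ConnectionClosedError:
        pass                      # peer dropped the socket: reconnect quietly
    except ConnectionRefusedError:
        await asyncio.sleep(1)    # endpoint down: back off before retrying
    except StopAsyncIteration:
        pass                      # subscription stream ended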

View File

@@ -91,8 +91,9 @@ async def load_token(address: str) -> Optional[OldTokenDict]:
     try:
         decimals = await dec_prom
     except CONTRACT_ERRORS:
-        log.warning(f'token {address} has no decimals()')
+        log.info(f'token {address} has no decimals()')
         decimals = 0
+        return None  # we do not support coins that don't specify decimals.
     approved = False  # never approve new coins
     chain_id = current_chain.get().id
     symbol = await symbol_prom

View File

@@ -4,14 +4,13 @@ from abc import abstractmethod
 from typing import Optional
 from uuid import uuid4

-from sqlalchemy import select
 from web3.exceptions import TransactionNotFound, ContractPanicError, ContractLogicError

 from dexorder import db, current_w3, Account
+from dexorder.alert import warningAlert
 from dexorder.base import TransactionReceiptDict, TransactionRequest
 from dexorder.base.chain import current_chain
-from dexorder.blockstate.diff import DiffEntryItem
-from dexorder.blockstate.fork import current_fork, Fork
+from dexorder.blockstate.fork import current_fork
 from dexorder.contract.contract_proxy import ContractTransaction
 from dexorder.database.model.transaction import TransactionJob, TransactionJobState
 from dexorder.util import hexstr
@@ -39,6 +38,18 @@ class TransactionHandler:
     @abstractmethod
     async def transaction_exception(self, job: TransactionJob, e: Exception) -> None: ...

+    # noinspection PyMethodMayBeStatic
+    async def acquire_account(self) -> Optional[Account]:
+        try:
+            async with asyncio.timeout(1):
+                return await Account.acquire()
+        except asyncio.TimeoutError:
+            return None
+
+    # noinspection PyMethodMayBeStatic
+    async def release_account(self, account: Account):
+        account.release()
+
 in_flight = set()
 accounts_in_flight: dict[bytes, Account] = {}  # tx_id_bytes: account
@@ -81,9 +92,8 @@ async def create_and_send_transactions():
             # these errors can be thrown immediately when the tx is tested for gas
             log.warning(f'failed to build transaction request for {job.request.__class__.__name__} {job.id}')
             job.state = TransactionJobState.Error
-            db.session.add(job)
             await handler.transaction_exception(job, x)
-            in_flight.discard((job.request.type, job.request.key))
+            end_job(job)
             return
         except Exception as x:
             log.warning(f'unable to send transaction for job {job.id}', exc_info=x)
@@ -91,36 +101,42 @@ async def create_and_send_transactions():
         if ctx is None:
             log.info(f'Transaction request {job.request.__class__.__name__} {job.id} declined to build a tx.')
             job.state = TransactionJobState.Declined
-            db.session.add(job)
-            in_flight.discard((job.request.type, job.request.key))
+            end_job(job)
             return
         w3 = current_w3.get()
-        try:
-            async with asyncio.timeout(1):
-                account = await Account.acquire()
-        except asyncio.TimeoutError:
-            account = None
+        account = await handler.acquire_account()
         if account is None:
-            log.warning(f'No account available for job {job.id} type "{handler.tag}"')
+            warningAlert(f'No account available for job {job.id} type "{handler.tag}"', 'no account available')
             continue
-        await ctx.sign(account)
+        try:
+            await ctx.sign(account)
+        except Exception:
+            await handler.release_account(account)
+            raise
         log.info(f'servicing job {job.request.__class__.__name__} {job.id} with account {account.address} nonce {ctx.tx["nonce"]} tx {ctx.id}')
         # noinspection PyBroadException
         try:
             sent = await w3.eth.send_raw_transaction(ctx.data)
         except ValueError as e:
-            if e.args[0]['code'] == -32003:
+            try:
+                msg = e.args[0].get('message','')
+            except IndexError:
+                msg = ''
+            if msg.startswith('nonce too low'):
                 # Nonce too low
                 log.warning(f'Account {account.address} nonce too low')
+                log.info(f'Account nonce is {account._nonce} ({ctx.tx["nonce"]}) but should be {await current_w3.get().eth.get_transaction_count(account.address, 'pending')}')
                 account.reset_nonce()
+            elif msg.startswith('insufficient funds'):
+                warningAlert('Account Empty', f'Account {account.address} is out of funds!')
             else:
                 log.exception(f'Failure sending transaction for job {job.id}')
-            account.release()
+            await handler.release_account(account)
         except:
             log.exception(f'Failure sending transaction for job {job.id}')
             # todo pager
             # todo send state unknown!
-            account.release()
+            await handler.release_account(account)
         else:
             account.tx_id = hexstr(ctx.id_bytes)
             accounts_in_flight[ctx.id_bytes] = account
@@ -128,8 +144,8 @@ async def create_and_send_transactions():
             job.tx_id = ctx.id_bytes
             job.tx_data = ctx.data
             assert sent == job.tx_id
-            db.session.add(job)

+ended_jobs = []

 async def handle_transaction_receipts():
     # log.debug('handle_transaction_receipts')
@@ -147,6 +163,8 @@ async def handle_transaction_receipts():
         assert fork is not None
         if fork.branch.contiguous and receipt['blockHash'] in fork.branch.path or \
                 fork.branch.disjoint and receipt['blockNumber'] <= fork.height:
+            job.state = TransactionJobState.Mined
+            job.receipt = receipt
             try:
                 handler = TransactionHandler.of(job.request.type)
             except KeyError:
@@ -154,24 +172,26 @@ async def handle_transaction_receipts():
                 log.warning(f'ignoring transaction request with bad type "{job.request.type}"')
             else:
                 await handler.complete_transaction(job, receipt)
+                end_job(job)
+            try:
+                await handler.release_account(accounts_in_flight.pop(job.tx_id))
+            except KeyError:
+                pass

+def end_job(job):
+    ended_jobs.append(job)

+async def cleanup_jobs():
+    for job in ended_jobs:
+        log.debug(f'ending job {job.id}')
+        if job.tx_id in accounts_in_flight:
+            try:
+                handler = TransactionHandler.of(job.request.type)
+                await handler.release_account(accounts_in_flight.pop(job.tx_id))
+            except KeyError:
+                log.warning(f'ignoring transaction request with bad type "{job.request.type}"')
         in_flight.discard((job.request.type, job.request.key))
-        try:
-            accounts_in_flight.pop(job.tx_id).release()
-        except KeyError:
-            pass
+        db.session.delete(job)
+    ended_jobs.clear()

-def finalize_transactions(_fork: Fork, diffs: list[DiffEntryItem]):
-    # noinspection PyTypeChecker
-    open_jobs = db.session.scalars(select(TransactionJob).where(
-        TransactionJob.chain == current_chain.get(),
-        TransactionJob.state == TransactionJobState.Sent
-    )).all()
-    open_txs = {job.tx_id:job for job in open_jobs}
-    for diff in diffs:
-        if diff.series == 'mined_txs' and diff.key in open_txs:
-            job = open_txs[diff.key]
-            job.state = TransactionJobState.Mined
-            job.receipt = diff.value
-            db.session.add(job)
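
acquire_account/release_account wrap account checkout in an overridable, bounded wait. A minimal sketch of the same timeout pattern (pool.acquire is a hypothetical stand-in for Account.acquire; asyncio.timeout requires Python 3.11+):

import asyncio

async def acquire_with_timeout(pool, seconds=1):
    try:
        async with asyncio.timeout(seconds):
            return await pool.acquire()
    except asyncio.TimeoutError:
        return None  # caller raises an alert and skips the job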

View File

@@ -4,8 +4,8 @@ from typing import Union, Awaitable, TypeVar

 async def async_yield():
-    # a value of exactly 0 doesn't seem to work as well, so we set 1 nanosecond
-    await asyncio.sleep(1e-9)
+    # a value of exactly 0 doesn't seem to work as well, so we use 1 microsecond
+    await asyncio.sleep(1e-6)

 Args = TypeVar('Args')
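
A plausible explanation for the observed difference: asyncio.sleep(0) is special-cased to a bare yield, while any positive delay is scheduled through the loop's timer machinery. Sketch of the two paths:

import asyncio

async def demo():
    await asyncio.sleep(0)     # fast path: bare yield, no timer event
    await asyncio.sleep(1e-6)  # positive delay: scheduled via the loop's timer heap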

View File

@@ -25,3 +25,22 @@ def encode_IEEE754(value: float) -> int:

 def decode_IEEE754(value: int) -> float:
     return struct.unpack('>f', struct.pack('>I', value))[0]
+
+def to_base_exp(value, precision=8, roundingFunc=round) -> tuple[int, int]:
+    """
+    Convert a value to base-2 exponent form.
+    Precision is the number of bits available to the base component
+    """
+    if value <= 0:
+        raise ValueError("Value must be greater than zero")
+    max_base = 2 ** precision
+    exp = int(math.log2(value)) - precision
+    base = roundingFunc(value / (2 ** exp))
+    if base >= max_base:
+        base //= 2
+        exp += 1
+    return base, exp
+
+def from_base_exp(base, exp):
+    return base << exp
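
Round-trip behavior of the new helpers (assuming math is imported in this module): to_base_exp(1000) yields (250, 2) at the default 8-bit precision, and from_base_exp reverses it. Note that from_base_exp uses a left shift, so only encodings with a non-negative exponent (values of roughly 2**(precision-1) and up) round-trip exactly.

base, exp = to_base_exp(1000)            # (250, 2): 250 * 2**2 == 1000
assert from_base_exp(base, exp) == 1000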

View File

@@ -1,12 +1,13 @@
+import asyncio
 import functools
 import logging

-from dexorder import current_pub
+from dexorder import current_pub, dec
 from dexorder.base.chain import current_chain
 from dexorder.blockstate import BlockDict
 from dexorder.contract import ERC20, CONTRACT_ERRORS
 from dexorder.contract.dexorder import VaultContract, vault_address
-from dexorder.util import json
+from dexorder.util import json, align_decimal

 log = logging.getLogger(__name__)
@@ -90,3 +91,17 @@ def publish_vaults(chain_id, owner):
             break
     log.debug(f'publish_vaults {chain_id} {owner} {vaults}')
     current_pub.get()(f'{chain_id}|{owner}', 'vaults', chain_id, owner, vaults)
+
+async def refresh_vault_balances(vault, *tokens):
+    amounts = await asyncio.gather(*(ERC20(token).balanceOf(vault) for token in tokens))
+
+    def _adjust(vaddr, toks, amts, old_balances):
+        result = dict(old_balances)  # copy
+        for t, a in zip(toks, amts):
+            result[t] = a
+        return result
+
+    vault_balances.modify(vault, functools.partial(_adjust, vault, tokens, amounts))
+
+def pretty_balances(b: dict[str,dec], padding=8) -> str:
+    return '\n'.join(f'{k:>} {align_decimal(v,padding)}' for k,v in b.items())
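
refresh_vault_balances fetches every requested token concurrently and merges the results into the existing balance map through a pure function. A standalone sketch of the concurrent-fetch shape, with a stubbed balance call:

import asyncio

async def balance_of(token, vault):
    return 0  # stand-in for ERC20(token).balanceOf(vault)

async def fetch_balances(vault, tokens):
    amounts = await asyncio.gather(*(balance_of(t, vault) for t in tokens))
    return dict(zip(tokens, amounts))  # merged into the shared balance map by the caller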

View File

@@ -16,7 +16,7 @@ from dexorder.database.model import TransactionJob
 from dexorder.database.model import VaultCreationRequest as DbVaultCreationRequest
 from dexorder.database.model.accounting import AccountingSubcategory
 from dexorder.transactions import TransactionHandler, submit_transaction_request
-from dexorder.vault_blockdata import publish_vaults
+from dexorder.vault_blockdata import publish_vaults, vault_owners

 log = logging.getLogger(__name__)
@@ -54,17 +54,21 @@ class VaultCreationHandler (TransactionHandler):
         super().__init__(VaultCreationRequest.TYPE)

     async def build_transaction(self, job_id: int, tr: VaultCreationRequest) -> Optional[ContractTransaction]:
+        owner_addr = to_checksum_address(tr.owner)
+        vault_addr = vault_address(owner_addr, tr.num)
+        if vault_owners.get(vault_addr):
+            # existing vault detected
+            publish_vaults(tr.chain_id, owner_addr)
+            return None
         factory = get_factory_contract()
-        owner_address = to_checksum_address(tr.owner)
         try:
-            return await factory.build.deployVault(owner_address, tr.num)
+            return await factory.build.deployVault(owner_addr, tr.num)
         except ContractLogicError:
             in_flight.discard((tr.chain_id, tr.owner, tr.num))
             # maybe the vault already exists?
-            addr = vault_address(tr.owner, tr.num)
-            owner = await ContractProxy(addr, 'Vault').owner()
-            if owner == owner_address:
-                log.debug(f'detected existing vault at {addr}')
+            owner = await ContractProxy(vault_addr, 'Vault').owner()
+            if owner == owner_addr:
+                log.debug(f'detected existing vault at {vault_addr}')
                 publish_vaults(tr.chain_id, owner)
                 return None
             raise
@@ -91,7 +95,8 @@ def handle_vault_creation_requests():
     for req in db.session.query(DbVaultCreationRequest).where(
             DbVaultCreationRequest.vault == None, DbVaultCreationRequest.chain==current_chain.get()):
         req: DbVaultCreationRequest
-        key = req.chain.id, req.owner, req.num
+        owner = to_checksum_address(req.owner)
+        key = req.chain.id, owner, req.num
         if key not in in_flight:
            vcr = VaultCreationRequest(*key)
            submit_transaction_request(vcr)
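
The new pre-check makes vault creation idempotent: the vault's address is derived first, and if vault_owners already knows it, the handler republishes instead of deploying again. A condensed sketch of that decision (names mirror the diff; vault_address is assumed to be a deterministic derivation from owner and index):

def should_deploy(owner_addr, num, vault_owners, vault_address):
    vault_addr = vault_address(owner_addr, num)  # deterministic address derivation
    return not vault_owners.get(vault_addr)      # deploy only if the vault is unknown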