Compare commits
159 Commits
v0.1.0...e614050c88
| SHA1 | Author | Date | |
|---|---|---|---|
| e614050c88 | |||
| 1cf568dda1 | |||
| c08a694dd2 | |||
| 6992e57299 | |||
| caf33232df | |||
| a9288a3712 | |||
| 5b39d5ccc7 | |||
| 66aada0478 | |||
| a2ffe92c4d | |||
| bdf952fbd9 | |||
| b5da7fc62b | |||
| 05a8fd2094 | |||
| 0c171c4cc4 | |||
| 85fcce9b79 | |||
| 3cd489e19f | |||
| 39084e81f0 | |||
| b80ffad6f0 | |||
| 33ccfbd5da | |||
| 3fd5ad3d50 | |||
| 1e67e6f1b3 | |||
| ba6b681a78 | |||
| cd2c1153e8 | |||
| 274f2c933b | |||
| cb485c4da2 | |||
| 18a79def2b | |||
| 739f92df76 | |||
| de4366bf9c | |||
| 77a08e0627 | |||
| 9b3d6095b4 | |||
| 2edfcba297 | |||
| 09bfbec60c | |||
| 696cad385a | |||
| 94512d0c23 | |||
| 0b84244046 | |||
| 9a72c3fe8e | |||
| c27a89b956 | |||
| ac865b0bb1 | |||
| 9bcdc31c42 | |||
| 066e4a679e | |||
| 04140da82c | |||
| 9f041f7d5b | |||
| 84f5b673e3 | |||
| 8187e362a2 | |||
| 443c7bd24c | |||
| 3e9ef8e654 | |||
| 0409b08e67 | |||
| 05b52ce79b | |||
| 9840d796ec | |||
| ce10a4e2ca | |||
| c3ceca3a87 | |||
| aabc6d9441 | |||
| e3bb262364 | |||
| e5e9470e69 | |||
| f63470801e | |||
| cf294794b9 | |||
| 088918fba3 | |||
| 82bb037452 | |||
| 43347e536e | |||
| e7a062a95c | |||
| 5f7b8bc0fb | |||
| f67ffe7f91 | |||
| 3818d7f7a0 | |||
| d28e26d5d4 | |||
| 31884d1973 | |||
| 474f8d5ce7 | |||
| 08155e895b | |||
| 0e02a7f815 | |||
| d2181e03fa | |||
| 3f742a0893 | |||
| 12e96b0f87 | |||
| 42389e899b | |||
| c83fdf32d7 | |||
| 4d4bb0159a | |||
| 5ea471d972 | |||
| 82dba36b01 | |||
| 2009dd1395 | |||
| c0adf99d06 | |||
| bad7bf4a6a | |||
| 6e576b9849 | |||
| ea840dbeae | |||
| 21c0ecad9c | |||
| 8d753485f7 | |||
| 79f008acd5 | |||
| 8f23331859 | |||
| 436bc9aa60 | |||
| b255df1354 | |||
| 3f95ab5b23 | |||
| 01380bbaee | |||
| aa96fb5ead | |||
| 5efd22e006 | |||
| 26bde7cd24 | |||
| 3ce0da0c4a | |||
| 4d63015a94 | |||
| 061349b5fd | |||
| b3a851447c | |||
| 1ff6bff79d | |||
| 545455e6b9 | |||
| 5c78c7f4e1 | |||
| 3a54862c9c | |||
| 474a2c4672 | |||
| 7c4e20da79 | |||
| 79c20ff88d | |||
| d1d94b35a8 | |||
| d51e98c4a4 | |||
| 752c655298 | |||
| 3a5441cf20 | |||
| 799e95871f | |||
| 0ff213d079 | |||
| a9df693db8 | |||
| 4e8b33fc71 | |||
| e61b78d66b | |||
| 1148a6230b | |||
| c309e73639 | |||
| ce719e67f0 | |||
| 26a081ce14 | |||
| 4463bd64b0 | |||
| b7cc0feccd | |||
| 165d91599a | |||
| f612a63c23 | |||
| 0cd3029688 | |||
| 31a1de8ca6 | |||
| 48d6b06041 | |||
| 39fba496ba | |||
| d53f2603fe | |||
| ce029b8b01 | |||
| 90a65aa998 | |||
| f6a058a019 | |||
| c8e765a2be | |||
| ed2cc8bc02 | |||
| d023813771 | |||
| 615f181110 | |||
| 654354dea0 | |||
| 2a11b720b8 | |||
| 3b90a60e2b | |||
| 7c8ecf173e | |||
| 521ded0a97 | |||
| 00f0cd39fe | |||
| 3bf4806b6e | |||
| 9f83fd11ce | |||
| de63cca93c | |||
| 3eab491eac | |||
| a0a226d1bb | |||
| 8939cc6ea5 | |||
| 46affedc12 | |||
| 3b73379b9f | |||
| e2793982f6 | |||
| c6bae6d5ad | |||
| d5cc4d69aa | |||
| d0b17fbd82 | |||
| f163abee90 | |||
| 98a8ee217a | |||
| d77fbd11d6 | |||
| 64a361ea9b | |||
| c68e6048ca | |||
| 059279bc87 | |||
| f97ac62f31 | |||
| ef85930070 | |||
| 86efb44083 | |||
| 95ef05ac5c |
4 .gitignore (vendored)
@@ -1,8 +1,10 @@
/tmp
*secret*
**secret**
.env
node_modules
/hardhat.config.js
/.idea
/.vscode
/ohlc
/tradingview
talosconfig

2 .gitmodules (vendored)
@@ -1,6 +1,6 @@
[submodule "contract"]
	path = contract
	url = git@git.tolson.net:dexorder/contract.git
	url = git@github.com:dexorder-trade/contract.git
	branch = master
[submodule "backend"]
	path = backend

78 Makefile
@@ -12,7 +12,7 @@ export PATH := $(NVM_ROOT)/versions/node/$(NODE_VERSION)/bin:$(PATH)

default startall : FORCE
	# startall runs mock.sh that does a force rebuild
	bin/startall.sh
	bin/startall

# Target to start processes

@@ -26,20 +26,25 @@ versions :
	@echo "\nPATH=$(PATH)"

version.json :
	./bin/build_version.json.sh
	./bin/build-version-json

web : FORCE version.json
	cd web && node --version && npm --version
	cd web && npm run build && npm run preview # static build for production experience
	# cd web && npm run dev # hot reload for dev experience

webhost : FORCE version.json
	cd web && node --version && npm --version
	cd web && npm run build && npm run host # static build for production experience
	# cd web && npm run dev # hot reload for dev experience

mock : init_postgres
	@echo "\n*** wait until script prints \"ONCHAIN EXECUTION COMPLETE & SUCCESSFUL\" ***\n"
	TAG=mock ./bin/mock.sh # Anvil
	# DEXORDER_USE_HARDHAT=1 TAG=mock ./bin/mock.sh # HardHat
	TAG=mock ./bin/mock # Anvil
	# DEXORDER_USE_HARDHAT=1 TAG=mock ./bin/mock # HardHat

mirrorprice : FORCE
	MIRROR_POLLING=15 bin/mirrorprice
	MIRROR_POLLING=5 bin/mirrorprice

backend : FORCE
	cd backend \
@@ -54,8 +59,10 @@ server : FORCE

# Install targets

install_all : install_postgres install_docker install_node install_python install_foundry install_submodules

install_postgres :
	sudo apt install postgresql
	sudo apt-get install -y postgresql

install_docker_snap : # full docker desktop
	snap install docker
@@ -63,57 +70,56 @@ install_docker_snap : # full docker desktop
	sudo usermod -aG docker $(USER)

install_docker :
	# https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-22-04
	sudo apt update
	sudo apt install apt-transport-https ca-certificates curl software-properties-common
	curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
	echo "deb [arch=$$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
	sudo apt update
	apt-cache policy docker-ce
	sudo apt install docker-ce
	-sudo systemctl status docker --lines 0
	-sudo groupadd docker
	sudo usermod -aG docker $(USER)
	sudo apt-get install -y docker.io
	# # Add Docker's official GPG key:
	# sudo apt-get install ca-certificates curl
	# sudo install -m 0755 -d /etc/apt/keyrings
	# sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
	# sudo chmod a+r /etc/apt/keyrings/docker.asc
	#
	# # Add the repository to Apt sources:
	# echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
	# | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
	# sudo apt-get update
	# sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin


install_node :
	curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash
	# . ~/.nvm/nvm.sh && nvm install v20.8.1 && npm install --global yarn
	. ~/.nvm/nvm.sh && nvm install $(NODE_VERSION) && npm install --global yarn
	. ~/.nvm/nvm.sh && nvm install $(NODE_VERSION) && nvm alias default $(NODE_VERSION) && npm install --global yarn

install_python :
	sudo apt install python3-virtualenv
	sudo add-apt-repository ppa:deadsnakes/ppa -y
	sudo add-apt-repository ppa:deadsnakes/nightly -y
	sudo apt update
	sudo apt install $(PYTHON_VERSION) -y
	sudo apt install $(PYTHON_VERSION)-dev -y
	sudo apt-get update
	sudo apt-get install -y python3-virtualenv $(PYTHON_VERSION) $(PYTHON_VERSION)-dev
	$(PYTHON_VERSION) --version

install_foundry :
	curl -L https://foundry.paradigm.xyz | bash
	~/.foundry/bin/foundryup
	sudo apt install jq
	sudo apt-get install -y jq

install_submodules :
	git submodule init && git submodule update
	cd contract && git submodule init && git submodule update

# Init targets

init_all : init_postgres init_yarns init_venv init_configs init_foundry
init_all : init_postgres init_yarn init_venv init_configs init_foundry

init_postgres :
	cd /tmp; \
	echo "\
	drop database dexorder ; \
	drop user dexorder; \
	drop database if exists dexorder; \
	drop user if exists dexorder; \
	create database dexorder; \
	create user dexorder with password 'redroxed'; \
	grant all on database dexorder to dexorder; \
	" | \
	sudo -u postgres psql

init_yarns :
init_yarn :
	# cd web \
	# && . ~/.nvm/nvm.sh \
	# && nvm use v20.8.1
@@ -130,11 +136,11 @@ init_venv :
	&& pip install -r requirements-lock.txt

init_configs :
	cp backend/dexorder-mock.toml backend/dexorder.toml
	cp backend/.secret-mock.toml backend/.secret.toml
	cp backend/conf/mock/dexorder-mock.toml backend/dexorder.toml
	cp backend/conf/mock/.secret-mock.toml backend/.secret.toml
	cp backend/conf/logging-default.toml backend/logging.toml
	cp server/.env-mock server/.env
	cp web/.env-mock web/.env
	cp contract/src/VaultAddress-default.sol contract/src/VaultAddress.sol
	cp contract/foundry-default.toml contract/foundry.toml
	@echo "\nMake sure to set vault address in contract/src/VaultAddress.sol file\n"
	@echo "Make sure to set the arbitrum_mock and arbitrum_test aliases at the bottom of foundry.toml file\n"
@@ -148,22 +154,22 @@ diff_configs :
	-diff contract/foundry-default.toml contract/foundry.toml

init_foundry :
	cd contract/lib/forge-std && git submodule init && git submodule update
	#cd contract && git submodule init && git submodule update

# deploy

deploy-contract :
	cd contract; make build
	cd contract; RPCURL=https://rpc.alpha.dexorder.trade ./bin/deploy.sh alpha mock
	cd contract; RPCURL=https://rpc.alpha.dexorder.trade ./bin/deploy alpha mock

deploy-backend :
	bin/deploy.sh backend
	bin/deploy backend

deploy-server :
	bin/deploy.sh server
	bin/deploy server

deploy-web :
	bin/deploy.sh web
	bin/deploy web

# GIT

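Taken together, the targets above imply a bootstrap order for a fresh machine. A hypothetical first-time setup might look like the sketch below; the sequence is inferred from the targets shown in this diff, not a documented procedure.

```bash
# Assumed workflow based on the Makefile targets above; adjust as needed.
make install_all     # postgres, docker, node/nvm, python, foundry, git submodules
make init_all        # init_postgres init_yarn init_venv init_configs init_foundry
make                 # default target: bin/startall (mock chain + backend + server + web)
```
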
@@ -3,44 +3,44 @@ Please use this procedure to manually test:

Kill previous execution:

0. Ctl-C previous `make` to kill all servers.
1. Use Chrome browser.
2. Reset MetaMask.
3. Quit Chrome.
1. Ctl-C previous `make` to kill all servers.
2. Use Chrome browser.
3. Reset MetaMask.
4. Quit Chrome.

Start server an page:

4. use `make` to start all servers.
5. Wait until the script prints "done"
6. Start Chrome.
7. Log into MetaMask. Check that tokens are populated.
8. Open "localhost:3000".
9. You should be a the vault page.
5. use `make` to start all servers.
6. Wait until the script prints "done"
7. Start Chrome.
8. Log into MetaMask. Check that tokens are populated.
9. Open "localhost:3000".
10. You should be a the vault page.

Vault creation and funding:

10. Wait until vault has been created.
10. Click the `Gib` button and wait until the vault has been funded.
11. Wait until vault has been created.
12. Click the `Gib` button and wait until the vault has been funded.

Vault withdraw:

11. Click the three dots to the right of the MEH balance and withdraw 1.
12. MetaMask will pop up to ask you to confirm the withdraw transaction. Confirm.
13. Go to MetaMask and observe that 1 MEH has been transferred. It might take a few seconds.
13. Click the three dots to the right of the MEH balance and withdraw 1.
14. MetaMask will pop up to ask you to confirm the withdraw transaction. Confirm.
15. Go to MetaMask and observe that 1 MEH has been transferred. It might take a few seconds.

Vault receive:

14. Now from MetaMask, send 1 WETH to your vault.
15. Confirm on the vault page that the WETH has been received.
16. Now from MetaMask, send 1 WETH to your vault.
17. Confirm on the vault page that the WETH has been received.

TWAP submission:

16. Use the menu to the DCA/TWAP page (localhost:3000/twap)
17. Change the amount in default transaction to 1 and place the order.
18. MetaMask will pop up and ask you to confirm the transaction. Confirm.
18. Use the menu to the DCA/TWAP page (localhost:3000/twap)
19. Change the amount in default transaction to 1 and place the order.
20. MetaMask will pop up and ask you to confirm the transaction. Confirm.

TWAP operation:

18. The app will switch to the order page.
19. Observe your order in the list.
20. Observe three tranches fill within 1 minute.
21. The app will switch to the order page.
22. Observe your order in the list.
23. Observe three tranches fill within 1 minute.

2 backend
Submodule backend updated: ce50c9adaf...48fdfeeb3f

3 bin/PROD_DB (Executable file)
@@ -0,0 +1,3 @@
#!/bin/sh

kubectl exec -it postgres-0 -- psql -U postgres dexorder

@@ -1,3 +1,3 @@
#!/bin/bash
#usage: "$0 "'{backend|server|web} [''dev''] [config] [deployment] [kubernetes]'
bin/deploy.sh backend dev finaldata alpha backfill
bin/deploy backend dev finaldata alpha backfill

2 bin/alpha/finaldata-deploy (Executable file)
@@ -0,0 +1,2 @@
#!/bin/bash
bin/deploy backend $1 finaldata alpha finaldata

2 bin/alpha/mirrorprice-deploy (Executable file)
@@ -0,0 +1,2 @@
#!/bin/bash
bin/deploy backend alpha alpha mirrorprice

3 bin/arb1/backfill-deploy (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
# shellcheck disable=SC2086
bin/deploy backend $1 arb1 arb1 backfill

3 bin/arb1/backfill-progress (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
# shellcheck disable=SC2086
kubectl logs backfill-0 --since=5m | grep flushed | tail -1

3 bin/arb1/finaldata-deploy (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
# shellcheck disable=SC2086
bin/deploy backend $1 arb1 arb1 finaldata

3 bin/arb1/finaldata-progress (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
# shellcheck disable=SC2086
kubectl logs finaldata-0 -c finaldata --since=5m | grep flushed | tail -1

37 bin/arb1/init-deploy (Executable file)
@@ -0,0 +1,37 @@
#!/usr/bin/env bash

TAG=arb1
RPC=${RPC_URL:-arbitrum_alchemy}
CHAINID=42161

cd contract || exit 1

# force clean build
rm -rf out/ broadcast/ cache/
bin/build --skip Test || exit 1

rm -rf deployment/"$TAG"/out
mkdir -p deployment/"$TAG"
cp -r out deployment/"$TAG"/

cpbroadcast() {
    CONTRACT=$1
    mkdir -p deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"
    cp broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json
}

if [ "$1" == "broadcast" ]; then
    BROADCAST=--broadcast
else
    BROADCAST=
fi

# trezor account 0 is the admin account
forge script script/DeployArbitrum.sol -vvvv --fork-url "$RPC" --trezor --mnemonic-indexes 0 "$BROADCAST"

if [ "$1" == 'broadcast' ]; then
    cpbroadcast DeployArbitrum.sol
    echo Saved deployment files.
else
    echo Deployment NOT broadcast \(use \'broadcast\' argument\)
fi

14 bin/arb1/upgrade (Executable file)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

if [ "$1" == "broadcast" ]; then
    BROADCAST=--broadcast
else
    BROADCAST=
fi

cd contract || (echo invoke from top-level directory && exit 1)
forge script script/UpgradeArbitrum.sol -vvvv --fork-url arbitrum_alchemy --trezor --sender 0x12DB90820DAFed100E40E21128E40Dcd4fF6B331 "$BROADCAST"

if [ "$BROADCAST" == "" ]; then
    echo 'NOT sent to chain. Use {upgrade broadcast} to deploy.'
fi

27 bin/arbsep/build-version-json (Executable file)
@@ -0,0 +1,27 @@
#!/bin/bash

CHAINID=421614
export CHAINID
TAG=arbsep
VERSION=$TAG
export VERSION
REVISION="$( cd contract && git log --oneline | head -1 | cut -d ' ' -f 1 )"
export REVISION
DEPLOYMENT=contract/deployment/$TAG
BROADCAST=$DEPLOYMENT/broadcast
DEPLOY_SCRIPT=DeployArbitrumSepolia.sol
MIRRORENV=$(jq -r '.transactions[] | select(.contractName=="MirrorEnv") | select(.function==null).contractAddress' "$BROADCAST/DeployMirror.sol/$CHAINID/run-latest.json" | cast to-check-sum-address) || echo WARNING no MirrorEnv detected
export MIRRORENV
FACTORY=$(jq -r '.transactions[] | select(.contractName=="VaultFactory") | select(.function==null).contractAddress' "$BROADCAST/$DEPLOY_SCRIPT/$CHAINID/run-latest.json" | cast to-check-sum-address) || exit 1
export FACTORY
HELPER=$(jq -r '.transactions[] | select(.contractName=="QueryHelper") | select(.function==null).contractAddress' "$BROADCAST/$DEPLOY_SCRIPT/$CHAINID/run-latest.json" | cast to-check-sum-address) || exit 1
export HELPER
DEXORDER=$(jq -r '.transactions[] | select(.contractName=="Dexorder") | select(.function==null).contractAddress' "$BROADCAST/$DEPLOY_SCRIPT/$CHAINID/run-latest.json" | cast to-check-sum-address) || exit 1
export DEXORDER
VAULT_INIT_CODE_HASH=$(contract/bin/vault-init-code-hash) || exit 1
export VAULT_INIT_CODE_HASH

envsubst < conf/version-"$TAG".json | tee "$DEPLOYMENT"/version.json || err version file

echo
echo wrote "$DEPLOYMENT"/version.json

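The jq filters above pull deployed contract addresses out of Foundry broadcast files. A standalone sketch of the same lookup follows; the path mirrors the script's `$BROADCAST/$DEPLOY_SCRIPT/$CHAINID` layout and the result is whatever the last `forge script ... --broadcast` run recorded.

```bash
# Example only: CREATE transactions in run-latest.json have .function == null,
# so this selects the VaultFactory deployment record and checksums its address.
BROADCAST=contract/deployment/arbsep/broadcast
jq -r '.transactions[]
        | select(.contractName=="VaultFactory")
        | select(.function==null).contractAddress' \
   "$BROADCAST/DeployArbitrumSepolia.sol/421614/run-latest.json" | cast to-check-sum-address
```
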
3 bin/arbsep/finaldata-deploy (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
# shellcheck disable=SC2086
bin/deploy backend $1 arbsep arbsep finaldata

43 bin/arbsep/init-deploy (Executable file)
@@ -0,0 +1,43 @@
#!/usr/bin/env bash

if [ "$PRIVKEY_MIRROR" == "" ]; then
    echo You must set PRIVKEY_MIRROR environment variable before running this script.
    exit 1
fi

cd contract || exit 1

TAG=arbsep
PRIVKEY=$PRIVKEY_MIRROR
RPC=${RPC_URL:-arbsep_alchemy}
CHAINID=421614


# force clean build
rm -rf out/ broadcast/ cache/
bin/build --skip Test || exit 1

rm -rf deployment/"$TAG"/out
mkdir -p deployment/"$TAG"
cp -r out deployment/"$TAG"/

cpbroadcast() {
    CONTRACT=$1
    mkdir -p deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"
    cp broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json
}

if [ "$1" == "broadcast" ]; then
    BROADCAST=--broadcast
else
    BROADCAST=
fi

forge script script/DeployArbitrumSepolia.sol -vvvv --fork-url "$RPC" --private-key "$PRIVKEY" "$BROADCAST"

if [ "$1" == 'broadcast' ]; then
    cpbroadcast DeployArbitrumSepolia.sol
    echo Saved deployment files.
else
    echo Deployment NOT broadcast \(use \'broadcast\' argument\)
fi

40 bin/arbsep/mirrorenv-deploy (Executable file)
@@ -0,0 +1,40 @@
#!/usr/bin/env bash

if [ "$PRIVKEY_MIRROR" == "" ]; then
    echo You must set PRIVKEY_MIRROR environment variable before running this script.
    exit 1
fi

cd contract || exit 1

# force clean build
rm -rf out/ broadcast/ cache/
bin/build --skip Test || exit 1

TAG=arbsep
PRIVKEY=$PRIVKEY_MIRROR
RPC=${RPC_URL:-arbsep_alchemy}
CHAINID=421614

cpbroadcast() {
    CONTRACT=$1
    mkdir -p deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"
    cp broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json deployment/"$TAG"/broadcast/"$CONTRACT"/"$CHAINID"/run-latest.json
}

if [ "$1" == "broadcast" ]; then
    BROADCAST=--broadcast
else
    BROADCAST=
fi

export NFPM=0x6b2937Bde17889EDCf8fbD8dE31C3C2a70Bc4d65
export SWAP_ROUTER=0xE592427A0AEce92De3Edee1F18E0157C05861564
forge script script/DeployMirror.sol -vvvv --fork-url "$RPC" --private-key "$PRIVKEY" "$BROADCAST"

if [ "$1" == 'broadcast' ]; then
    cpbroadcast DeployMirror.sol
    echo Saved deployment files.
else
    echo Deployment NOT broadcast \(use \'broadcast\' argument\)
fi

2 bin/arbsep/mirrorprice-deploy (Executable file)
@@ -0,0 +1,2 @@
#!/bin/bash
bin/deploy backend $1 arbsep arbsep mirrorprice

@@ -1,7 +1,5 @@
#!/bin/bash

#!/bin/bash

CONTRACT=${1:-0x82aF49447D8a07e3bd95BD0d56f35241523fBab1}
DECIMALS=$(cast call $CONTRACT "decimals()" | cast to-dec)
BALANCE=$(cast call $CONTRACT 'balanceOf(address)' "${2:-$VAULT}" | cast to-dec)

@@ -1,8 +1,8 @@
#!/bin/bash

if [ "$1" != "backend" ] && [ "$1" != "server" ] && [ "$1" != "web" ]; then
if [ "$1" != "backend" ] && [ "$1" != "server" ] && [ "$1" != "web" ] && [ "$1" != "dev" ]; then
    echo
    echo usage: "$0 "'{backend|server|web} [''dev''] [config] [deployment] [kubernetes]'
    echo usage: "$0 "'{backend|server|web|dev} [''dev''] [config] [deployment] [kubernetes] [image_tag]'
    echo
    echo ' [''dev''] if the literal string ''dev'' is not the second argument, then the build refuses to run if source code is not checked in. Otherwise, the git revision numbers are used in the image tag.'
    echo
@@ -12,6 +12,8 @@ if [ "$1" != "backend" ] && [ "$1" != "server" ] && [ "$1" != "web" ]; then
    echo
    echo ' [kubernetes] is used for the base image name and also to find the yaml file for deployment: deploy/k8s/{kubernetes}.yaml. Defaults to project name.'
    echo
    echo ' [image_tag] will be used for the container image name. The standard tag will always be generated as well.'
    echo
    exit 1
else
    PROJECT=$1
@@ -21,6 +23,15 @@ fi
if [ "$1" == "dev" ]; then
    echo A
    shift
    DEV=1
fi

if [ "$PROJECT" == "dev" ]; then
    DEV=1
    # NO_CACHE=--no-cache
fi

if [ "$DEV" == "1" ]; then
    TAG="dev`date +%Y%m%d%H%M%S`"
    if [ "$1" != "" ]; then
        CONFIG=$1
@@ -62,26 +73,43 @@ else
    KUBERNETES=$PROJECT
fi

if [ $(basename "$0") == 'deploy.sh' ]; then
if [ "$1" != "" ]; then
    IMG_TAG=$1
else
    IMG_TAG=
fi

if [ $(basename "$0") == 'deploy' ]; then
    DEPLOY=1
else
    DEPLOY=0
fi

REMOTE=gcr.io/cointrader-211623/dexorder
REMOTE=northamerica-northeast2-docker.pkg.dev/dexorder-430504/dexorder

# todo nocache as a script option
#docker build --no-cache -f deploy/Dockerfile-$PROJECT -t dexorder/$PROJECT:latest . || exit 1
echo Building $PROJECT config=$CONFIG deployment=$DEPLOYMENT '=>' $TAG
docker build -f deploy/docker/Dockerfile-$PROJECT --build-arg="CONFIG=$CONFIG" --build-arg="DEPLOYMENT=$DEPLOYMENT" -t dexorder/$PROJECT:latest . || exit 1
if [ "$DEPLOY" == "0" ]; then
    ACTION=Building
    NO_CACHE=--no-cache
else
    ACTION=Making
fi


echo $ACTION $PROJECT config=$CONFIG deployment=$DEPLOYMENT '=>' $TAG
docker build $NO_CACHE -f deploy/docker/Dockerfile-$PROJECT --build-arg="CONFIG=$CONFIG" --build-arg="DEPLOYMENT=$DEPLOYMENT" -t dexorder/$PROJECT:latest . || exit 1
docker tag dexorder/$PROJECT:latest dexorder/$PROJECT:$TAG
docker tag dexorder/$PROJECT:$TAG $REMOTE/$PROJECT:$TAG
docker tag $REMOTE/$PROJECT:$TAG $REMOTE/$PROJECT:latest
if [ "$IMG_TAG" != "" ]; then
    docker tag dexorder/$PROJECT:$TAG $REMOTE/$PROJECT:$IMG_TAG
    TAG=$IMG_TAG
fi
echo "$(date)" built $REMOTE/$PROJECT:$TAG

if [ "$DEPLOY" != "0" ]; then
if [ "$DEPLOY" == "1" ]; then
    docker push $REMOTE/$PROJECT:$TAG
    # docker push $REMOTE/$PROJECT:latest
    sed "s#dexorder/$PROJECT*#$REMOTE/$PROJECT:$TAG#" deploy/k8s/$KUBERNETES.yaml | kubectl apply -f - || (echo kubectl apply failed && exit 1)
    YAML=$(sed "s#image: dexorder/$PROJECT*#image: $REMOTE/$PROJECT:$TAG#" deploy/k8s/$KUBERNETES.yaml)
    echo "$YAML" | kubectl apply -f - || echo "$YAML" "\nkubectl apply failed" && exit 1
    echo deployed $KUBERNETES.yaml $REMOTE/$PROJECT:$TAG
fi

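For reference, the usage string above maps onto invocations like the following. Both commands appear verbatim in wrapper scripts elsewhere in this diff; whether the script only builds or also pushes and applies depends on whether it is invoked as `bin/build` or through the `bin/deploy` symlink.

```bash
# Both invocations are taken from wrapper scripts added/updated in this change set:
bin/deploy backend dev finaldata alpha backfill   # dev build of the backend, backfill k8s yaml
bin/deploy backend alpha alpha mirrorprice        # alpha config/deployment, mirrorprice k8s yaml
```
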
@@ -1,14 +1,15 @@
#!/bin/bash

TAG=${TAG:-mock}
CHAIN_ID=${CHAIN_ID:-31337}

# this script takes an RPC_URL environment var and exports env vars for init contracts
. ./bin/shmockenv.sh
. bin/shmockenv

if [ "$TAG" == "mock" ]; then
    VERSION=dev
    REVISION="$(date '+%Y-%m-%d %H:%M')"
    DEST=web/public
    DEST=contract
else
    VERSION=$TAG
    REVISION="$( cd contract && git log --oneline | head -1 | cut -d ' ' -f 1 )"
@@ -16,5 +17,6 @@ else
fi
export VERSION
export REVISION
envsubst < version-"$TAG".json > "$DEST"/version.json || err version file
export CHAIN_ID
envsubst < conf/version-"$TAG".json > "$DEST"/version.json || err version file
echo wrote "$DEST"/version.json

9 bin/cluster (Executable file)
@@ -0,0 +1,9 @@
#!/bin/bash

if [ -z "$1" ]; then
    echo cluster [tag]
    exit 1
fi

kubectl config use-context $1 || exit 1
gcloud config configurations activate $1 || exit 1

1 bin/deploy (Symbolic link)
@@ -0,0 +1 @@
build
@@ -1 +0,0 @@
build.sh

3 bin/dev-remote-init (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash
curl https://dev.dexorder.trade/contract/version.json -o web/public/contract/version.json
curl https://dev.dexorder.trade/metadata.json -o web/public/metadata.json

17 bin/dragonfly (Executable file)
@@ -0,0 +1,17 @@
#!/bin/bash
echo running dragonfly
if [ "$USE_DRAGONFLY_BINARY" == "1" ]; then
    DF="dragonfly"
else
    DF="docker run --network=host --ulimit memlock=-1 docker.dragonflydb.io/dragonflydb/dragonfly:latest"
fi
$DF --proactor_threads 1 --maxmemory 256MiB --hz=1 --dbfilename '' "$@"
RESULT=$?
if [ $RESULT -eq 0 ]; then
    echo dragonfly exited successfully
elif [ $RESULT -eq 1 ]; then
    # "normal" error like SIGTERM
    echo dragonfly exited
else
    echo dragonfly exited with an ERROR code "$?"
fi

2 bin/dragonfly-update (Executable file)
@@ -0,0 +1,2 @@
#!/bin/bash
docker pull docker.dragonflydb.io/dragonflydb/dragonfly:latest

@@ -1,2 +0,0 @@
#!/bin/bash
bin/deploy.sh backend $1 finaldata alpha finaldata

1 bin/getprice (Symbolic link)
@@ -0,0 +1 @@
../contract/bin/getprice

@@ -1,7 +1,7 @@
#!/bin/bash

if [ -z "$1" ]; then
    source ./bin/shmockenv.sh
    source bin/shmockenv
    # ^^ sets the POOL variable
else
    POOL=$1

@@ -1,6 +1,6 @@
#!/bin/bash

. ./bin/shmockenv.sh
. bin/shmockenv

envsubst < metadata-mock.json > web/public/metadata.json

@@ -1,13 +1,15 @@
#!/bin/bash

. ./bin/shmockenv.sh
. bin/shmockenv

# 0xC31E54c7a869B9FcBEcc14363CF510d1c41fa443 is WETH/USDC
# 0xC6F780497A95e246EB9449f5e4770916DCd6396A is ARB/WETH
# 0xcda53b1f66614552f834ceef361a8d12a0b8dad8 is ARB/USDC
MIRROR_POOLS=${MIRROR_POOLS:-'["0xC31E54c7a869B9FcBEcc14363CF510d1c41fa443","0xC6F780497A95e246EB9449f5e4770916DCd6396A","0xcDa53B1F66614552F834cEeF361A8D12a0B8DaD8"]'}
#MIRROR_POOLS=${MIRROR_POOLS:-'["0xC31E54c7a869B9FcBEcc14363CF510d1c41fa443","0xC6F780497A95e246EB9449f5e4770916DCd6396A","0xcDa53B1F66614552F834cEeF361A8D12a0B8DaD8"]'}
#MIRROR_POOLS=${MIRROR_POOLS:-'["0xC31E54c7a869B9FcBEcc14363CF510d1c41fa443"]'}
MIRROR_POOLS=${MIRROR_POOLS:-'["0xC6962004f452bE9203591991D15f6b388e09E8D0","0xc473e2aEE3441BF9240Be85eb122aBB059A3B57c","0x9360c3c95CA53b18221b97fbE22334C26A560511"]'} # WETH/USDC 0.05% and 0.30%; ZRO/USDC
MIRROR_METADATA=${MIRROR_METADATA:-../web/public/metadata.json}
MIRROR_POLLING=${MIRROR_POLLING:-'10'}
MIRROR_POLLING=${MIRROR_POLLING:-'5'}
cd backend || exit 1

term() {
@@ -16,6 +18,6 @@ term() {

trap term TERM INT

PYTHONPATH=src venv/bin/python -m dexorder.bin.mirror "mirror_source_rpc_url=https://arbitrum-one.publicnode.com" "mirror_pools=$MIRROR_POOLS" "metadata=$MIRROR_METADATA" "polling=$MIRROR_POLLING" &
PYTHONPATH=src venv/bin/python -m dexorder.bin.mirror 'mirror_source_rpc_url=${rpc_urls.arbitrum_alchemy}' "mirror_pools=$MIRROR_POOLS" "metadata=$MIRROR_METADATA" "polling=$MIRROR_POLLING" &
PID=$!
wait "$PID"

@@ -1,2 +0,0 @@
#!/bin/bash
bin/deploy.sh backend alpha alpha mirrorprice

@@ -1,16 +1,17 @@
#!/bin/bash

# Configuration variables
FORK_URL=http://localhost:8545
FORK_URL=${FORK_URL:-arbitrum_mock}
# 0xac097 is dev account #0 used for Deploy scripts
DEFAULT_KEY='0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'
DEFAULT_KEY=${DEFAULT_KEY:-'0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'}
CHAIN_ID=${CHAIN_ID:-31337}

RESULT=0

if [ -z "$NOSUDO" ]; then
    SUDO=sudo
else
    if [ -z "$MOCKSUDO" ]; then
        SUDO=
    else
        SUDO=sudo
    fi

killAndWait() {
@@ -38,7 +39,7 @@ killAndWait() {
end () {
    echo exiting
    kill $NODE_PID || echo anvil/hardhat was already dead
    kill $DF_PID || echo dragonfly was already dead
    kill $DRAGONFLY_PID || echo dragonfly was already dead
    wait
    exit $RESULT
}
@@ -63,19 +64,25 @@ mkdir -p tmp
# build the contracts
cd contract || err cd contract

bin/build.sh --skip Test || err build error
bin/build --skip Test || err build error

# launch a node with a private fork
if [[ "${DEXORDER_USE_HARDHAT:-0}" != "0" ]]; then
    npx hardhat node --fork https://arb-mainnet.g.alchemy.com/v2/L0eaUwWEoWzszCK9EhqHdl_p7VHStkaC 2>&1 | tee > ../tmp/hardhat.txt &
    npx hardhat node --fork $HARDHAT_FORK_URL 2>&1 | tee > ../tmp/hardhat.txt &
else
    # changing the mining mode dynamically in anvil only works for disabling interval blocks, not re-enabling them
    # so we must use only interval mining from the start
    #anvil -f arbitrum_mock --chain-id 31337 --prune-history --block-time 5 &> ../tmp/anvil.txt &
    #anvil -f arbitrum_mock --chain-id 31337 --block-time 5 &> ../tmp/anvil.txt &
    anvil -f arbitrum_mock --chain-id 31337 &> ../tmp/anvil.txt &
    #anvil -f arbitrum_mock --chain-id $CHAIN_ID --prune-history --block-time 5 &> ../tmp/anvil.txt &
    if [[ "${EXTERNALIZE_ANVIL}" == "1" ]]; then
        ANVIL_CHAIN_ARGS="--chain-id $CHAIN_ID --host 0.0.0.0"
    else
        ANVIL_CHAIN_ARGS="--chain-id $CHAIN_ID"
    fi
    # shellcheck disable=SC2086
    anvil -f $FORK_URL --code-size-limit 100000 $ANVIL_CHAIN_ARGS $ANVIL_ARGS &> ../tmp/anvil.txt &
fi
NODE_PID=$!
export NODE_PID

# deploy mock environment
sleep 1
@@ -92,16 +99,16 @@ done
#fi

echo running script/Deploy.sol
forge script script/Deploy.sol --skip Test --fork-url $FORK_URL --broadcast \
forge script script/Deploy.sol --skip Test --fork-url http://localhost:8545 --broadcast \
    --private-key ${PRIVATE_KEY:-$DEFAULT_KEY} || err deploy

echo running script/DeployMock.sol
forge script script/DeployMock.sol --skip Test --fork-url $FORK_URL --broadcast \
forge script script/DeployMock.sol --skip Test --fork-url http://localhost:8545 --broadcast --code-size-limit 100000 \
    --private-key ${PRIVATE_KEY:-$DEFAULT_KEY} || err deploymock

ls script/
echo running script/DeployMirror.sol
forge script script/DeployMirror.sol --skip Test --fork-url $FORK_URL --broadcast \
forge script script/DeployMirror.sol --skip Test --fork-url http://localhost:8545 --broadcast \
    --private-key ${PRIVATE_KEY:-$DEFAULT_KEY} || err deploymirror

echo deploy scripts completed
@@ -113,39 +120,42 @@ if [[ "${DEXORDER_USE_HARDHAT:-0}" != "0" ]]; then
fi

cd .. || err
. ./bin/shmockenv.sh # export env vars for init contracts
. bin/shmockenv # export env vars for init contracts

# write version file
VERSION="dev $(date -u '+%Y-%m-%d %H:%M')"
export VERSION
envsubst < version-mock.json > web/public/version.json || err version.json
export CHAIN_ID
bin/build-version-json
# write metadata.json file
envsubst < metadata-mock.json > web/public/metadata.json || err metadata.json
envsubst < conf/metadata-mock.json > web/public/metadata.json || err metadata.json

if [ "$USE_REDIS" == "1" ]; then
    redis-stack-server &> tmp/redis.txt &
    echo started redis
else
    # start dragonfly
    bin/dragonfly &> tmp/dragonfly.txt &
    echo started dragonfly
fi
DRAGONFLY_PID=$!
export DRAGONFLY_PID

cd backend || err

# reset database
PYTHONPATH=src venv/bin/python -m alembic downgrade base &> ../tmp/alembic.txt || err alembic down
PYTHONPATH=src venv/bin/python -m alembic upgrade head &>> ../tmp/alembic.txt || err alembic up

# start dragonfly
bin/df.sh &> ../tmp/df.txt &
DF_PID=$!
echo alembic downgrade base
venv/bin/python -m alembic downgrade base &> ../tmp/alembic.txt || err alembic down
echo alembic upgrade head
venv/bin/python -m alembic upgrade head &>> ../tmp/alembic.txt || err alembic up

cd ..

echo NODE_PID ${NODE_PID}
echo DF_PID ${DF_PID}
echo
echo '.-----------------.'
echo '| Mockchain ready |'
echo "'-----------------'"
echo
echo "MockEnv $MOCKENV"
echo "MirrorEnv $MIRRORENV"
echo "MEH $MEH"
echo "USXD $USXD"
echo "Pool $POOL"
echo

wait # wait for all background processes to terminate

3 bin/numVaults (Executable file)
@@ -0,0 +1,3 @@
#!/bin/bash

kubectl exec postgres-0 -- psql -U dexorder -t -c "select count(*) from seriesdict where series='v';" | xargs

31 bin/price
@@ -1,31 +0,0 @@
#!/bin/bash

if [ -z "$1" ]; then
    # mockchain price
    source ./bin/shmockenv.sh
    SQRTX96=$(cast call $MOCKENV "price()" | cast to-dec)
    D0=$T0DEC
    D1=$T1DEC
    T0INT=$(cast to-dec $TOKEN0)
    T1INT=$(cast to-dec $TOKEN1)
    #echo $T0INT $T1INT
    INVERTED=$(echo $T0INT '<' $T1INT | bc)
else
    # pool price
    SQRTX96=$(cast call "$1" "slot0()(uint160,int24,uint16,uint16,uint16,uint8,bool)" | head -1 | cut -d ' ' -f 1)
    T0=$(cast call "$1" "token0()" | cast parse-bytes32-address)
    D0=$(cast call "$T0" "decimals()" | cast to-dec)
    T1=$(cast call "$1" "token1()" | cast parse-bytes32-address)
    D1=$(cast call "$T1" "decimals()" | cast to-dec)
    echo decimals $D0 $D1
    INVERTED=0
fi
#echo sqrtX96 $SQRTX96
#echo inverted $INVERTED
if [ "$INVERTED" == "0" ]; then
    FORMULA='scale=18; '"$SQRTX96"'^2 * 10^('"$D0"-"$D1"') / 2^(96*2)'
else
    FORMULA='scale=18; 2^(96*2) * 10^('"$D1"-"$D0"') / '"$SQRTX96"'^2'
fi
#echo "$FORMULA"
echo "$FORMULA" | bc

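The deleted bin/price above converts Uniswap V3's `sqrtPriceX96` into a human-readable price with bc. A worked sketch of the same arithmetic, with a made-up reading (not from any real pool), is shown below.

```bash
# Illustrative only: price = sqrtPriceX96^2 * 10^(dec0 - dec1) / 2^192,
# i.e. token1 per token0, rescaled by the two tokens' decimal difference.
SQRTX96=4339505179874779000000000   # fabricated sqrtPriceX96, roughly a 3000 token1/token0 price
D0=18; D1=6                         # e.g. WETH (18 decimals) / USDC (6 decimals)
echo "scale=18; $SQRTX96^2 * 10^($D0-$D1) / 2^(96*2)" | bc
```
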
18 bin/setprice
@@ -1,18 +0,0 @@
#!/bin/bash

. ./bin/shmockenv.sh

#echo inverted $INVERTED
if [ "$INVERTED" == "1" ]; then
    INV='1/'
else
    INV=
fi
#echo INV $INV
FORMULA='scale=100; big='"$INV""$1"'*10^('"$T1DEC"'-'"$T0DEC"')*2^(96*2); scale=0; sqrt(big)'
#echo $FORMULA
#echo "$FORMULA" | bc
SQRTX96=$(echo "$FORMULA" | bc | head -1 | cut -d '.' -f 1)
#echo sqrtX96 $SQRTX96
TMP=$(mktemp)
cast send --private-key 0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6 "$MOCKENV" 'swapToPrice(uint160)' "$SQRTX96" > "$TMP" || cat "$TMP"

1 bin/setprice (Symbolic link)
@@ -0,0 +1 @@
../contract/bin/setprice

14 bin/shenv.sh
@@ -1,14 +0,0 @@
#!/bin/bash

# private key for Dev Account #9
export DEV9=0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6

export WETH=0x82aF49447D8a07e3bd95BD0d56f35241523fBab1
export USDC=0xFF970A61A04b1cA14834A43f5dE4533eBDDB5CC8

# WETH/USDC Pool address
export WETHUSDC=0xc31e54c7a869b9fcbecc14363cf510d1c41fa443

export PATH=$PATH:$(pwd)/bin

source ./bin/shmockenv.sh

1 bin/shmockenv (Symbolic link)
@@ -0,0 +1 @@
../contract/bin/shmockenv

@@ -1,61 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
RPC=${RPC_URL:-http://localhost:8545}
|
||||
|
||||
c() {
|
||||
# echo cast "$1" --rpc-url $RPC "${@:2}" >&2
|
||||
cast "$1" --rpc-url $RPC "${@:2}"
|
||||
}
|
||||
|
||||
FILE_TAG=${TAG:-mock}
|
||||
|
||||
if [ "$FILE_TAG" == "mock" ]; then
|
||||
BROADCAST=contract/broadcast
|
||||
FILE_TAG=latest
|
||||
else
|
||||
BROADCAST=contract/deployment/$TAG/broadcast
|
||||
fi
|
||||
|
||||
MOCKENV=$(jq -r '.transactions[] | select(.contractName=="MockEnv") | select(.function==null).contractAddress' $BROADCAST/DeployMock.sol/"${CHAINID:-$(c chain-id)}"/run-latest.json) || echo WARNING no MockEnv detected
|
||||
export MOCKENV
|
||||
MIRRORENV=$(jq -r '.transactions[] | select(.contractName=="MirrorEnv") | select(.function==null).contractAddress' $BROADCAST/DeployMirror.sol/"${CHAINID:-$(c chain-id)}"/run-latest.json) || echo WARNING no MirrorEnv detected
|
||||
export MIRRORENV
|
||||
FACTORY=$(jq -r '.transactions[] | select(.contractName=="Factory") | select(.function==null).contractAddress' $BROADCAST/Deploy.sol/"${CHAINID:-$(c chain-id)}"/run-latest.json) || exit 1
|
||||
export FACTORY
|
||||
HELPER=$(jq -r '.transactions[] | select(.contractName=="QueryHelper") | select(.function==null).contractAddress' $BROADCAST/Deploy.sol/"${CHAINID:-$(c chain-id)}"/run-latest.json) || exit 1
|
||||
export HELPER
|
||||
|
||||
VAULT_INIT_CODE_HASH=$(cast keccak $(jq -r .bytecode.object < contract/out/Vault.sol/Vault.json)) || exit 1
|
||||
export VAULT_INIT_CODE_HASH
|
||||
|
||||
POOL=$(c call $MOCKENV "pool()" | cast parse-bytes32-address) || exit 1
|
||||
export POOL
|
||||
MEH=$(c call $MOCKENV "COIN()" | cast parse-bytes32-address) || exit 1
|
||||
export MEH
|
||||
USXD=$(c call $MOCKENV "USD()" | cast parse-bytes32-address) || exit 1
|
||||
export USXD
|
||||
MOCK=$MEH
|
||||
export MOCK
|
||||
USD=$USDX
|
||||
export USD
|
||||
TOKEN0=$(c call $MOCKENV "token0()" | cast parse-bytes32-address) || exit 1
|
||||
export TOKEN0
|
||||
TOKEN1=$(c call $MOCKENV "token1()" | cast parse-bytes32-address) || exit 1
|
||||
export TOKEN1
|
||||
T0DEC=$(c call $TOKEN0 "decimals()" | cast to-dec) || exit 1
|
||||
export T0DEC
|
||||
T1DEC=$(c call $TOKEN1 "decimals()" | cast to-dec) || exit 1
|
||||
export T1DEC
|
||||
POOLDEC=$(echo $T1DEC - $T0DEC | bc)
|
||||
export POOLDEC
|
||||
MEH_INT=$(cast to-dec $MEH)
|
||||
USXD_INT=$(cast to-dec $USXD)
|
||||
#echo $MEH_INT $USXD_INT
|
||||
INVERTED=$(echo $MEH_INT '>' $USXD_INT | bc)
|
||||
export INVERTED
|
||||
|
||||
#echo "\$MOCKENV $MOCKENV"
|
||||
#echo "\$MIRRORENV $MIRRORENV"
|
||||
#echo "\$MEH $MEH"
|
||||
#echo "\$USXD $USXD"
|
||||
#echo "\$POOL $POOL"
|
||||
@@ -5,15 +5,15 @@ cleanup() {
    echo
    echo "Killing servers:"
    jobs
    kill $(jobs -p) >/dev/null 2>&1
    kill $(jobs -p) &>/dev/null

    echo "Killing anyone still using ports 3000, 3001, 8545 (none should be)"
    lsof -i :3000,3001,8545 -s tcp:LISTEN
    lsof -i tcp:3000,3001,8545 -s tcp:LISTEN -t | xargs kill >/dev/null 2>&1
    lsof -i :3000,3001,8545 -s tcp:LISTEN -t | xargs kill >/dev/null 2>&1
    lsof -i tcp:3000,3001,8545 -s tcp:LISTEN -t | xargs kill &>/dev/null
    lsof -i :3000,3001,8545 -s tcp:LISTEN -t | xargs kill &>/dev/null

    wait
    # killall -9 anvil >/dev/null 2>&1 # Sometimes anvil doesn't die
    # killall -9 anvil &>/dev/null # Sometimes anvil doesn't die
    echo "All should be killed now"

    echo "Servers/ports still running/open (none should be):"
@@ -37,29 +37,29 @@ rm -f tmp/server.log; touch tmp/server.log;
echo -n "starting servers: "

echo -n mock
( make mock > tmp/mock.log 2>&1 ; echo "*** mock exited ***" ) &
( make mock &> tmp/mock.log; echo "*** mock exited ***" ) &
echo -n "($!)..."
while ! grep -q "Mockchain ready" tmp/mock.log; do sleep 1; done;

echo -n mirrorprice
( make mirrorprice > tmp/mirrorprice.log 2>&1 ; echo "*** mirrorprice exited ***" ) &
( make mirrorprice &> tmp/mirrorprice.log; echo "*** mirrorprice exited ***" ) &
echo -n "($!)..."
while ! grep -q "dexorder Updating pools every" tmp/mirrorprice.log; do sleep 1; done;
while ! grep -q "Updating a pool every" tmp/mirrorprice.log; do sleep 1; done;

echo -n backend
( make backend > tmp/backend.log 2>&1 ; echo "*** backend exited ***" ) &
( make backend &> tmp/backend.log; echo "*** backend exited ***" ) &
echo -n "($!)..."
sleep 1 # Not sure why this one is needed
while ! grep -q "completed block" tmp/backend.log; do sleep 1; done;
while ! grep -q "Runner started" tmp/backend.log; do sleep 1; done;

echo -n server
( make server > tmp/server.log 2>&1 ; echo "*** server exited ***" ) &
( make server &> tmp/server.log; echo "*** server exited ***" ) &
echo -n "($!)..."
while ! grep -q "Started server on port" tmp/server.log; do sleep 1; done;

# web must be last because it depends on version-mock.json written by mock.sh
echo -n web
( make web > tmp/web.log 2>&1 ; echo "*** web exited ***" ) &
( make webhost &> tmp/web.log; echo "*** web exited ***" ) &
echo -n "($!)..."
while ! grep -q "Network: use --host to expose" tmp/web.log; do sleep 1; done;

@@ -67,7 +67,7 @@ echo
jobs -l

echo done
make --no-print-directory balances
#make --no-print-directory balances
echo
echo Ctl-C to stop.
echo "Note: Don't forget to reset MetaMask: Settings>Advanced>Clear activity"

1 conf/metadata-alpha.json (Normal file)
File diff suppressed because one or more lines are too long

2096 conf/metadata-finaldata.json (Normal file)
File diff suppressed because it is too large

14 conf/version-arbsep.json (Normal file)
@@ -0,0 +1,14 @@
{
  "version": "$VERSION",
  "revision": "$REVISION",
  "chainInfo": {
    "$CHAINID": {
      "id": $CHAINID,
      "name": "Arbitrum Sepolia",
      "vaultInitCodeHash": "$VAULT_INIT_CODE_HASH",
      "factory": "$FACTORY",
      "helper": "$HELPER",
      "dexorder": "$DEXORDER"
    }
  }
}

@@ -1,10 +1,11 @@
{
  "version": "$VERSION",
  "chainInfo": {
    "31337": {
      "id": 31337,
    "$CHAIN_ID": {
      "id": $CHAIN_ID,
      "name": "Mockchain",
      "vaultInitCodeHash": "$VAULT_INIT_CODE_HASH",
      "dexorder": "$DEXORDER",
      "factory": "$FACTORY",
      "helper": "$HELPER",
      "mockenv": "$MOCKENV",

2 contract
Submodule contract updated: 471f5203da...8422e4382a

@@ -1,14 +0,0 @@
1. `cd deploy/k8s`
2. `kubectl apply -f postgres.yaml -f redis.yaml -f anvil.yaml`
3. `cd ../../contract` to the contracts project folder
4. `RPC_URL=https://rpc.alpha.dexorder.trade ./bin/deploy.sh alpha mirror` This uses dev account #0 to deploy the init contracts to the alpha rpc url, and also the mockenv contract. The broadcast files and all compiler output files are labeled and organized with the tag `alpha`, and copied into the `contract/deployment/` directory. The Dockerfiles will copy this tagged deployment directory into subproject containers to provide the contract ABI's and bytecode along with the init contract deployment addresses.
5. `cd ..` back to top-level dexorder. The init contracts should be running on anvil now
6. `RPC_URL=https://rpc.alpha.dexorder.trade TAG=alpha ./bin/build_version.json.sh`
7. check-in the new deployment files created under contract/deployment/alpha
8. `./bin/mirrorprice-deploy.sh`
   * This builds the backend without metadata yet
9. `kubectl cp mirrorprice-<tab>:metadata.json metadata-alpha.json`
   * This gets the metadata file from the mirror, so all the pools and tokens have their mirrored addresses. Subsequent builds using the "alpha" config will put this metadata file into ./web/public/metadata.json (see Dockerfiles)
10. `./bin/deploy.sh backend alpha` no trailing slash: backend is a tag not the directory name
11. `./bin/deploy.sh server alpha`
12. `./bin/deploy.sh web alpha`

@@ -5,5 +5,5 @@ COPY id_rsa_builder.pub /root/.ssh/id_rsa.pub
RUN apk update && apk upgrade
RUN apk add bash git docker
WORKDIR /builder
COPY builder.sh .
ENTRYPOINT ["/bin/bash", "builder.sh"]
COPY builder .
ENTRYPOINT ["/bin/bash", "builder"]

@@ -7,4 +7,4 @@ err() {

git clone git@git.tolson.net:dexorder/dexorder.git
cd dexorder || err git clone
./bin/build.sh "$@"
./bin/build "$@"

8
deploy/builder/known_hosts_builder
Normal file
8
deploy/builder/known_hosts_builder
Normal file
@@ -0,0 +1,8 @@
|
||||
# git.tolson.net:22 SSH-2.0-OpenSSH_9.1
|
||||
git.tolson.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeUchk/NkIimMq7NX9RJzeSF5QfyWmP/UVH321rGExUBSlWzok9xsN74H3IWNjpyzvVVMc+OnuadmdmeG+HoSf8HBBewO52NDjuXnkG6/BU3IQA+TAE1JbqQ/AMvipWkmZi5Ivr9VvXdM4wzrUqu6Wm6xYka/H9LRVByHSKTudqt4n7lB6QoP5Z2X6m+M5aSpjK7ApGC7/OOjympsRTy1JJ5h9b5tBCVB1M11msoX8vaZracMaJqpd3jOqbGY0elgaO8KoT4P4DB2P2ZJe56h4MHF8rhdai1DpFL8Kfs29C31EciYe4mNNd3rBMTX4ufm5pEKgqdXvngrkoaVd00fD
|
||||
# git.tolson.net:22 SSH-2.0-OpenSSH_9.1
|
||||
git.tolson.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOz40r7ZSJLE+y24y1O/9KsbMJzumbyHeP0ckP55Lid8FBRwEUiCuWSIAjgIuhdsO6cRWfHF2AukRAUe6bAzPhI=
|
||||
# git.tolson.net:22 SSH-2.0-OpenSSH_9.1
|
||||
git.tolson.net ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPZ2pk8c5J5oa5oCwhWEnr1u0l4ZOZxyjkJrezd+Ygww
|
||||
# git.tolson.net:22 SSH-2.0-OpenSSH_9.1
|
||||
# git.tolson.net:22 SSH-2.0-OpenSSH_9.1
|
||||
21 deploy/create_roles.sql (Normal file)
@@ -0,0 +1,21 @@
\c dexorder;
create role backend;
grant all on all tables in schema public to backend;

create role srv;
grant connect on database dexorder to srv;
-- for fetching orders
grant select on seriesdict to srv;
-- for fetching orders
grant select on orderindex to srv;
grant select on token to srv;
grant select on pool to srv;
-- for requesting vaults
-- grant insert on dexorder.transactionjob to srv;

create role ro;
grant connect on database dexorder to ro;
-- from araqnid on Stack Overflow
SELECT 'GRANT SELECT ON ' || relname || ' TO ro;'
FROM pg_class JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE nspname = 'public' AND relkind IN ('r', 'v', 'S');

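Nothing in this change set invokes the new SQL file, so how it gets applied is an assumption; one plausible way, matching the `sudo -u postgres psql` pattern used by the Makefile's init_postgres target:

```bash
# Assumed usage (not shown anywhere in this diff):
sudo -u postgres psql -f deploy/create_roles.sql
```
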
16 deploy/dev-startall.sh (Executable file)
@@ -0,0 +1,16 @@
#!/bin/bash
export CHAIN_ID=1337

mkdir -p tmp
# this starts the entrypoint from the parent postgres image
/docker-entrypoint.sh postgres &> tmp/postgres.log &
while ! (tail -1 tmp/postgres.log | grep -q "ready to accept connections"); do sleep 1; done;
echo Initializing postgres
make init_postgres
. ~/.nvm/nvm.sh
nvm use node
export PATH=$PATH:/root/.foundry/bin
#export USE_REDIS=1 # use redis instead of dragonfly
export USE_DRAGONFLY_BINARY=1
export EXTERNALIZE_ANVIL=1
bin/startall

@@ -1,18 +1,15 @@
FROM python:3.11
FROM python:3.12

ARG CONFIG=alpha
ARG CONFIG
ARG DEPLOYMENT=$CONFIG

RUN apt-get update -y && apt-get upgrade -y

RUN mkdir -p /dexorder/backend
WORKDIR /dexorder/backend
ENV PYTHONPATH=/dexorder/backend/src

# we put this COPY before apt-get update so system upgrades happen whenever requirements-lock.txt is changed
COPY backend/requirements-lock.txt .
RUN apt-get update -y && apt-get upgrade -y
RUN pip install --upgrade pip
RUN pip install --upgrade -r requirements-lock.txt
RUN pip install --upgrade pip && pip install --upgrade -r requirements-lock.txt

WORKDIR /dexorder/contract
COPY contract/deployment/$DEPLOYMENT/ ./
@@ -21,8 +18,5 @@ WORKDIR /dexorder/backend
COPY backend/alembic/ alembic/
COPY backend/alembic.ini .
COPY backend/src/ src/
COPY backend/logging-$CONFIG.toml* logging.toml
COPY backend/dexorder-$CONFIG.toml* dexorder.toml
COPY ./metadata-$CONFIG.json* metadata.json

CMD ["python","-u","-m","dexorder.bin.main"]
CMD ["python","-m","dexorder.bin.main"]

40 deploy/docker/Dockerfile-dev (Normal file)
@@ -0,0 +1,40 @@
FROM ubuntu/postgres

ENV POSTGRES_USER=postgres
ENV POSTGRES_DATABASE=dexorder
ENV POSTGRES_PASSWORD=redroxed
ENV EXPOSE_WEB='--host 0.0.0.0'
COPY deploy/builder/id_rsa_builder /root/.ssh/id_rsa
COPY deploy/builder/id_rsa_builder.pub /root/.ssh/id_rsa.pub
COPY deploy/builder/known_hosts_builder /root/.ssh/known_hosts
RUN chmod 600 /root/.ssh/id_rsa /root/.ssh/known_hosts && chmod 644 /root/.ssh/id_rsa.pub

RUN apt-get update && apt-get upgrade -y && apt-get install -y sudo build-essential software-properties-common git curl gettext-base bc

# Install Dragonfly
RUN curl -L https://dragonflydb.gateway.scarf.sh/latest/dragonfly-x86_64.tar.gz -o dragonfly.tar.gz && tar xzf dragonfly.tar.gz && mv dragonfly-x86_64 bin/dragonfly && rm dragonfly.tar.gz

## Redis
#RUN sudo apt-get install -y lsb-release curl gpg
#RUN curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
#RUN sudo chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg
#RUN echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
#RUN apt-get update && apt-get install -y redis-stack-server

WORKDIR /
ARG REPO_BASE=git@git.tolson.net:dexorder
RUN git clone $REPO_BASE/dexorder.git
WORKDIR /dexorder

RUN make install_postgres install_docker install_node install_python install_foundry install_submodules
RUN . ~/.nvm/nvm.sh && nvm use node && make init_configs init_venv init_foundry init_yarn

COPY deploy/dev-startall.sh dev-startall.sh

# Ethereum JSON-RPC
EXPOSE 8545
# server socket.io
EXPOSE 3001
# web http
EXPOSE 3000
CMD ["./dev-startall.sh"]

@@ -1,6 +1,6 @@
FROM node:20

ARG CONFIG=alpha
ARG CONFIG
ARG DEPLOYMENT=$CONFIG

RUN apt-get update -y && apt-get upgrade -y
@@ -22,7 +22,8 @@ COPY server/.env-$CONFIG .env
WORKDIR /dexorder/web
COPY web/package.json .
COPY web/src/common.js src/common.js
COPY ./metadata-$CONFIG.json* public/metadata.json
COPY backend/conf/metadata-$CONFIG.json* public/metadata.json
COPY backend/conf/$CONFIG/metadata-$CONFIG.json* public/metadata.json

WORKDIR /dexorder/server
ENTRYPOINT []

@@ -1,24 +1,32 @@
FROM nginx:stable-alpine

ARG CONFIG=alpha
ARG CONFIG
ARG DEPLOYMENT=$CONFIG

RUN apk update && apk upgrade
RUN sed -i '1idaemon off;' /etc/nginx/nginx.conf
COPY deploy/nginx.conf /etc/nginx/conf.d/default.conf
RUN apk add yarn npm

WORKDIR /dexorder/web
COPY web/package.json web/yarn.lock ./
RUN yarn install
RUN apk del yarn
COPY web/ ./
COPY web/.env-$CONFIG .env
RUN apk add yarn npm
RUN yarn install
COPY contract/deployment/$DEPLOYMENT/version.json public/version.json
COPY ./metadata-$CONFIG.json* public/metadata.json
COPY contract/deployment/$DEPLOYMENT/version.json public/contract/version.json
COPY contract/deployment/$DEPLOYMENT/broadcast public/contract/broadcast
COPY contract/deployment/$DEPLOYMENT/out public/contract/out
COPY backend/conf/metadata-$CONFIG.json* public/metadata.json
COPY backend/conf/$CONFIG/metadata-$CONFIG.json* public/metadata.json
COPY metadata-$CONFIG.json* public/metadata.json
RUN npm run build
RUN apk del npm
RUN cp -r dist /app
RUN apk del yarn npm

WORKDIR /app
RUN rm -rf /dexorder

# this is for static files being served by web
RUN sed -i '1idaemon off;' /etc/nginx/nginx.conf
COPY deploy/web-nginx.conf /etc/nginx/conf.d/default.conf

WORKDIR /app

CMD ["nginx"]

@@ -1,15 +1,3 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: arb1
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: retained-resizable
|
||||
resources:
|
||||
requests:
|
||||
storage: 600Gi
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
@@ -34,11 +22,10 @@ spec:
|
||||
value: 'true'
|
||||
containers:
|
||||
- name: arb1
|
||||
image: offchainlabs/nitro-node:v2.3.2-064fa11
|
||||
image: offchainlabs/nitro-node:v3.2.1-d81324d
|
||||
command: [
|
||||
'--init.url=https://snapshot.arbitrum.foundation/arb1/nitro-archive.tar',
|
||||
'--parent-chain.connection.url=https://eth-mainnet.g.alchemy.com/v2/XiM8sruAHL_FKkhVdxqsOw2ardZFMZxX',
|
||||
'--parent-chain.blob-client.beacon-url=<TODO>' # TODO
|
||||
'--init.latest=archive',
|
||||
'--parent-chain.connection.url=https://eth-mainnet.g.alchemy.com/v2/Ns3gyYixF9sHKT54_ZOrzXzdG5GOqUSS',
|
||||
]
|
||||
volumeMounts:
|
||||
- name: arb1
|
||||
@@ -47,7 +34,12 @@ spec:
|
||||
requests:
|
||||
cpu: '3'
|
||||
memory: '12GB'
|
||||
volumes:
|
||||
- name: arb1
|
||||
persistentVolumeClaim:
|
||||
claimName: arb1
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: arb1
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: retained-resizable
|
||||
resources:
|
||||
requests:
|
||||
storage: 600Gi
|
||||
@@ -14,14 +14,38 @@ spec:
|
||||
labels:
|
||||
app: backend
|
||||
spec:
|
||||
# terminationGracePeriodSeconds: 1
|
||||
# terminationGracePeriodSeconds: 120
|
||||
containers:
|
||||
- name: backend
|
||||
image: dexorder/backend
|
||||
# command: ['sleep','infinity']
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: secret
|
||||
mountPath: /dexorder/backend/.secret.toml
|
||||
subPath: .secret.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/dexorder.toml
|
||||
subPath: dexorder.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/logging.toml
|
||||
subPath: logging.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/metadata.json
|
||||
subPath: metadata.json
|
||||
readOnly: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 500M
|
||||
memory: 2000M
|
||||
volumes:
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: backend-secret
|
||||
- name: conf
|
||||
configMap:
|
||||
name: backend
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: ohlc-backfill
|
||||
name: backfill
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: retained-resizable
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
storage: 200Gi
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
@@ -16,8 +16,8 @@ kind: StatefulSet
|
||||
metadata:
|
||||
name: backfill
|
||||
spec:
|
||||
replicas: 1
|
||||
serviceName: backfill
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: backfill
|
||||
@@ -30,25 +30,44 @@ spec:
|
||||
containers:
|
||||
- name: backfill
|
||||
image: dexorder/backend
|
||||
command: [ "python", "-u", "-m", "dexorder.bin.finaldata",
|
||||
# "rpc_url=https://arb-mainnet.g.alchemy.com/v2/icB000_hg01vv7A1z3DhU59dhDLHioby", # tim@dexorder.trade
|
||||
"rpc_url=https://arb-mainnet.g.alchemy.com/v2/xy5dDYdWkgrowhdngeop5pXLNsg0V6t3", # timolsoncrypto@gmail.com
|
||||
command: [
|
||||
# since backfill uses the backend image and finaldata toml, we override some settings here
|
||||
"python", "-m", "dexorder.bin.finaldata", # do NOT use -u unbuffered mode!
|
||||
"backfill=1", # In the beginning...
|
||||
"walker_stop=192499170",
|
||||
"concurrent_rpc_connections=9999",
|
||||
"walker_name=backfill", # use a separate cursor
|
||||
'walker_flush_interval=60',
|
||||
'walker_name=backfill',
|
||||
"ohlc_dir=/ohlc",
|
||||
'concurrent_rpc_connections=4',
|
||||
"ws_url=",
|
||||
]
|
||||
volumeMounts:
|
||||
- name: ohlc-backfill
|
||||
- name: secret
|
||||
mountPath: /dexorder/backend/.secret.toml
|
||||
subPath: .secret.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/dexorder.toml
|
||||
subPath: dexorder.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/logging.toml
|
||||
subPath: logging.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/metadata.json
|
||||
subPath: metadata.json
|
||||
readOnly: true
|
||||
- name: ohlc
|
||||
mountPath: /ohlc
|
||||
resources:
|
||||
requests:
|
||||
cpu: '.1'
|
||||
memory: 200M
|
||||
cpu: 200m
|
||||
memory: 500M
|
||||
volumes:
|
||||
- name: ohlc-backfill
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: backend-secret
|
||||
- name: conf
|
||||
configMap:
|
||||
name: backend-finaldata
|
||||
- name: ohlc
|
||||
persistentVolumeClaim:
|
||||
claimName: ohlc-backfill
|
||||
claimName: backfill
|
||||
|
||||
73 deploy/k8s/certificate.yaml Normal file
@@ -0,0 +1,73 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: dexorder-trade
|
||||
spec:
|
||||
secretName: dexorder-trade-tls
|
||||
commonName: dexorder.trade
|
||||
dnsNames:
|
||||
- dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: beta-dexorder-trade
|
||||
spec:
|
||||
secretName: beta-dexorder-trade-tls
|
||||
commonName: beta.dexorder.trade
|
||||
dnsNames:
|
||||
- beta.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ws-beta-dexorder-trade-tls
|
||||
spec:
|
||||
secretName: ws-beta-dexorder-trade-tls
|
||||
commonName: ws.beta.dexorder.trade
|
||||
dnsNames:
|
||||
- ws.beta.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: rpc-eth-beau-dexorder-trade
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
secretName: rpc-eth-beau-dexorder-trade-tls
|
||||
commonName: rpc.eth.beau.dexorder.trade
|
||||
dnsNames:
|
||||
- rpc.eth.beau.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ws-eth-beau-dexorder-trade
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
secretName: ws-eth-beau-dexorder-trade-tls
|
||||
commonName: ws.eth.beau.dexorder.trade
|
||||
dnsNames:
|
||||
- ws.eth.beau.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
257 deploy/k8s/dev.yaml Normal file
@@ -0,0 +1,257 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: dev
|
||||
labels:
|
||||
app: dev
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: dev
|
||||
serviceName: "dev"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: dev
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- name: dev
|
||||
image: dexorder/dev
|
||||
env:
|
||||
- name: PGDATA
|
||||
value: '/postgres/data'
|
||||
- name: FORK_URL
|
||||
value: 'https://arb-mainnet.g.alchemy.com/v2/KiEyekrM14csrTLzopxq4rzaAPI1RZoR'
|
||||
ports:
|
||||
- containerPort: 6545
|
||||
name: rpc
|
||||
- containerPort: 3001
|
||||
name: server
|
||||
- containerPort: 3000
|
||||
name: web
|
||||
volumeMounts:
|
||||
- name: anvil-data
|
||||
mountPath: /root/.foundry
|
||||
- name: postgres-data
|
||||
mountPath: /postgres
|
||||
- name: secret
|
||||
mountPath: /dexorder/backend/.secret.toml
|
||||
subPath: .secret.toml
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: backend-secret-dev
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: anvil-data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: standard
|
||||
resources:
|
||||
requests:
|
||||
storage: 200Gi
|
||||
- metadata:
|
||||
name: postgres-data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: standard
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: reboot-dev
|
||||
spec:
|
||||
schedule: "0 2 * * *" # runs every day at 02:00
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: bitnami/kubectl:latest
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- kubectl delete pod -l app=dev
|
||||
restartPolicy: OnFailure
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rpc-dev
|
||||
labels:
|
||||
app: dev
|
||||
spec:
|
||||
selector:
|
||||
app: dev
|
||||
ports:
|
||||
- name: rpc
|
||||
port: 8545
|
||||
protocol: TCP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: server-dev
|
||||
labels:
|
||||
app: server-dev
|
||||
spec:
|
||||
type: NodePort # we use NodePort because it's a long-lived websockets connection and we don't want cluster routing
|
||||
selector:
|
||||
app: dev
|
||||
ports:
|
||||
- name: server-dev
|
||||
port: 3001
|
||||
protocol: TCP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: web-dev
|
||||
labels:
|
||||
app: web-dev
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
||||
app: dev
|
||||
ports:
|
||||
- name: web-dev
|
||||
port: 3000
|
||||
targetPort: 3000
|
||||
protocol: TCP
|
||||
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: rpc-dev
|
||||
annotations:
|
||||
# websocket connection timeouts
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "86400"
|
||||
nginx.ingress.kubernetes.io/proxy-write-timeout: "86400"
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- rpc.dev.dexorder.trade
|
||||
secretName: rpc-dev-dexorder-trade-tls
|
||||
rules:
|
||||
- host: rpc.dev.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: rpc-dev
|
||||
port:
|
||||
number: 8545
|
||||
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: server-dev
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/enable-cors: "true"
|
||||
nginx.ingress.kubernetes.io/cors-allow-origin: "*"
|
||||
# websocket connection timeouts
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "86400"
|
||||
nginx.ingress.kubernetes.io/proxy-write-timeout: "86400"
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- ws.dev.dexorder.trade
|
||||
secretName: ws-dev-dexorder-trade-tls
|
||||
rules:
|
||||
- host: ws.dev.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: server-dev
|
||||
port:
|
||||
number: 3001
|
||||
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: web-dev
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- secretName: dev-dexorder-trade-tls
|
||||
hosts:
|
||||
- dev.dexorder.trade
|
||||
rules:
|
||||
- host: dev.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: web-dev
|
||||
port:
|
||||
number: 3000
|
||||
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: dev-dexorder-trade
|
||||
spec:
|
||||
secretName: dev-dexorder-trade-tls
|
||||
commonName: dev.dexorder.trade
|
||||
dnsNames:
|
||||
- dev.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ws-dev-dexorder-trade-tls
|
||||
spec:
|
||||
secretName: ws-dev-dexorder-trade-tls
|
||||
commonName: ws.dev.dexorder.trade
|
||||
dnsNames:
|
||||
- ws.dev.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: rpc-dev-dexorder-trade-tls
|
||||
spec:
|
||||
secretName: rpc-dev-dexorder-trade-tls
|
||||
commonName: rpc.dev.dexorder.trade
|
||||
dnsNames:
|
||||
- rpc.dev.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
248 deploy/k8s/ethereum.yaml Normal file
@@ -0,0 +1,248 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: jwt
|
||||
namespace: crypto-node
|
||||
data:
|
||||
# openssl rand -hex 32 | tr -d "\n" | base64
|
||||
jwt.hex: 'NTlhMTYxNmUyMjhhNjM1MDg0NGZiMzM5ZWU1NTliMzkxYmMzNmIxNWVmY2Q4NjY5NDdlNzc1NDRhZDE0OTk4Zg=='
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: ethereum
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
replicas: 1
|
||||
serviceName: ethereum
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ethereum
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ethereum
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 120
|
||||
containers:
|
||||
- name: lighthouse
|
||||
image: sigp/lighthouse
|
||||
command: [
|
||||
'lighthouse', 'beacon_node',
|
||||
'--datadir', '/lighthouse',
|
||||
'--network', 'mainnet',
|
||||
'--http',
|
||||
'--http-address', '0.0.0.0',
|
||||
'--execution-endpoint', 'http://reth:8551',
|
||||
'--execution-jwt', '/jwt/jwt.hex',
|
||||
'--checkpoint-sync-url', 'https://mainnet.checkpoint.sigp.io',
|
||||
'--checkpoint-sync-url-timeout', '300',
|
||||
'--disable-deposit-contract-sync', # since we are not staking
|
||||
'--historic-state-cache-size', '4', # hierarchical snap-diff cache
|
||||
]
|
||||
volumeMounts:
|
||||
- name: lighthouse
|
||||
mountPath: /lighthouse
|
||||
- name: jwt
|
||||
mountPath: /jwt
|
||||
readOnly: true
|
||||
ports:
|
||||
- containerPort: 5052
|
||||
# peering ports (if exposed)
|
||||
- containerPort: 9000
|
||||
protocol: TCP
|
||||
- containerPort: 9000
|
||||
protocol: UDP
|
||||
- containerPort: 9001
|
||||
protocol: UDP
|
||||
resources:
|
||||
requests:
|
||||
cpu: '1'
|
||||
memory: '2G'
|
||||
- name: reth
|
||||
image: ghcr.io/paradigmxyz/reth
|
||||
command: [
|
||||
'reth', 'node',
|
||||
'--datadir', '/reth',
|
||||
'--authrpc.jwtsecret', '/jwt/jwt.hex',
|
||||
'--authrpc.addr', '0.0.0.0',
|
||||
'--authrpc.port', '8551',
|
||||
'--http',
|
||||
'--http.addr', '0.0.0.0',
|
||||
# '--http.api', 'eth,net,trace',
|
||||
'--http.api', 'eth',
|
||||
'--ws',
|
||||
]
|
||||
volumeMounts:
|
||||
- name: reth
|
||||
mountPath: /reth
|
||||
- name: jwt
|
||||
mountPath: /jwt
|
||||
readOnly: true
|
||||
ports:
|
||||
# peering ports (if exposed)
|
||||
- containerPort: 30303
|
||||
protocol: TCP
|
||||
- containerPort: 30303
|
||||
protocol: UDP
|
||||
# execution port
|
||||
- containerPort: 8551
|
||||
protocol: TCP
|
||||
name: execution
|
||||
# rpc port
|
||||
- containerPort: 8545
|
||||
protocol: TCP
|
||||
name: rpc
|
||||
# ws port
|
||||
- containerPort: 8546
|
||||
protocol: TCP
|
||||
name: ws
|
||||
resources:
|
||||
requests:
|
||||
cpu: '2'
|
||||
memory: '10G'
|
||||
volumes:
|
||||
- name: jwt
|
||||
secret:
|
||||
secretName: jwt
|
||||
- name: lighthouse
|
||||
persistentVolumeClaim:
|
||||
claimName: lighthouse
|
||||
- name: reth
|
||||
persistentVolumeClaim:
|
||||
claimName: reth
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: lighthouse
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
volumeName: lighthouse
|
||||
storageClassName: bulk-a
|
||||
resources:
|
||||
requests:
|
||||
storage: 2Ti
|
||||
- metadata:
|
||||
name: reth
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
volumeName: reth
|
||||
storageClassName: bulk-a
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Ti
|
||||
|
||||
#---
|
||||
#apiVersion: v1
|
||||
#kind: Service
|
||||
#metadata:
|
||||
# name: lighthouse-peers
|
||||
# namespace: crypto-node
|
||||
#spec:
|
||||
# selector:
|
||||
# app: ethereum
|
||||
# type: NodePort
|
||||
# ports:
|
||||
# - protocol: TCP
|
||||
# port: 9000
|
||||
# name: peer1
|
||||
# - protocol: UDP
|
||||
# port: 9000
|
||||
# name: peer2
|
||||
# - protocol: UDP
|
||||
# port: 9001
|
||||
# name: peer3
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: reth
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
selector:
|
||||
app: ethereum
|
||||
ports:
|
||||
# - protocol: TCP
|
||||
# port: 30303
|
||||
# name: peer1
|
||||
# - protocol: UDP
|
||||
# port: 30303
|
||||
# name: peer2
|
||||
# expose these ports for the lighthouse container
|
||||
- protocol: TCP
|
||||
port: 8551
|
||||
name: execution
|
||||
- protocol: TCP
|
||||
port: 8546
|
||||
name: ws
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: eth-rpc
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
selector:
|
||||
app: ethereum
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8545
|
||||
name: rpc
|
||||
- protocol: TCP
|
||||
port: 8546
|
||||
name: ws
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: eth-rpc
|
||||
namespace: crypto-node
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- rpc.eth.beau.dexorder.trade
|
||||
secretName: rpc-eth-beau-dexorder-trade-tls
|
||||
rules:
|
||||
- host: rpc.eth.beau.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: eth-rpc
|
||||
port:
|
||||
number: 8545
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: eth-ws
|
||||
namespace: crypto-node
|
||||
annotations:
|
||||
# websocket connection timeouts
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "86400"
|
||||
nginx.ingress.kubernetes.io/proxy-write-timeout: "86400"
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- ws.eth.beau.dexorder.trade
|
||||
secretName: ws-eth-beau-dexorder-trade-tls
|
||||
rules:
|
||||
- host: ws.eth.beau.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: eth-rpc
|
||||
port:
|
||||
number: 8546
|
||||
@@ -6,18 +6,18 @@ metadata:
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: retained-resizable
|
||||
volumeName: ohlc
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
|
||||
storage: 200Gi
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: finaldata
|
||||
spec:
|
||||
replicas: 1
|
||||
serviceName: finaldata
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: finaldata
|
||||
@@ -31,20 +31,36 @@ spec:
|
||||
- name: finaldata
|
||||
image: dexorder/backend
|
||||
command: [
|
||||
"python", "-u", "-m", "dexorder.bin.finaldata",
|
||||
"ohlc_dir=/ohlc",
|
||||
"rpc_url=https://arb-mainnet.g.alchemy.com/v2/fneXR05VTXzNS6ApcPd-QuyX-gv7AWzL",
|
||||
"ws_url=",
|
||||
"backfill=-345600", # about one day
|
||||
'walker_flush_interval=30',
|
||||
'concurrent_rpc_connections=6',
|
||||
# since finaldata uses the backend image we override many settings here
|
||||
"python", "-m", "dexorder.bin.finaldata", # do NOT use -u unbuffered mode!
|
||||
# "backfill=1", # In the beginning...
|
||||
"backfill=-345600", # about one day at quarter-second blocktimes
|
||||
# "backfill=99174110", # one block before the creation of UNIv3:WETH/USDC pool 0xC6962004f452bE9203591991D15f6b388e09E8D0
|
||||
"concurrent_rpc_connections=9999",
|
||||
'walker_flush_interval=25',
|
||||
]
|
||||
volumeMounts:
|
||||
- name: secret
|
||||
mountPath: /dexorder/backend/.secret.toml
|
||||
subPath: .secret.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/dexorder.toml
|
||||
subPath: dexorder.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/logging.toml
|
||||
subPath: logging.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/metadata.json
|
||||
subPath: metadata.json
|
||||
readOnly: true
|
||||
- name: ohlc
|
||||
mountPath: /ohlc
|
||||
resources:
|
||||
requests:
|
||||
cpu: 600m
|
||||
cpu: 100m
|
||||
memory: 500M
|
||||
- name: nginx
|
||||
image: nginx
|
||||
@@ -56,6 +72,12 @@ spec:
|
||||
- containerPort: 80
|
||||
name: http
|
||||
volumes:
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: backend-secret
|
||||
- name: conf
|
||||
configMap:
|
||||
name: backend-finaldata
|
||||
- name: ohlc
|
||||
persistentVolumeClaim:
|
||||
claimName: ohlc
|
||||
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- name: hardhat
|
||||
image: gcr.io/cointrader-211623/hardhat
|
||||
image: northamerica-northeast2-docker.pkg.dev/dexorder-430504/hardhat
|
||||
imagePullPolicy: Always
|
||||
command: ['npx','hardhat','node','--hostname','0.0.0.0']
|
||||
ports:
|
||||
|
||||
@@ -6,11 +6,11 @@ metadata:
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- secretName: alpha-dexorder-trade-tls
|
||||
- secretName: beta-dexorder-trade-tls
|
||||
hosts:
|
||||
- alpha.dexorder.trade
|
||||
- beta.dexorder.trade
|
||||
rules:
|
||||
- host: alpha.dexorder.trade
|
||||
- host: beta.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
@@ -27,7 +27,7 @@ kind: Ingress
|
||||
metadata:
|
||||
name: corp
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/temporal-redirect: 'https://alpha.dexorder.trade/home'
|
||||
nginx.ingress.kubernetes.io/temporal-redirect: 'https://beta.dexorder.trade/home'
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
@@ -61,11 +61,11 @@ metadata:
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- secretName: alpha-dexorder-trade-tls
|
||||
- secretName: beta-dexorder-trade-tls
|
||||
hosts:
|
||||
- alpha.dexorder.trade
|
||||
- beta.dexorder.trade
|
||||
rules:
|
||||
- host: alpha.dexorder.trade
|
||||
- host: beta.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /ohlc
|
||||
@@ -83,7 +83,7 @@ metadata:
|
||||
name: server
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/enable-cors: "true"
|
||||
nginx.ingress.kubernetes.io/cors-allow-origin: "https://alpha.dexorder.trade"
|
||||
nginx.ingress.kubernetes.io/cors-allow-origin: "https://beta.dexorder.trade"
|
||||
# websocket connection timeouts
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "86400"
|
||||
nginx.ingress.kubernetes.io/proxy-write-timeout: "86400"
|
||||
@@ -91,10 +91,10 @@ spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- ws.alpha.dexorder.trade
|
||||
secretName: ws-alpha-dexorder-trade-tls
|
||||
- ws.beta.dexorder.trade
|
||||
secretName: ws-beta-dexorder-trade-tls
|
||||
rules:
|
||||
- host: ws.alpha.dexorder.trade
|
||||
- host: ws.beta.dexorder.trade
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
@@ -104,48 +104,3 @@ spec:
|
||||
name: server
|
||||
port:
|
||||
number: 3001
|
||||
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: dexorder-trade
|
||||
spec:
|
||||
secretName: dexorder-trade-tls
|
||||
commonName: dexorder.trade
|
||||
dnsNames:
|
||||
- dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: alpha-dexorder-trade
|
||||
spec:
|
||||
secretName: alpha-dexorder-trade-tls
|
||||
commonName: alpha.dexorder.trade
|
||||
dnsNames:
|
||||
- alpha.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: ws-alpha-dexorder-trade-tls
|
||||
spec:
|
||||
secretName: ws-alpha-dexorder-trade-tls
|
||||
commonName: ws.alpha.dexorder.trade
|
||||
dnsNames:
|
||||
- ws.alpha.dexorder.trade
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
|
||||
|
||||
40 deploy/k8s/init-cluster-gke.yaml Normal file
@@ -0,0 +1,40 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: retained-resizable
|
||||
provisioner: kubernetes.io/gce-pd
|
||||
parameters:
|
||||
type: pd-standard
|
||||
fstype: ext4
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: fast-retained-resizable
|
||||
provisioner: kubernetes.io/gce-pd
|
||||
parameters:
|
||||
type: pd-ssd
|
||||
fstype: ext4
|
||||
# replication-type: none
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
email: tim@dexorder.trade
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-privkey-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
ingressClassName: nginx
|
||||
43 deploy/k8s/init-cluster-talos.yaml Normal file
@@ -0,0 +1,43 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
email: tim@dexorder.trade
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-privkey-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
ingressClassName: nginx
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: lighthouse
|
||||
spec:
|
||||
storageClassName: bulk-a
|
||||
capacity:
|
||||
storage: 10Ti
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
hostPath:
|
||||
path: /var/mnt/bulk/a/lighthouse
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: reth
|
||||
spec:
|
||||
storageClassName: bulk-a
|
||||
capacity:
|
||||
storage: 10Ti
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
hostPath:
|
||||
path: /var/mnt/bulk/a/reth
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
email: tim@dexorder.trade
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-privkey-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
@@ -19,11 +19,35 @@ spec:
|
||||
- name: mirrorprice
|
||||
image: dexorder/backend
|
||||
command: ['python', '-u', '-m', 'dexorder.bin.mirror',
|
||||
'mirror_source_rpc_url=https://arb-mainnet.g.alchemy.com/v2/RLdQUy5UQFawOzeriyXXx_CQNa5qmS_8',
|
||||
'metadata=metadata.json',
|
||||
'polling=1',
|
||||
'account=admin',
|
||||
'polling=50',
|
||||
]
|
||||
resources:
|
||||
requests:
|
||||
cpu: 20m
|
||||
memory: 150MiB
|
||||
memory: 150M
|
||||
volumeMounts:
|
||||
- name: secret
|
||||
mountPath: /dexorder/backend/.secret.toml
|
||||
subPath: .secret.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/dexorder.toml
|
||||
subPath: dexorder.toml
|
||||
readOnly: true
|
||||
- name: conf
|
||||
mountPath: /dexorder/backend/logging.toml
|
||||
subPath: logging.toml
|
||||
readOnly: true
|
||||
# NOTE: we do NOT mount metadata.json because it is GENERATED by the mirrorenv process.
|
||||
# - name: conf
|
||||
# mountPath: /dexorder/backend/metadata.json
|
||||
# subPath: metadata.json
|
||||
# readOnly: true
|
||||
volumes:
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: backend-secret
|
||||
- name: conf
|
||||
configMap:
|
||||
name: backend-mirrorprice # special mirrorprice config
|
||||
|
||||
@@ -8,7 +8,7 @@ spec:
|
||||
storageClassName: retained-resizable
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
storage: 100Gi
|
||||
|
||||
|
||||
---
|
||||
@@ -32,14 +32,10 @@ spec:
|
||||
- name: postgres
|
||||
image: postgres
|
||||
env:
|
||||
- name: POSTGRES_DB
|
||||
value: dexorder
|
||||
- name: POSTGRES_USER
|
||||
value: dexorder
|
||||
- name: POSTGRES_PASSWORD
|
||||
value: redroxed
|
||||
- name: PGDATA
|
||||
value: /var/lib/postgresql/data/pgdata
|
||||
- name: POSTGRES_PASSWORD
|
||||
value: 'iXAtYgLGNzgqvaCfNhG8'
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
name: postgres
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: redis
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
serviceName: "redis"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -15,30 +16,14 @@ spec:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 500Mi
|
||||
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: redis
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
selector:
|
||||
app: redis
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 6379
|
||||
name: redis
|
||||
- name: redis
|
||||
image: docker.dragonflydb.io/dragonflydb/dragonfly:latest
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
resources:
|
||||
limits:
|
||||
cpu: '1'
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 500Mi
|
||||
|
||||
1196 deploy/talos/ingress-nginx.helm.yaml Normal file
File diff suppressed because it is too large
61 deploy/talos/local-static-provisioner.helm.yaml Normal file
@@ -0,0 +1,61 @@
|
||||
classes:
|
||||
- name: bulk-a # Defines name of storage classes.
|
||||
hostDir: /var/mnt/bulk/a
|
||||
mountDir: /var/mnt/bulk/a
|
||||
volumeMode: Filesystem
|
||||
fsType: ext4
|
||||
namePattern: "*"
|
||||
allowedTopologies:
|
||||
blockCleanerCommand:
|
||||
# Do a quick reset of the block device during its cleanup.
|
||||
- "/scripts/quick_reset.sh"
|
||||
# or use dd to zero out block dev in two iterations by uncommenting these lines
|
||||
# - "/scripts/dd_zero.sh"
|
||||
# - "2"
|
||||
# or run the shred utility for 2 iterations.
|
||||
# - "/scripts/shred.sh"
|
||||
# - "2"
|
||||
# or blkdiscard utility by uncommenting the line below.
|
||||
# - "/scripts/blkdiscard.sh"
|
||||
storageClass:
|
||||
reclaimPolicy: Retain
|
||||
- name: bulk-b # Defines name of storage classes.
|
||||
hostDir: /var/mnt/bulk/b
|
||||
mountDir: /var/mnt/bulk/b
|
||||
volumeMode: Filesystem
|
||||
fsType: ext4
|
||||
namePattern: "*"
|
||||
allowedTopologies:
|
||||
blockCleanerCommand:
|
||||
# Do a quick reset of the block device during its cleanup.
|
||||
- "/scripts/quick_reset.sh"
|
||||
# or use dd to zero out block dev in two iterations by uncommenting these lines
|
||||
# - "/scripts/dd_zero.sh"
|
||||
# - "2"
|
||||
# or run the shred utility for 2 iterations.
|
||||
# - "/scripts/shred.sh"
|
||||
# - "2"
|
||||
# or blkdiscard utility by uncommenting the line below.
|
||||
# - "/scripts/blkdiscard.sh"
|
||||
storageClass:
|
||||
reclaimPolicy: Retain
|
||||
- name: fast-a # Defines name of storage classes.
|
||||
hostDir: /var/mnt/fast/a
|
||||
mountDir: /var/mnt/fast/a
|
||||
volumeMode: Filesystem
|
||||
fsType: ext4
|
||||
namePattern: "*"
|
||||
allowedTopologies:
|
||||
blockCleanerCommand:
|
||||
# Do a quick reset of the block device during its cleanup.
|
||||
- "/scripts/quick_reset.sh"
|
||||
# or use dd to zero out block dev in two iterations by uncommenting these lines
|
||||
# - "/scripts/dd_zero.sh"
|
||||
# - "2"
|
||||
# or run the shred utility for 2 iterations.
|
||||
# - "/scripts/shred.sh"
|
||||
# - "2"
|
||||
# or blkdiscard utility by uncommenting the line below.
|
||||
# - "/scripts/blkdiscard.sh"
|
||||
storageClass:
|
||||
reclaimPolicy: Retain
|
||||
@@ -19,7 +19,8 @@ server {
|
||||
# for all routes matching a dot, check for files and return 404 if not found
|
||||
# e.g. /file.js returns a 404 if not found
|
||||
location ~ \.(?!html) {
|
||||
add_header Cache-Control "public, max-age=2678400";
|
||||
# todo increase cache timeout for production
|
||||
add_header Cache-Control "public, max-age=300";
|
||||
try_files $uri =404;
|
||||
}
|
||||
}
|
||||
}
|
||||
13 doc/accounts.md Normal file
@@ -0,0 +1,13 @@
|
||||
|
||||
## Accounts
|
||||
|
||||
The primary top-level account is 0x12DB90820DAFed100E40E21128E40Dcd4fF6B331
|
||||
|
||||
|
||||
| Name | Address | Description |
|
||||
|---------|--------------------------------------------|----------------------------------------------------------------------------|
|
||||
| admin | | deploys contracts, is upgrader, runs mirrorprice, funded from faucets |
|
||||
| vaulter | | deploys vaults |
|
||||
| place | | receives order placement fees |
|
||||
| gas | | receives gas fees |
|
||||
| fill | | receives fill fees |
|
||||
15 doc/alpha-deploy.md Normal file
@@ -0,0 +1,15 @@
|
||||
1. `cd deploy/k8s`
|
||||
2. `kubectl apply -f postgres.yaml -f redis.yaml -f anvil.yaml`
|
||||
3. `cd ../../contract` to the contracts project folder
|
||||
4. `RPC_URL=https://rpc.alpha.dexorder.trade ./bin/deploy alpha mirror` This uses dev account #0 to deploy the init contracts, along with the mockenv contract, to the alpha RPC URL. The broadcast files and all compiler output files are labeled and organized with the tag `alpha` and copied into the `contract/deployment/` directory. The Dockerfiles copy this tagged deployment directory into the subproject containers to provide the contract ABIs and bytecode along with the init contract deployment addresses.
|
||||
5. `cd ..` back to top-level dexorder. The init contracts should be running on anvil now
|
||||
6. `RPC_URL=https://rpc.alpha.dexorder.trade TAG=alpha ./bin/build_version_json`
|
||||
7. check-in the new deployment files created under contract/deployment/alpha
|
||||
8. `./bin/mirrorprice-deploy`
|
||||
* This builds the backend without metadata yet
|
||||
9. `kubectl cp mirrorprice-<tab>:metadata.json metadata-alpha.json`
|
||||
* This gets the metadata file from the mirror, so all the pools and tokens have their mirrored addresses. Subsequent builds using the "alpha" config will put this metadata file into ./web/public/metadata.json (see Dockerfiles)
|
||||
10. check-in the new metadata file
|
||||
11. `./bin/deploy backend alpha` no trailing slash: backend is a tag not the directory name
|
||||
12. `./bin/deploy server alpha`
|
||||
13. `./bin/deploy web alpha`
|
||||
52 doc/arb1-deploy.md Normal file
@@ -0,0 +1,52 @@
|
||||
# Arbitrum One Deployment
|
||||
|
||||
## Contracts
|
||||
|
||||
| Contract | Address |
|
||||
|----------------|--------------------------------------------|
|
||||
| ArbitrumRouter | 0x3EE915a112Dc3E8c6f0bd6406f9b06D67249d77E |
|
||||
| Dexorder | 0x93bc17B48289093f763893647f98ECBB0ba02a2c |
|
||||
| FeeManager | 0x30414cb83dC6C0C5908bBEC936903300D46bCd11 |
|
||||
| QueryHelper | 0xfFfA495fFCFa45806AEAAC20Db92a8a83D5aF7C4 |
|
||||
| VaultFactory | 0xA9DEe3537EE42Cb4717C017a07e9C20c43B1EBEd |
|
||||
| VaultImpl | 0x8416b8B4738Cd1C3a1B4027694337F92E45CF9F0 |
|
||||
|
||||
|
||||
### Vault Init Code Hash
|
||||
0xda672cdca096de00f3fed8150430564c059a59ad30cb2c824902097e25cd8b3a
|
||||
|
||||
|
||||
## Accounts
|
||||
|
||||
| Index | Name | Address | Description |
|
||||
|------:|----------|--------------------------------------------|-----------------------------------------------|
|
||||
| 0 | admin | 0x12DB90820DAFed100E40E21128E40Dcd4fF6B331 | deploys contracts, is upgrader |
|
||||
| 1 | | 0x0E280F5eDA58872d7cDaA8AC0A57A55fD6133AEd | reserved |
|
||||
| 2 | place | 0x078E0C1112262433375b9aaa987BfF09a08e863C | receives order placement fees |
|
||||
| 3 | gas | 0x411c418C005EBDefB551e5E6B734520Ef2591f51 | receives gas fees |
|
||||
| 4 | fill | 0x152a3a04cE063dC77497aA06b6A09FeFD271E716 | receives fill fees |
|
||||
| | server | 0x6b9478ED1db265E0c4290be219e07A8080CB46Ac | server less-secure account for vault creation |
|
||||
| | backend | 0x28DCbf568EE9a022aBa84cA2da2958C61834d8d1 | backend less-secure account for executions |
|
||||
| | adjuster | 0x822e57D95444f9Bf373827950068A877F7C7F5FC | less-secure account for adjusting fees |
|
||||
|
||||
|
||||
## Deployment Steps
|
||||
|
||||
Use the top-level (dexorder) directory as working directory for every step.
|
||||
1. `kubectl apply -f deploy/k8s/postgres.yaml -f deploy/k8s/redis.yaml`
|
||||
2. `bin/arb1/finaldata-deploy`
|
||||
3. `bin/arb1/init-deploy broadcast` (leave off `broadcast` to test first)
|
||||
1. Record deployed contract addresses
|
||||
2. Create a `version.json` file under `contract/deploy/arb1/version.json`
|
||||
3. Commit everything under the `contract/deployment/arb1` directory
|
||||
4. Set up the API key, accounts, etc. in `backend/conf/arb1/.secret.toml` and `backend/conf/arb1/dexorder-arb1.toml`
|
||||
5. `cd backend && bin/secret-push arb1 && bin/config-push arb1`
|
||||
6. `bin/build backend arb1 && bin/deploy backend arb1`
|
||||
7. Create a `metadata.json` file with approved tokens and pools by:
|
||||
1. In the database's `token` table, set the `approved` flag to `true` for all coins that may be searched by symbol or name
|
||||
2. run the Python module `dexorder.bin.generate_metadata`
|
||||
8. Set up the API key, accounts, etc. in `server/.env-arb1`
|
||||
9. `cd server && bin/secret-push arb1`
|
||||
10. `bin/build server arb1 && bin/deploy server arb1`
|
||||
11. There are no secrets in `web/.env-arb1` so it is checked into git and part of the Docker build. Just run
|
||||
`bin/build web arb1 && bin/deploy web arb1`
|
||||
46 doc/arbsep-deploy.md Normal file
@@ -0,0 +1,46 @@
|
||||
# Arbitrum-Sepolia Deployment
|
||||
|
||||
## Contracts
|
||||
|
||||
| Contract | Address | Cost |
|
||||
|-----------------------|---------------------------------------------|-----------------------------------------------|
|
||||
| ArbitrumSepoliaRouter | 0xa710C9Cd85f5Ee1CbbCEaF64639CA9eA8E0bA759 | 0.0002141139 ETH (2141139 gas * 0.1 gwei) |
|
||||
| Dexorder | 0x1A35F07B853Dc09f055557Ee42009278C8CD760A | 0.0000789669 ETH ( 789669 gas * 0.1 gwei) |
|
||||
| FeeManager | 0x75de78a8C86a230cFf1AE68651bFFD7980146C6e | 0.0001375895 ETH (1375895 gas * 0.1 gwei) |
|
||||
| QueryHelper | 0x5bEf639213C6273b61c5C527A836Cc69cD05e781 | 0.0001012220 ETH (1012220 gas * 0.1 gwei) |
|
||||
| MirrorEnv | 0xb94a5C44B88711BEF40576BE72B4806BE0bf779d | 0.0003396941 ETH (3396941 gas * 0.1 gwei) |
|
||||
| VaultFactory | 0x3b4E0FDcA40bA1404b844Fca08D4A52A0c66b4F8 | 0.0001909797 ETH (1909797 gas * 0.1 gwei) |
|
||||
| VaultLogic | 0x99f46FD7ea420AC610D3bD9f3FbDAa62DCD091dA | 0.0005441150 ETH (5441150 gas * 0.1 gwei) |
|
||||
| --------------------- | ------------------------------------------- | --------------------------------------------- |
|
||||
| Total | | 0.001266987 ETH (12669870 gas * avg 0.1 gwei) |
|
||||
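The cost column is just gas used multiplied by the 0.1 gwei gas price and converted to ETH (1 gwei = 1e-9 ETH). A quick check of the figures above, sketched in Python for illustration:

```python
# Reproduce the cost column: gas_used * gas_price (gwei) is the cost in gwei;
# dividing by 1e9 converts gwei to ETH.
def deploy_cost_eth(gas_used: int, gas_price_gwei: float = 0.1) -> float:
    return gas_used * gas_price_gwei / 1e9

print(f"{deploy_cost_eth(2_141_139):.10f}")   # 0.0002141139 ETH (ArbitrumSepoliaRouter)
print(f"{deploy_cost_eth(12_669_870):.9f}")   # 0.001266987 ETH (Total row)
```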
|
||||
### Vault Init Code Hash
|
||||
0x41c1f10139eb6d0494f6eb4fd36d54608d5c7c2a38dabaf1115e4b7085f74f7a
|
||||
|
||||
|
||||
## Accounts
|
||||
|
||||
| Name | Address | Description |
|
||||
|-------|----------------------------------------------|----------------------------------------------------------------------------|
|
||||
| admin | `0xE1918Fc43C3a9c4854463e86Af854BE7034Fab56` | deploys contracts, is upgrader, runs mirrorprice, funded from faucets |
|
||||
| place | `0xE54C49D1DEE0097DdBe218f3C5A2d1d197fD8AB5` | receives order placement fees |
|
||||
| gas | `0x872844E1B289d62a3809d91fF2008A1DB1406441` | receives gas fees |
|
||||
| fill | `0xe3FaD2aFc0d6F89F16BCa73D92c3174a49885ED7` | receives fill fees |
|
||||
|
||||
|
||||
## Deployment Steps
|
||||
|
||||
1. `kubectl apply -f deploy/k8s/postgres.yaml -f deploy/k8s/redis.yaml`
|
||||
2. load private keys into environment variables with `. ./bin/arbsep/load-keys`
|
||||
3. run `bin/arbsep/mirrorenv-deploy broadcast` to deploy the MirrorEnv
|
||||
4. check in the deployment file created under `contract/deployment/arbsep`
|
||||
5. `bin/deploy backend arbsep arbsep mirrorprice`
|
||||
6. copy the metadata file produced by the mirrorprice process to `backend/conf/arbsep/metadata-arbsep.json`
|
||||
7. deploy the initial contracts
|
||||
`bin/arbsep/init-deploy broadcast`
|
||||
8. create a `version.json` file describing the deployed contract addresses
|
||||
`bin/arbsep/build_version_json`
|
||||
9. Check in the deployment files and `version.json` created under `contract/deployment/arbsep`
|
||||
10. `bin/deploy backend arbsep`
|
||||
11. `bin/deploy server arbsep`
|
||||
12. `bin/deploy web arbsep`
|
||||
|
Image file changed (113 KiB before, 113 KiB after).
40 doc/design.md Normal file
@@ -0,0 +1,40 @@
|
||||
# General Design
|
||||
|
||||
## Vault
|
||||
Creating a separate contract for each user address allows users to deposit coins into their "account" using standard
|
||||
ERC20 sends without extra approvals. Withdrawals require a contract call, but again no approval step. Furthermore,
|
||||
this clarifies the no-custody nature of the setup, since DexOrder never has any claim to ownership of the user's
|
||||
contract. Of course this costs extra gas up front to create the contract for the user, but on L2's it should be
|
||||
minimal. What about ETH? Hmmm... The alternative is to have a single contract which keeps an accounting of everyone's
|
||||
everything. Deposits would require approvals and a contract call. Using separate vaults will be an easier, more secure
|
||||
experience for frequent traders who are more likely to be our users rather than casual, occasional traders.
|
||||
|
||||
## Orders
|
||||
Orders are defined using an in-token, an out-token, a route/pool, and an amount of either input or output currency.
|
||||
Each order may have many tranches, and each tranche may have its own price/time constraints. Each tranche tracks its
|
||||
own filled amount in addition to incrementing the global filled counter. OCO support. Conditional orders (stoploss)
|
||||
are implemented as a separate order which starts with amount 0 but receives any filled amounts from orders that
|
||||
reference it.
|
||||
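A rough sketch of these relationships (the field names here are hypothetical and do not mirror the actual Solidity structs; they only restate the description above):

```python
# Hypothetical sketch of the order/tranche relationships described above.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Tranche:
    min_price: Optional[float] = None     # optional price constraint
    max_price: Optional[float] = None
    start_time: Optional[int] = None      # optional time constraint
    end_time: Optional[int] = None
    filled: float = 0.0                   # per-tranche fill, also added to Order.filled

@dataclass
class Order:
    token_in: str
    token_out: str
    pool: str                             # route / pool
    amount: float                         # denominated in either the input or output currency
    amount_is_input: bool = True
    tranches: List[Tranche] = field(default_factory=list)
    filled: float = 0.0                   # global filled counter
    oco_group: Optional[int] = None       # one-cancels-other grouping
    feeds_order: Optional[int] = None     # conditional (stoploss) order that receives this order's fills
```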
|
||||
## Reorg Resilience
|
||||
Blockchain reorganizations happen, so we need a system that can revert recent activity, restore to a previous state,
|
||||
and replay a new set of truths. We need to track the current set of open orders with their computable pool
|
||||
dependencies, and we need to ensure that sent/mined transactions are not lost after a reorg but retried or safely
|
||||
discarded.
|
||||
|
||||
We run a single synchronous process which generates a batch of all the transactions and event logs, tagged
|
||||
with the block height, hash, and parent, plus a dexorder global sequence number. All subsequent processing is done
|
||||
within that block context, reading the state from the parent block and writing the state for the new block. State
|
||||
is kept entirely in-memory as a base state from an old block plus diffs organized in a tree of blockchain branches.
|
||||
All getters of a state object return the base value plus any diffs found by walking the current worker context's
|
||||
blockchain path up through its parent block. All setters write update requests to the current block hash, including
|
||||
its global seq. After all layers of jobs have provably completed, the actions are executed in seq order, and the block
|
||||
may be marked as completed. As blocks age out, orphans/uncles are simply dropped from memory while old main-chain blocks
|
||||
become new root blocks for the in-memory diff tree.
|
||||
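As a toy model of that read/write path (hypothetical names, in-memory only), the walk-to-parent lookup looks roughly like this:

```python
# Toy version of the block-diff tree: reads walk the current block's ancestry until a
# diff or the base state answers; writes only touch the current block's own diff.
from typing import Any, Dict, Optional

class DiffTreeState:
    def __init__(self, base: Dict[str, Any]):
        self.base = base                               # state as of the old root block
        self.parent: Dict[str, Optional[str]] = {}     # block hash -> parent hash
        self.diffs: Dict[str, Dict[str, Any]] = {}     # block hash -> key/value diffs

    def add_block(self, block_hash: str, parent_hash: Optional[str]) -> None:
        self.parent[block_hash] = parent_hash
        self.diffs[block_hash] = {}

    def get(self, block_hash: str, key: str) -> Any:
        h: Optional[str] = block_hash
        while h is not None:
            if key in self.diffs.get(h, {}):
                return self.diffs[h][key]
            h = self.parent.get(h)
        return self.base.get(key)

    def set(self, block_hash: str, key: str, value: Any) -> None:
        self.diffs[block_hash][key] = value
```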
|
||||
Active orders with an opportunity to trade are poked whenever they appear on any fork.
|
||||
|
||||
Transactions generated by jobs carry their original block context. If a sent tx has an origin block that gets too old,
|
||||
the tx will be retried if the origin block is main chain but not if the origin block was a reorged fork. Whenever we
|
||||
get a transaction receipt, the receipt is added to the order. As the order ages out, transaction receipts are
|
||||
re-checked for validity.
|
||||
13 doc/new-cluster.md Normal file
@@ -0,0 +1,13 @@
|
||||
1. Create k8s cluster
|
||||
2. [Install ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/)
|
||||
* for Talos, I deployed nginx as a daemonset using hostPort mode to bind 80/443. This also required changes to the
|
||||
* `helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace --values ingress-nginx.helm.yaml`
|
||||
* local storage https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/blob/master/helm/README.md#install-local-volume-provisioner-with-helm
|
||||
3. [Install cert-manager](https://cert-manager.io/docs/installation/kubectl/)
|
||||
4. `cd deploy/k8s`
|
||||
5. `kubectl apply -f init-cluster.yaml -f postgres.yaml -f redis.yaml -f ingress.yaml`
|
||||
6. Create the database by port-forwarding via k8s and running `alembic upgrade head`
|
||||
7. Run `create_roles.sql` and create users and passwords
|
||||
8. Point the DNS to the IP of the ingress load balancer named `ingress-nginx-controller`
|
||||
9. Create regional Artifact Registry
|
||||
1. `gcloud auth configure-docker <region>-docker.pkg.dev`
|
||||
27 doc/ohlc.md Normal file
@@ -0,0 +1,27 @@
|
||||
OHLC files are produced by the backend's finaldata.py process, which is a Walker, meaning it only processes blocks
|
||||
which have been finalized. Walkers additionally keep their block cursor in the `keyvalue` db table so they can exactly
|
||||
resume any interrupted process.
|
||||
|
||||
In this context, we may assume:
|
||||
* A single process is writing to each file
|
||||
* We may hold write locks open
|
||||
* If any data is found in a file upon startup, it's good up to the date of the cursor block.
|
||||
* Any row in a file with a date beyond the cursor is from a previous broken run and is removed.
|
||||
|
||||
OHLC files are a row-based format with newlines `\n` separating rows. Each row represents one OHLC interval.
Rows have a variable number of columns, separated by commas `,`.
|
||||
If an interval had only one trade, the row will have only two columns: `time, price` where time is the start time
|
||||
of the OHLC interval and price is the only price recorded.
|
||||
When a second trade happens during the interval, a closing price is added: `time, open, close`
|
||||
A third trade is required to reach the final format of: `time, open, high, low, close`
|
||||
|
||||
Rows are ordered by time but intervals may be skipped if no trade occurred during that interval. Such intervals are
|
||||
simply missing from the data file and clients are expected to forward-fill from the last known data. Every file will
|
||||
have at least one row at the beginning to announce the opening price upon entering the interval.
|
||||
This sparse format allows the many infrequently traded pools to have correspondingly small files.
|
||||
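A minimal client-side reader for this row format might look like the sketch below; expanding the short rows into full bars is an assumption (only two prices are known after two trades), and forward-filling skipped intervals is left to the caller:

```python
# Sketch: expand sparse OHLC rows (time,price | time,open,close | time,open,high,low,close)
# into full (time, open, high, low, close) bars. Intervals with no trades are simply
# absent from the file and must be forward-filled by the consumer.
from typing import Iterator, Tuple

def read_ohlc(path: str) -> Iterator[Tuple[float, float, float, float, float]]:
    with open(path) as f:
        for line in f:
            cols = [float(c) for c in line.strip().split(",") if c.strip()]
            if not cols:
                continue
            t = cols[0]
            if len(cols) == 2:              # time, price
                o = h = l = c = cols[1]
            elif len(cols) == 3:            # time, open, close
                o, c = cols[1], cols[2]
                h, l = max(o, c), min(o, c)
            else:                           # time, open, high, low, close
                o, h, l, c = cols[1:5]
            yield t, o, h, l, c
```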
|
||||
|
||||
"Quote Files" are kept in the top-level directory of each symbol as `quote.csv`. This file contains a single row with
|
||||
three columns: init_time, time, price. The init_time is the time of the first data in the series, then time and price
|
||||
are the latest quote. If time is not recent, then the symbol has not traded recently. There is no active update of
|
||||
any data unless there is a trade. Inactive symbols simply have quotes with older times for their last trades.
|
||||
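A matching reader for `quote.csv`, assuming the three fields are unix-epoch seconds and a float price (the units are not pinned down above) and using an arbitrary staleness threshold:

```python
# Sketch: read a symbol's quote.csv (init_time, time, price) and flag stale quotes.
import csv
import time

def read_quote(path: str, max_age_seconds: float = 3600.0) -> dict:
    with open(path) as f:
        init_time, quote_time, price = (float(x) for x in next(csv.reader(f)))
    return {
        "init_time": init_time,   # time of the first data point in the series
        "time": quote_time,       # time of the latest trade
        "price": price,           # latest traded price
        "stale": (time.time() - quote_time) > max_age_seconds,
    }
```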
16 doc/solidity.md Normal file
@@ -0,0 +1,16 @@
|
||||
# Solidity Primer
|
||||
|
||||
* memory model has three locations: `storage`, `memory`, and `calldata`
|
||||
* `storage` is per-contract private data, the members of the contract, stored on-chain. it's very gassy to read/write to storage.
|
||||
* `memory` is transient read/write memory whose lifetime ends after the transaction completes or reverts. far less gassy than storage.
|
||||
* `calldata` is a small read-only area for function arguments. you should rarely if ever need to use this keyword. reading from calldata takes the least gas of all.
|
||||
* word size is 256 bits. int and uint types are available in every 8-bit increment from `uint8`, `uint16`, `uint24`, ..., `uint256`. do not use the bare `uint` even though it's a legal alias for `uint256`.
|
||||
* similarly to `uint?` types, there are value types `bytes1`, `bytes2`, ..., `bytes32` which is 256 bits. `bytes` by itself is an alias for the dynamic array `byte[]`
|
||||
* arrays have three types: dynamic storage array, dynamic memory array, and static array.
|
||||
* all arrays in storage (contract members) start as 0-length and may only be extended by `contractArray.push(item)` one at a time. remove with `pop()`. a storage array referenced inside a function as `Foo[] storage myArray = contractArray;` results in a reference to the storage area.
|
||||
* dynamic memory arrays `Foo[] memory myArray = new Foo[](length);` only this c++ style allocation is available for dynamic memory arrays, and the length must be known at creation time. you must then set each member of the array separately, in a loop.
|
||||
* static arrays `Foo[4] memory myArray = [foo1, foo2, foo3, foo4];` the length of the array is part of the type information, and it is not possible to cast a dynamic array to a static one or vice-versa.
|
||||
* functions have two different types: views and transactions.
|
||||
* a view is read-only and may be completed instantly off-chain. its return values are immediately available to whatever client invoked the call.
|
||||
* transactions make changes to chain data. the return values from a transaction function are not written to chain, but they are immediately useable by other code that calls into that function from the same transaction.
|
||||
* Events are the way to publish queryable on-chain results. they are declared types and the `emit` keyword is used to create an event log record on-chain.
|
||||
@@ -21,7 +21,7 @@ Note `Makefile` to perform these installation and execution instructions.
|
||||
[Fix sudo required:](https://docs.docker.com/engine/install/linux-postinstall/)
|
||||
`sudo groupadd docker`
|
||||
`sudo usermod -aG docker $USER`
|
||||
* ~~alternatively we can use containerd and update the backend/bin/df.sh script.~~
|
||||
* ~~alternatively we can use containerd and update the backend/bin/df script.~~
|
||||
4. Install NodeJS
|
||||
1. Suggested to use Node Version Manager `nvm`.
|
||||
1. `curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash`
|
||||
@@ -90,7 +90,7 @@ Note `Makefile` to perform these installation and execution instructions.
|
||||
|
||||
1. run web (port 3000)
|
||||
1. `cd web && npm run dev`
|
||||
2. `./bin/mock.sh` (from top level dexorder directory)
|
||||
2. `./bin/mock` (from top level dexorder directory)
|
||||
1. builds contracts
|
||||
2. starts anvil, forking arbitrum_ankr (we should change this to something like arbitrum_anvil so we can each set it to whatever we want)
|
||||
3. runs `forge script Deploy.sol` and broadcasts init contracts onto the private anvil fork
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
2 server
Submodule server updated: 4769d1c6fc...8835ad5272
2 web
Submodule web updated: 780eff5c04...a9bf23ddbb