deployed 0.1

This commit is contained in:
2026-03-04 00:56:08 -04:00
parent bf7af2b426
commit 185fa42caa
14 changed files with 371 additions and 27 deletions

View File

@@ -0,0 +1,38 @@
FROM python:3.14-alpine

# Set working directory
WORKDIR /app

# Copy requirements first so the dependency layer is only rebuilt when
# requirements.txt changes (source edits below do not bust this cache).
COPY backend/requirements.txt /app/requirements.txt

# Build and install the TA-Lib C library, install the Python dependencies,
# then delete the entire build toolchain -- all in ONE layer so none of the
# compilers or caches persist in the image.
# ca-certificates is installed outside the virtual group so it survives
# `apk del .build-deps` and TLS keeps working at runtime.
RUN apk add --no-cache ca-certificates \
    && apk add --no-cache --virtual .build-deps \
        cargo \
        g++ \
        gcc \
        make \
        musl-dev \
        rust \
        tar \
        wget \
    # Fetch over HTTPS: the original used plain http://, which lets a
    # man-in-the-middle substitute the source tarball that we then compile
    # and install as root.
    && wget https://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz \
    && tar -xzf ta-lib-0.4.0-src.tar.gz \
    && cd ta-lib/ \
    && ./configure --prefix=/usr \
    && make \
    && make install \
    && cd .. \
    && rm -rf ta-lib ta-lib-0.4.0-src.tar.gz \
    && pip install --no-cache-dir -r requirements.txt \
    && apk del .build-deps \
    && rm -rf /var/cache/apk/* /root/.cache /root/.cargo /root/.rustup

# Copy application code after the dependency layer for better caching
COPY backend/src /app/src

# Documentation only -- the port must still be published at run time
EXPOSE 8000

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM directly
CMD ["python", "-m", "uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -1,27 +1,65 @@
# NOTE(review): this hunk is a diff rendering with the +/- markers stripped:
# the removed (python:3.14-alpine / apk) lines and the added
# (python:3.12-slim / apt-get) lines are interleaved. It is NOT a valid
# Dockerfile as it stands (two FROM lines, mixed apk/apt). The comments
# below mark which side each run of lines appears to belong to -- confirm
# against the real diff before acting on any of this.
# -- OLD base image (removed):
FROM python:3.14-alpine
# -- NEW base image (added):
FROM python:3.12-slim
# -- OLD alpine install block (removed): used the prebuilt ta-lib apk package.
# Install TA-Lib C library and build dependencies
RUN apk add --no-cache --virtual .build-deps \
gcc \
g++ \
make \
musl-dev \
wget \
&& apk add --no-cache \
ta-lib \
&& rm -rf /var/cache/apk/*
# -- NEW debian/slim Dockerfile (added) starts here.
# Build-time config selector; consumed by the config*.yaml copy step below.
ARG CONFIG=production
# Install TA-Lib C library early for better layer caching
# NOTE(review): the tarball is fetched over plain http:// -- should be https.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
gcc \
g++ \
make \
wget \
ca-certificates \
&& wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz \
&& tar -xzf ta-lib-0.4.0-src.tar.gz \
&& cd ta-lib/ \
&& ./configure --prefix=/usr \
&& make \
&& make install \
&& cd .. \
&& rm -rf ta-lib ta-lib-0.4.0-src.tar.gz \
&& apt-get purge -y --auto-remove gcc g++ make wget ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Install Python build dependencies early for better layer caching
# NOTE(review): no cleanup in this layer -- the purge happens in a LATER RUN,
# so gcc/g++/cargo/rustc and the apt lists stay baked into this layer and the
# image does not actually shrink.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
gcc \
g++ \
cargo \
rustc
# Install compiled packages - separate layer so requirements.txt changes don't trigger recompilation
COPY backend/requirements-pre.txt /app/requirements-pre.txt
# NOTE(review): --no-cache-dir defeats the pip cache mount on this RUN --
# one of the two should go (keep the mount, drop the flag).
RUN --mount=type=cache,target=/root/.cache/pip \
--mount=type=cache,target=/root/.cargo \
pip install --no-cache-dir -r /app/requirements-pre.txt \
&& apt-get purge -y --auto-remove gcc g++ cargo rustc \
&& rm -rf /var/lib/apt/lists/* /root/.rustup /tmp/*
# Set working directory
WORKDIR /app
# -- OLD comment (removed):
# Copy requirements first for better caching
# -- NEW comment (added):
# Copy and install remaining requirements
COPY backend/requirements.txt /app/requirements.txt
# -- OLD comment (removed):
# Install Python dependencies
# -- NEW comment (added):
# Install Python dependencies and clean up
RUN pip install --no-cache-dir -r requirements.txt
# -- OLD alpine cleanup RUN (removed):
# Clean up build dependencies
RUN apk del .build-deps
# Copy application code
COPY backend/src /app/src
# Select the config file matching the CONFIG build arg, falling back to the
# default config.yaml; the temporary copies are removed in the same layer.
COPY backend/config*.yaml /tmp/
RUN if [ -f /tmp/config-${CONFIG}.yaml ]; then \
cp /tmp/config-${CONFIG}.yaml /app/config.yaml; \
else \
cp /tmp/config.yaml /app/config.yaml; \
fi && rm -rf /tmp/config*.yaml
# Add src to PYTHONPATH for correct module resolution
ENV PYTHONPATH=/app/src
# Expose port
EXPOSE 8000
# Run the application
CMD ["python", "-m", "uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -0,0 +1,19 @@
FROM node:20-alpine

# Set working directory
WORKDIR /app

# Copy package manifests first so the install layer is only rebuilt when
# package.json / package-lock.json change.
COPY web/package*.json /app/

# `npm ci` installs exactly what the lockfile pins (the original used
# `npm install`, which can silently update the lockfile and drift between
# builds). Requires package-lock.json to be present in web/.
RUN npm ci

# Copy application code after the dependency layer for better caching
COPY web /app/

# Documentation only -- the port must still be published at run time
EXPOSE 5173

# Vite dev server bound to all interfaces (development/debug image);
# exec form keeps npm as PID 1's direct child chain for signal delivery.
CMD ["npm", "run", "dev", "--", "--host", "0.0.0.0"]

48
deploy/backend.yaml Normal file
View File

@@ -0,0 +1,48 @@
---
# ClusterIP Service exposing the backend StatefulSet inside the cluster
# on port 8000 (matched by the /ws ingress rule).
apiVersion: v1
kind: Service
metadata:
  name: ai-backend
spec:
  type: ClusterIP
  selector:
    app: ai-backend
  ports:
    - protocol: TCP
      port: 8000
      targetPort: 8000
---
# Single-replica StatefulSet for the backend, with a per-replica PVC
# mounted at /app/data for persistent state.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ai-backend
spec:
  serviceName: ai-backend
  replicas: 1
  selector:
    matchLabels:
      app: ai-backend
  template:
    metadata:
      labels:
        app: ai-backend
    spec:
      containers:
        - name: ai-backend
          # NOTE(review): untagged image resolves to :latest -- pin a tag or
          # digest for reproducible rollouts.
          image: dexorder/ai-backend
          ports:
            - containerPort: 8000
          env:
            # NOTE(review): the backend Dockerfile consumes CONFIG as a build
            # ARG; confirm the application also reads CONFIG from the
            # environment at run time, otherwise this value has no effect.
            - name: CONFIG
              value: "dev"
          volumeMounts:
            - name: ai-backend-data
              mountPath: /app/data
  volumeClaimTemplates:
    - metadata:
        name: ai-backend-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi

View File

38
deploy/ingress.yaml Normal file
View File

@@ -0,0 +1,38 @@
---
# HTTPS ingress for dexorder.ai: web UI paths route to the ai-web service,
# the websocket path routes to the ai-backend service. TLS certificate is
# issued by cert-manager via the letsencrypt-prod ClusterIssuer.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ai-ingress
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - dexorder.ai
      secretName: dexorder-ai-tls
  rules:
    - host: dexorder.ai
      http:
        paths:
          # presumably TradingView charting-library assets served by the web
          # container -- TODO confirm
          - path: /charting_library
            pathType: Prefix
            backend:
              service:
                name: ai-web
                port:
                  number: 5173
          # Web application (matches VITE_BASE_PATH in deploy/web.yaml)
          - path: /cryptochimp
            pathType: Prefix
            backend:
              service:
                name: ai-web
                port:
                  number: 5173
          # WebSocket endpoint proxied to the backend API
          - path: /ws
            pathType: Prefix
            backend:
              service:
                name: ai-backend
                port:
                  number: 8000

38
deploy/web.yaml Normal file
View File

@@ -0,0 +1,38 @@
---
# ClusterIP Service exposing the web Deployment inside the cluster on the
# Vite dev-server port 5173 (fronted by the ingress).
apiVersion: v1
kind: Service
metadata:
  name: ai-web
spec:
  type: ClusterIP
  selector:
    app: ai-web
  ports:
    - protocol: TCP
      port: 5173
      targetPort: 5173
---
# Single-replica Deployment running the web container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ai-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ai-web
  template:
    metadata:
      labels:
        app: ai-web
    spec:
      containers:
        - name: ai-web
          # NOTE(review): untagged image resolves to :latest -- pin a tag or
          # digest for reproducible rollouts.
          image: dexorder/ai-web
          ports:
            - containerPort: 5173
          env:
            # NOTE(review): VITE_* variables are normally baked in at build
            # time; setting them here only works because this image runs the
            # dev server -- confirm before switching to a production build.
            - name: VITE_BASE_PATH
              value: "/cryptochimp/"
            - name: VITE_WS_URL
              value: "wss://dexorder.ai/ws"