# VoxBlog Production Deployment Guide

## Overview

Complete CI/CD pipeline for deploying VoxBlog to your VPS with Gitea, using Docker and Gitea Actions (similar to GitHub Actions).

## Architecture

```
┌─────────────────────────────────────────────────────────┐
│                     Your VPS Server                      │
│                                                           │
│  ┌────────────┐   ┌──────────────┐   ┌─────────────┐    │
│  │   Gitea    │   │ Gitea Runner │   │   Docker    │    │
│  │ Repository │ → │   (CI/CD)    │ → │ Containers  │    │
│  └────────────┘   └──────────────┘   └─────────────┘    │
│                                            ↓             │
│                         ┌────────────────────────┐      │
│                         │  voxblog-api:3301      │      │
│                         │  voxblog-admin:3300    │      │
│                         │  mysql:3306            │      │
│                         └────────────────────────┘      │
└─────────────────────────────────────────────────────────┘
```

## Project Structure

```
voxblog/
├── apps/
│   ├── api/          # Backend (Express + TypeScript)
│   └── admin/        # Frontend (React + Vite)
├── packages/
│   └── config-ts/
├── .gitea/
│   └── workflows/
│       └── deploy.yml
├── docker/
│   ├── api.Dockerfile
│   ├── admin.Dockerfile
│   └── nginx.conf
├── docker-compose.yml
└── deploy.sh
```

## Step 1: Create Dockerfiles

### API Dockerfile

```dockerfile
# docker/api.Dockerfile
FROM node:18-alpine AS builder

WORKDIR /app

# Copy workspace files
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY apps/api/package.json ./apps/api/
COPY packages/config-ts/package.json ./packages/config-ts/

# Install pnpm
RUN npm install -g pnpm

# Install dependencies
RUN pnpm install --frozen-lockfile

# Copy source
COPY apps/api ./apps/api
COPY packages/config-ts ./packages/config-ts

# Build
WORKDIR /app/apps/api
RUN pnpm run build || echo "No build script, using ts-node"

# Production image
FROM node:18-alpine

WORKDIR /app

# Install pnpm
RUN npm install -g pnpm

# Copy package files
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY apps/api/package.json ./apps/api/
COPY packages/config-ts/package.json ./packages/config-ts/

# Install production dependencies only
RUN pnpm install --frozen-lockfile --prod

# Copy built app
COPY --from=builder /app/apps/api ./apps/api
COPY --from=builder /app/packages/config-ts ./packages/config-ts

WORKDIR /app/apps/api

EXPOSE 3301

# Runs the dev script (ts-node); swap in a compiled start script once the API has a production build
CMD ["pnpm", "run", "dev"]
```
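
To sanity-check the image before wiring it into Compose, you can build and run it directly. This is a sketch: the `voxblog-api` tag and the `.env` file are assumptions, and the container still needs a reachable MySQL instance to start cleanly.

```bash
# Build the API image from the repo root and run it against your local .env
docker build -f docker/api.Dockerfile -t voxblog-api .
docker run --rm -p 3301:3301 --env-file .env voxblog-api

# In another shell, hit the health endpoint used later in the pipeline
curl -f http://localhost:3301/api/health
```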

### Admin Dockerfile

```dockerfile
# docker/admin.Dockerfile
FROM node:18-alpine AS builder

WORKDIR /app

# Copy workspace files
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY apps/admin/package.json ./apps/admin/
COPY packages/config-ts/package.json ./packages/config-ts/

# Install pnpm
RUN npm install -g pnpm

# Install dependencies
RUN pnpm install --frozen-lockfile

# Copy source
COPY apps/admin ./apps/admin
COPY packages/config-ts ./packages/config-ts

# Build-time API URL (passed from docker-compose as a build arg)
ARG VITE_API_URL
ENV VITE_API_URL=$VITE_API_URL

# Build
WORKDIR /app/apps/admin
RUN pnpm run build

# Production image with nginx
FROM nginx:alpine

# Copy built files
COPY --from=builder /app/apps/admin/dist /usr/share/nginx/html

# Copy nginx config
COPY docker/nginx.conf /etc/nginx/conf.d/default.conf

EXPOSE 80

CMD ["nginx", "-g", "daemon off;"]
```
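
Because Vite bakes the API URL into the bundle at build time, it has to be supplied as a build arg. A minimal local build, assuming the `voxblog-admin` tag:

```bash
# Matches the docker-compose default; use your public API URL for a production build
docker build \
  -f docker/admin.Dockerfile \
  --build-arg VITE_API_URL=http://localhost:3301 \
  -t voxblog-admin .
```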

### Nginx Config

```nginx
# docker/nginx.conf
server {
    listen 80;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;

    # Gzip compression
    gzip on;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    # SPA routing - all routes go to index.html
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
}
```
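
A quick smoke test of the image built above, checking that the SPA fallback behaves as configured (the container name and host port are arbitrary):

```bash
docker run --rm -d --name voxblog-admin-test -p 8080:80 voxblog-admin

curl -I http://localhost:8080/            # index.html, expect 200
curl -I http://localhost:8080/some/route  # SPA fallback, still expect 200
# For the cache headers, request a real hashed asset path from the built dist/ output

docker rm -f voxblog-admin-test
```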

## Step 2: Docker Compose

```yaml
# docker-compose.yml
version: '3.8'

services:
  mysql:
    image: mysql:8.0
    container_name: voxblog-mysql
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
      MYSQL_DATABASE: ${DB_NAME:-voxblog}
      MYSQL_USER: ${DB_USER:-voxblog}
      MYSQL_PASSWORD: ${DB_PASSWORD}
    volumes:
      - mysql_data:/var/lib/mysql
    networks:
      - voxblog-network
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5

  api:
    build:
      context: .
      dockerfile: docker/api.Dockerfile
    container_name: voxblog-api
    restart: unless-stopped
    ports:
      - "3301:3301"
    environment:
      NODE_ENV: production
      PORT: 3301
      DB_HOST: ${DB_HOST:-mysql}
      DB_PORT: ${DB_PORT:-3306}
      DB_USER: ${DB_USER:-voxblog}
      DB_PASSWORD: ${DB_PASSWORD}
      DB_NAME: ${DB_NAME:-voxblog}
      ADMIN_PASSWORD: ${ADMIN_PASSWORD}
      OPENAI_API_KEY: ${OPENAI_API_KEY}
      GHOST_ADMIN_API_KEY: ${GHOST_ADMIN_API_KEY}
      GHOST_ADMIN_API_URL: ${GHOST_ADMIN_API_URL}
      S3_BUCKET: ${S3_BUCKET}
      S3_REGION: ${S3_REGION}
      S3_ACCESS_KEY: ${S3_ACCESS_KEY}
      S3_SECRET_KEY: ${S3_SECRET_KEY}
      S3_ENDPOINT: ${S3_ENDPOINT}
    depends_on:
      mysql:
        condition: service_healthy
    networks:
      - voxblog-network
    volumes:
      - ./data:/app/data

  admin:
    build:
      context: .
      dockerfile: docker/admin.Dockerfile
      args:
        VITE_API_URL: ${VITE_API_URL:-http://localhost:3301}
    container_name: voxblog-admin
    restart: unless-stopped
    ports:
      - "3300:80"
    networks:
      - voxblog-network
    depends_on:
      - api

networks:
  voxblog-network:
    driver: bridge

volumes:
  mysql_data:
```
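
Before pushing, it is worth rendering the final configuration locally to catch missing variables or YAML mistakes. A quick check, assuming a populated `.env` in the project root:

```bash
# Show the fully interpolated compose file; undefined required variables surface as warnings/blanks
docker-compose config

# Bring up only MySQL first and confirm its healthcheck goes green
docker-compose up -d mysql
docker-compose ps
```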

## Step 3: Gitea Actions Workflow

```yaml
# .gitea/workflows/deploy.yml
name: Deploy to Production

on:
  push:
    branches:
      - main

jobs:
  deploy:
    runs-on: ubuntu-latest
    env:
      COMPOSE_PROJECT_NAME: voxblog
      INFISICAL_TOKEN: ${{ secrets.INFISICAL_TOKEN }}
      INFISICAL_SITE_URL: ${{ secrets.INFISICAL_SITE_URL }}
      INFISICAL_CLI_IMAGE: infisical/cli:latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Create placeholder .env
        run: touch .env

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Load secrets from Infisical
        shell: bash
        run: |
          set -euo pipefail

          if [ -z "${INFISICAL_TOKEN}" ]; then
            echo "INFISICAL_TOKEN is not configured"
            exit 1
          fi

          CLI_IMAGE="${INFISICAL_CLI_IMAGE:-infisical/cli:latest}"
          docker pull "$CLI_IMAGE" >/dev/null

          tmp_file=$(mktemp)
          if [ -n "${INFISICAL_API_URL:-}" ]; then
            docker run --rm \
              -e INFISICAL_TOKEN="$INFISICAL_TOKEN" \
              ${INFISICAL_SITE_URL:+-e INFISICAL_SITE_URL="$INFISICAL_SITE_URL"} \
              -e INFISICAL_API_URL="$INFISICAL_API_URL" \
              "$CLI_IMAGE" export --format=dotenv > "$tmp_file"
          elif [ -n "${INFISICAL_SITE_URL:-}" ]; then
            api_url="${INFISICAL_SITE_URL%/}/api"
            docker run --rm \
              -e INFISICAL_TOKEN="$INFISICAL_TOKEN" \
              -e INFISICAL_SITE_URL="$INFISICAL_SITE_URL" \
              -e INFISICAL_API_URL="$api_url" \
              "$CLI_IMAGE" export --format=dotenv > "$tmp_file"
          else
            docker run --rm \
              -e INFISICAL_TOKEN="$INFISICAL_TOKEN" \
              "$CLI_IMAGE" export --format=dotenv > "$tmp_file"
          fi

          : > .env

          while IFS= read -r line || [ -n "$line" ]; do
            if [ -z "$line" ] || [[ "$line" == \#* ]]; then
              continue
            fi
            key="${line%%=*}"
            value="${line#*=}"
            value="${value%$'\r'}"
            if [[ "$value" == "\""* && "$value" == *"\"" ]]; then
              value="${value:1:-1}"
            elif [[ "$value" == "'"* && "$value" == *"'" ]]; then
              value="${value:1:-1}"
            fi

            echo "::add-mask::$value"
            printf '%s=%s\n' "$key" "$value" >> "$GITHUB_ENV"
            printf '%s=%s\n' "$key" "$value" >> .env
          done < "$tmp_file"

          rm -f "$tmp_file"

      - name: Build and deploy
        run: |
          docker-compose down
          docker-compose build --no-cache
          docker-compose up -d

      - name: Run database migrations
        run: |
          docker-compose exec -T api pnpm run drizzle:migrate

      - name: Health check
        run: |
          sleep 10
          curl -f http://localhost:3301/api/health || exit 1
          curl -f http://localhost:3300 || exit 1

      - name: Clean up old images
        run: |
          docker image prune -af --filter "until=24h"
```
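
The Infisical step can be reproduced outside CI to debug token or URL problems. The commands mirror the workflow above; the token value is a placeholder.

```bash
export INFISICAL_TOKEN=st.your_service_token
export INFISICAL_SITE_URL=https://secrets.yourdomain.com   # optional, as in the workflow

docker run --rm \
  -e INFISICAL_TOKEN="$INFISICAL_TOKEN" \
  ${INFISICAL_SITE_URL:+-e INFISICAL_SITE_URL="$INFISICAL_SITE_URL"} \
  infisical/cli:latest export --format=dotenv > .env

wc -l .env   # rough sanity check: one line per secret
```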

## Step 4: Deployment Script (Alternative to Gitea Actions)

If Gitea Actions is not available, you can still trigger deployments via SSH, cron, or a webhook that calls `deploy.sh`. The script:

- Prefers `INFISICAL_TOKEN` and (optionally) `INFISICAL_SITE_URL` to pull secrets from Infisical via the official CLI container.
- Falls back to a local `.env` file only when no token is exported (for development/testing).
- Runs the same build → up → migrate → health-check flow afterwards.

Before running it manually or from a webhook, export the token:

```bash
export INFISICAL_TOKEN=st.your_service_token
export INFISICAL_SITE_URL=https://secrets.yourdomain.com  # optional
./deploy.sh
```

Everything else inside the script remains unchanged; see [deploy.sh](deploy.sh) for details.
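
For orientation, the overall flow looks roughly like the sketch below. This is not the shipped `deploy.sh`; treat the actual file as authoritative.

```bash
#!/usr/bin/env bash
# Sketch of the deploy flow described above: secrets -> build -> up -> migrate -> health check
set -euo pipefail

if [ -n "${INFISICAL_TOKEN:-}" ]; then
  # Pull secrets into .env via the Infisical CLI container
  docker run --rm \
    -e INFISICAL_TOKEN="$INFISICAL_TOKEN" \
    ${INFISICAL_SITE_URL:+-e INFISICAL_SITE_URL="$INFISICAL_SITE_URL"} \
    infisical/cli:latest export --format=dotenv > .env
elif [ ! -f .env ]; then
  echo "No INFISICAL_TOKEN exported and no local .env found" >&2
  exit 1
fi

git pull --ff-only
docker-compose build
docker-compose up -d
docker-compose exec -T api pnpm run drizzle:migrate

sleep 10
curl -f http://localhost:3301/api/health
curl -f http://localhost:3300
```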

## Step 5: Gitea Webhook Setup

### Option A: Using Gitea Actions (Recommended)

1. **Install Gitea Runner on your VPS** (a quick status check follows this list):

   ```bash
   # Download Gitea Runner
   wget https://dl.gitea.com/act_runner/latest/act_runner-latest-linux-amd64
   chmod +x act_runner-latest-linux-amd64
   sudo mv act_runner-latest-linux-amd64 /usr/local/bin/act_runner

   # Register runner
   act_runner register --instance https://your-gitea-url --token YOUR_RUNNER_TOKEN

   # Run as a service
   sudo tee /etc/systemd/system/gitea-runner.service > /dev/null <<EOF
   [Unit]
   Description=Gitea Actions Runner
   After=network.target

   [Service]
   Type=simple
   User=git
   WorkingDirectory=/home/git
   ExecStart=/usr/local/bin/act_runner daemon
   Restart=always

   [Install]
   WantedBy=multi-user.target
   EOF

   sudo systemctl daemon-reload
   sudo systemctl enable gitea-runner
   sudo systemctl start gitea-runner
   ```

2. **Add secrets in Gitea:**
   - Go to your repository → Settings → Secrets
   - Add all environment variables as secrets
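
Once the service is up, confirm the runner registered and shows as online in Gitea's runner list; on the VPS:

```bash
# Service name matches the unit created above
sudo systemctl status gitea-runner
journalctl -u gitea-runner -f   # watch job pickup in real time
```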

### Option B: Using Webhook + Script

1. **Create webhook endpoint:**

   ```bash
   # Install webhook listener
   sudo apt-get install webhook

   # Create webhook config
   sudo tee /etc/webhook.conf > /dev/null <<EOF
   [
     {
       "id": "voxblog-deploy",
       "execute-command": "/path/to/voxblog/deploy.sh",
       "command-working-directory": "/path/to/voxblog",
       "response-message": "Deployment started",
       "trigger-rule": {
         "match": {
           "type": "payload-hash-sha256",
           "secret": "YOUR_WEBHOOK_SECRET",
           "parameter": {
             "source": "header",
             "name": "X-Gitea-Signature"
           }
         }
       }
     }
   ]
   EOF

   # Run webhook as a service
   sudo tee /etc/systemd/system/webhook.service > /dev/null <<EOF
   [Unit]
   Description=Webhook Service
   After=network.target

   [Service]
   Type=simple
   ExecStart=/usr/bin/webhook -hooks /etc/webhook.conf -verbose
   Restart=always

   [Install]
   WantedBy=multi-user.target
   EOF

   sudo systemctl daemon-reload
   sudo systemctl enable webhook
   sudo systemctl start webhook
   ```

2. **Configure Gitea webhook** (a manual test request is sketched after this list):
   - Repository → Settings → Webhooks → Add Webhook
   - URL: `http://your-vps:9000/hooks/voxblog-deploy`
   - Secret: `YOUR_WEBHOOK_SECRET`
   - Trigger: Push events on the main branch
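
You can exercise the hook without pushing by signing a dummy payload the same way Gitea does (HMAC-SHA256 over the request body). The URL, port, and secret below are the placeholders from the config above.

```bash
PAYLOAD='{"ref":"refs/heads/main"}'
SIG=$(printf '%s' "$PAYLOAD" | openssl dgst -sha256 -hmac "YOUR_WEBHOOK_SECRET" | awk '{print $NF}')

curl -X POST "http://your-vps:9000/hooks/voxblog-deploy" \
  -H "Content-Type: application/json" \
  -H "X-Gitea-Signature: $SIG" \
  -d "$PAYLOAD"
```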

## Step 6: Reverse Proxy (Nginx)

```nginx
# /etc/nginx/sites-available/voxblog
server {
    listen 80;
    server_name voxblog.yourdomain.com;

    # Redirect to HTTPS
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    server_name voxblog.yourdomain.com;

    ssl_certificate /etc/letsencrypt/live/voxblog.yourdomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/voxblog.yourdomain.com/privkey.pem;

    # Admin frontend
    location / {
        proxy_pass http://localhost:3300;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }

    # API backend
    location /api {
        proxy_pass http://localhost:3301;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;

        # Increase timeouts for AI streaming
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
}
```

Enable site:

```bash
sudo ln -s /etc/nginx/sites-available/voxblog /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
```

## Step 7: SSL Certificate

```bash
# Install certbot
sudo apt-get install certbot python3-certbot-nginx

# Get certificate
sudo certbot --nginx -d voxblog.yourdomain.com

# Certbot configures automatic renewal for you
```
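
Renewal can be verified without waiting for the certificate to approach expiry:

```bash
# Dry-run the renewal path and confirm the renewal timer exists
sudo certbot renew --dry-run
systemctl list-timers | grep certbot
```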

## Step 8: Environment Variables

Provision secrets in Infisical instead of maintaining a long-lived `.env` on disk:

1. Follow [INFISICAL_SETUP.md](INFISICAL_SETUP.md) to boot Infisical (already live at `https://secrets.pusula.blog`) and add the VoxBlog secrets.
2. Create a production-scoped service token (scoped to the secret path you created).
3. Store the token securely; you'll export it whenever you deploy (a quick verification command follows this list).
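
Before handing the token to CI, you can confirm it is scoped correctly by listing the secret keys it can read. This mirrors the export command used in the workflow; printing only the keys avoids echoing values.

```bash
docker run --rm -e INFISICAL_TOKEN=st.your_service_token \
  infisical/cli:latest export --format=dotenv | cut -d= -f1
```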

## Step 9: Initial Deployment

```bash
# Clone repository
cd /var/www  # or your preferred location
git clone https://your-gitea-url/your-username/voxblog.git
cd voxblog

# (Optional) Prepare a local .env for development only
# cp .env.example .env && nano .env

# Export the Infisical token for production deployment
export INFISICAL_TOKEN=st.your_service_token
export INFISICAL_SITE_URL=https://secrets.pusula.blog

# Initial deployment
./deploy.sh
```

## Step 10: Monitoring & Logs

```bash
# View logs
docker-compose logs -f

# View a specific service
docker-compose logs -f api
docker-compose logs -f admin

# Check status
docker-compose ps

# Restart services
docker-compose restart api
docker-compose restart admin
```

## Deployment Workflow

```
Developer pushes to main
          ↓
Gitea detects push
          ↓
Triggers Gitea Actions / Webhook
          ↓
Runs deploy.sh or workflow
          ↓
1. Pull latest code
2. Build Docker images
3. Stop old containers
4. Start new containers
5. Run migrations
6. Health check
7. Clean up
          ↓
Deployment complete! ✅
```

## Rollback Strategy

```bash
# View recent images
docker images | grep voxblog

# Rollback to the previous version (requires a saved :previous tag; see the sketch below)
docker tag voxblog-api:latest voxblog-api:backup
docker tag voxblog-api:previous voxblog-api:latest
docker-compose up -d

# Or use git
git log --oneline
git checkout <previous-commit-hash>
./deploy.sh
```
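
The `:previous` tag only exists if you save it during deploys. One way to do that, sketched with the image names used in this guide (adjust to whatever names Compose actually produces on your host):

```bash
# Run before each deploy so a rollback target is always available
docker tag voxblog-api:latest voxblog-api:previous || true
docker tag voxblog-admin:latest voxblog-admin:previous || true

# Optionally also keep a per-commit tag
SHA=$(git rev-parse --short HEAD)
docker tag voxblog-api:latest "voxblog-api:$SHA" || true
```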

## Best Practices

1. **Always test locally first:**
   ```bash
   docker-compose up --build
   ```

2. **Use health checks** in docker-compose.yml

3. **Backup database regularly** (a cron-ready sketch follows this list):
   ```bash
   docker-compose exec mysql mysqldump -u voxblog -p voxblog > backup.sql
   ```

4. **Monitor disk space:**
   ```bash
   docker system df
   docker system prune -a
   ```

5. **Use secrets management** - keep secrets in Infisical, never commit `.env`

6. **Set up monitoring** (optional):
   - Portainer for Docker management
   - Grafana + Prometheus for metrics
   - Uptime Kuma for uptime monitoring
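
A cron-ready variant of the backup command in item 3, sketched with assumed paths (`/var/www/voxblog`, a local `backups/` directory) and a 14-day retention:

```bash
#!/usr/bin/env bash
# backup.sh -- nightly MySQL dump; schedule with e.g. `0 3 * * * /var/www/voxblog/backup.sh`
set -euo pipefail
cd /var/www/voxblog
mkdir -p backups

# $MYSQL_PASSWORD expands inside the container (set by docker-compose.yml)
docker-compose exec -T mysql sh -c 'exec mysqldump -u voxblog -p"$MYSQL_PASSWORD" voxblog' \
  | gzip > "backups/voxblog-$(date +%F).sql.gz"

# Keep roughly the last 14 days of dumps
find backups -name 'voxblog-*.sql.gz' -mtime +13 -delete
```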

## Troubleshooting

### Container won't start
```bash
docker-compose logs api
docker-compose exec api sh  # Debug inside the container
```

### Database connection issues
```bash
docker-compose exec mysql mysql -u voxblog -p
# Check if the database exists
SHOW DATABASES;
```

### Port already in use
```bash
sudo lsof -i :3301
sudo kill -9 <PID>
```

### Out of disk space
```bash
docker system prune -a --volumes
```

## Security Checklist

- [ ] Infisical secrets configured with strong values
- [ ] Enable firewall (ufw)
- [ ] Keep Docker updated
- [ ] Use SSL/TLS (HTTPS)
- [ ] Limit SSH access
- [ ] Regular backups
- [ ] Monitor logs for suspicious activity
- [ ] Use Docker secrets for sensitive data (advanced)

## Next Steps

1. Create all Docker files
2. Set up Gitea Runner or webhook
3. Configure environment variables
4. Test deployment locally
5. Deploy to production
6. Set up monitoring
7. Configure backups

---

**Status**: Ready for production deployment! 🚀