diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ec065fd4 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,25 @@ +# Keep secrets out of the Docker build context +docker/.env + +# Git metadata +.git +.gitignore + +# Python caches / build artifacts +__pycache__/ +*.py[cod] +*.pyo +*.egg-info/ +.pytest_cache/ +dist/ +build/ + +# Virtual environments (built fresh in the image) +hysteria2_venv/ +venv/ +.venv/ + +# Editor and OS files +.DS_Store +*.swp +*.swo diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..21f23af6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,65 @@ +# Stage 1: Build Go auth server +FROM golang:1.22-bookworm AS go-builder + +WORKDIR /build +COPY core/scripts/auth/user_auth.go . +RUN echo "precedence ::ffff:0:0/96 100" >> /etc/gai.conf && \ + go mod init hysteria_auth && go mod tidy && go build -o user_auth . + +# Stage 2: Runtime +FROM ubuntu:24.04 + +# TARGETARCH is set automatically by Docker (amd64, arm64, arm/v7, etc.) +ARG TARGETARCH + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y \ + python3 \ + python3-venv \ + curl \ + openssl \ + jq \ + lsof \ + supervisor \ + wireguard-tools \ + iptables \ + iproute2 \ + && rm -rf /var/lib/apt/lists/* + +# Install Hysteria2 binary from GitHub Releases using Docker TARGETARCH +RUN HY_VERSION=$(curl -fsSL "https://api.github.com/repos/apernet/hysteria/releases/latest" \ + | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"//;s/".*//') && \ + echo "Downloading hysteria ${HY_VERSION} for linux-${TARGETARCH}..." && \ + curl -fsSL -o /usr/local/bin/hysteria \ + "https://github.com/apernet/hysteria/releases/download/${HY_VERSION}/hysteria-linux-${TARGETARCH}" && \ + chmod +x /usr/local/bin/hysteria + +# Copy compiled Go auth server +COPY --chmod=755 --from=go-builder /build/user_auth /etc/hysteria/core/scripts/auth/user_auth + +# Copy application source +COPY . 
/etc/hysteria/ + +# Create Python venv, install dependencies, and expose packages to system python3 +RUN python3 -m venv /etc/hysteria/hysteria2_venv && \ + /etc/hysteria/hysteria2_venv/bin/pip install --no-cache-dir -r /etc/hysteria/requirements.txt && \ + find /etc/hysteria/hysteria2_venv/lib -name site-packages -type d \ + > /usr/lib/python3/dist-packages/hysteria-venv.pth + +# Download geo data at build time (same as bare-metal install.sh) +RUN curl -fsSL --max-time 30 -o /etc/hysteria/geosite.dat \ + "https://raw.githubusercontent.com/Chocolate4U/Iran-v2ray-rules/release/geosite.dat" && \ + curl -fsSL --max-time 30 -o /etc/hysteria/geoip.dat \ + "https://raw.githubusercontent.com/Chocolate4U/Iran-v2ray-rules/release/geoip.dat" + +# Preserve the repo config.json as a template (entrypoint symlinks config.json to volume) +RUN cp /etc/hysteria/config.json /etc/hysteria/config.json.template + +# Install systemctl shim so existing scripts work in Docker +COPY --chmod=755 docker/systemctl-shim.sh /usr/local/bin/systemctl + +# Copy entrypoint +COPY --chmod=755 docker/entrypoint.sh /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/README.md b/README.md index b7a4e772..448d5ac9 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,36 @@ There is no need to execute the installation command again. +## 🐳 Docker Installation (Local Build) + +> **Note:** This is a local build method for development and testing. A pre-built image on Docker Hub is planned for the future. + +### Prerequisites + +- [Docker](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) + +### Quick Start + +```bash +# 1. Clone the repository +git clone https://github.com/ReturnFI/Blitz.git +cd Blitz + +# 2. Create and configure your environment file +cp docker/.env.example docker/.env +nano docker/.env + +# 3. Build and start +docker compose up -d --build + +# 4. 
Get the admin password and web panel URL from the logs +docker logs blitz 2>&1 | grep -E "ADMIN PASSWORD|Access at" +``` + +For the full list of environment variables, architecture details, common operations, and troubleshooting, see [docker/README.md](docker/README.md). + +--- + ## 💎 Sponsorship & Support 💖 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..f6a383cc --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,25 @@ +services: + mongodb: + image: mongo:8.0 + container_name: mongodb + restart: unless-stopped + network_mode: "host" + volumes: + - mongodb_data:/data/db + + blitz: + build: . + container_name: blitz + restart: on-failure:5 + network_mode: "host" + cap_add: + - NET_ADMIN # required for WARP (wireguard) and IP limiting (iptables) + env_file: docker/.env + volumes: + - blitz_data:/blitz-data + depends_on: + - mongodb + +volumes: + blitz_data: + mongodb_data: diff --git a/docker/.env.example b/docker/.env.example new file mode 100644 index 00000000..afe5515a --- /dev/null +++ b/docker/.env.example @@ -0,0 +1,32 @@ +# Hysteria2 core +HYSTERIA_PORT=8080 +# Domain or IP for TLS cert + subscription URIs (auto-detected from ip.sb if blank) +HYSTERIA_SNI= +SERVER_IPV6= + +# OBFS Salamander — leave empty to auto-generate a random password on first start +# Note: OBFS and MASQUERADE are mutually exclusive. If both are set, OBFS takes priority. +OBFS_PASSWORD= + +# Masquerade — disguise Hysteria2 as a regular website when probed +# Set to a URL (e.g.
https://example.com) to proxy requests to that site +# Set to "string" to return a generic HTTP 502 response +# Leave empty to disable (default) +MASQUERADE_URL= + +# Web panel (always on) +WEBPANEL_PORT=2096 +ADMIN_USERNAME=admin +# If blank, a random password is generated and printed to logs on first start +ADMIN_PASSWORD= +WEBPANEL_EXPIRATION_MINUTES=1440 + +# NormalSub subscription service (optional) +NORMALSUB_ENABLED=false +NORMALSUB_PORT=2095 + +# Telegram bot (optional) +TELEGRAM_ENABLED=false +TELEGRAM_BOT_TOKEN= +TELEGRAM_ADMIN_USER_IDS= +TELEGRAM_BACKUP_INTERVAL_HOUR=6 diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..d6e94de8 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,135 @@ +# Blitz — Docker Deployment + +## Quick Start + +```bash +# 1. Copy and edit the environment file +cp docker/.env.example docker/.env +nano docker/.env + +# 2. Build and start +docker compose up -d --build + +# 3. Check logs for the admin password and web panel URL +docker logs blitz 2>&1 | grep -E "ADMIN PASSWORD|Access at" + +# 4. Check all services are running +docker exec blitz supervisorctl -c /tmp/supervisord.conf status +``` + +## Environment Variables + +Create `docker/.env` from `docker/.env.example` and configure the following: + +### Hysteria2 Core + +| Variable | Default | Description | +|----------|---------|-------------| +| `HYSTERIA_PORT` | `8080` | UDP port for the Hysteria2 server. Must be open in the firewall (the entrypoint opens it automatically via iptables). | +| `HYSTERIA_SNI` | *(auto-detected)* | Domain or IP used as the TLS certificate CN and in client URIs. If blank, the public IP is auto-detected via `ip.sb`. | +| `SERVER_IPV6` | *(empty)* | Server IPv6 address. Used in `.configs.env` for client URI generation. | + +### Traffic Obfuscation + +OBFS and Masquerade are **mutually exclusive**. If both are set, OBFS takes priority. 
+ +| Variable | Default | Description | +|----------|---------|-------------| +| `OBFS_PASSWORD` | *(random)* | Salamander obfuscation password. If blank, a random 32-char password is generated. Set explicitly to share across reinstalls. | +| `MASQUERADE_URL` | *(empty)* | Disguise Hysteria2 when probed by non-clients. Set to a URL (e.g. `https://example.com`) to proxy requests to that site, or `string` to return a generic HTTP 502 response. **Only works when `OBFS_PASSWORD` is not set.** | + +### Web Panel + +| Variable | Default | Description | +|----------|---------|-------------| +| `WEBPANEL_PORT` | `2096` | TCP port for the web panel. Opened automatically in iptables. | +| `ADMIN_USERNAME` | `admin` | Web panel admin username. | +| `ADMIN_PASSWORD` | *(random)* | Web panel admin password. If blank, a random 16-char password is generated and printed to the container logs on first start. | +| `WEBPANEL_EXPIRATION_MINUTES` | `1440` | Admin session expiration time in minutes (1440 = 24 hours). | + +### NormalSub (Optional) + +| Variable | Default | Description | +|----------|---------|-------------| +| `NORMALSUB_ENABLED` | `false` | Set to `true` to enable the NormalSub subscription service. | +| `NORMALSUB_PORT` | `2095` | TCP port for the NormalSub service. | + +### Telegram Bot (Optional) + +| Variable | Default | Description | +|----------|---------|-------------| +| `TELEGRAM_ENABLED` | `false` | Set to `true` to enable the Telegram bot. | +| `TELEGRAM_BOT_TOKEN` | *(empty)* | Telegram Bot API token from @BotFather. Required if enabled. | +| `TELEGRAM_ADMIN_USER_IDS` | *(empty)* | Comma-separated Telegram user IDs with admin access (e.g. `123456,789012`). | +| `TELEGRAM_BACKUP_INTERVAL_HOUR` | `6` | Automatic backup interval in hours. 
| + +## Architecture + +The Docker deployment runs everything in **two containers**: + +| Container | Description | +|-----------|-------------| +| `mongodb` | MongoDB 8.0 database | +| `blitz` | All Blitz services managed by supervisord | + +Services inside the `blitz` container: + +| Service | Description | +|---------|-------------| +| `hysteria-server` | Hysteria2 proxy server | +| `hysteria-auth` | Go-based authentication server | +| `hysteria-scheduler` | Traffic tracking and user expiration scheduler | +| `hysteria-webpanel` | Web management panel (hypercorn/FastAPI) | +| `hysteria-normalsub` | Subscription service (if enabled) | +| `hysteria-telegrambot` | Telegram bot (if enabled) | + +Both containers use `network_mode: "host"` for direct access to host networking. + +## Data Persistence + +All persistent data is stored in the `blitz_data` Docker volume: + +- `config.json` — Hysteria2 server configuration +- `ca.crt` / `ca.key` — TLS certificates +- `.configs.env` — Server IP/SNI configuration +- `webpanel.env` — Web panel credentials and settings +- `normalsub.env` — NormalSub settings +- `telegrambot.env` — Telegram bot settings +- `.initialized` — First-run marker + +## Common Operations + +```bash +# View service status +docker exec blitz supervisorctl -c /tmp/supervisord.conf status + +# Restart Hysteria2 server +docker exec blitz supervisorctl -c /tmp/supervisord.conf restart hysteria-server + +# View logs +docker logs blitz + +# List users +docker exec blitz python3 /etc/hysteria/core/cli.py list-users + +# Get user connection URI +docker exec blitz python3 /etc/hysteria/core/cli.py show-user-uri -u <username> -ip 4 + +# Full rebuild (reset all data) +docker compose down +docker volume rm blitz_blitz_data +docker compose up -d --build +``` + +## Troubleshooting + +**Web panel not accessible externally:** Ensure `WEBPANEL_PORT` is open. The entrypoint opens it via iptables, but some cloud providers have separate firewall rules.
+ +**Hysteria2 clients can't connect:** Ensure `HYSTERIA_PORT` is open for **UDP** traffic. The entrypoint opens it via iptables automatically. + +**Forgot admin password:** Remove the volume and rebuild to regenerate credentials: +```bash +docker compose down && docker volume rm blitz_blitz_data && docker compose up -d --build +``` + +**Services show FATAL:** Check `docker logs blitz` for error details. Common causes: port already in use, invalid config.json. diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100755 index 00000000..b620e420 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,390 @@ +#!/bin/bash +set -e + +BLITZ_DATA_DIR="/blitz-data" +HYSTERIA_DIR="/etc/hysteria" +VENV_PYTHON="${HYSTERIA_DIR}/hysteria2_venv/bin/python3" +CONFIG_FILE="${HYSTERIA_DIR}/config.json" +CLI_PATH="${HYSTERIA_DIR}/core/cli.py" + +# --------------------------------------------------------------------------- +# 1. Check CPU AVX support (required by MongoDB 5.0+) +# --------------------------------------------------------------------------- +if [ -f /proc/cpuinfo ] && ! grep -q -m1 -oE 'avx|avx2|avx512' /proc/cpuinfo; then + echo "============================================================" + echo "[entrypoint] ERROR: CPU does not support AVX instructions." + echo "[entrypoint] MongoDB 5.0+ requires AVX. See: https://www.mongodb.com/docs/manual/administration/production-notes/" + echo "[entrypoint] Consider using the 'nodb' branch of Blitz for systems without AVX." + echo "============================================================" + exit 1 +fi + +# --------------------------------------------------------------------------- +# 2. Wait for MongoDB +# --------------------------------------------------------------------------- +echo "[entrypoint] Waiting for MongoDB at localhost:27017..." 
+until "${VENV_PYTHON}" -c \ + "import pymongo; pymongo.MongoClient('mongodb://localhost:27017', serverSelectionTimeoutMS=2000).admin.command('ping')" \ + 2>/dev/null; do + echo "[entrypoint] MongoDB not ready — retrying in 2s..." + sleep 2 +done +echo "[entrypoint] MongoDB is up." + +# --------------------------------------------------------------------------- +# 2. Set up persistent volume directories and symlinks +# --------------------------------------------------------------------------- +mkdir -p "${BLITZ_DATA_DIR}" + +# Symlink Hysteria2 config + TLS certs so they persist across container recreation +for f in config.json ca.crt ca.key; do + target="${HYSTERIA_DIR}/${f}" + source="${BLITZ_DATA_DIR}/${f}" + [ -e "${target}" ] && [ ! -L "${target}" ] && rm -f "${target}" + [ ! -L "${target}" ] && ln -sf "${source}" "${target}" +done + +# Symlink blitz runtime data files into /etc/hysteria +for f in .configs.env nodes.json extra.json traffic_data.json hysteria_connections.json; do + target="${HYSTERIA_DIR}/${f}" + source="${BLITZ_DATA_DIR}/${f}" + [ -e "${target}" ] && [ ! -L "${target}" ] && rm -f "${target}" + [ ! -L "${target}" ] && ln -sf "${source}" "${target}" +done + +# Symlink service .env files so they persist in blitz_data across container recreation +declare -A SERVICE_ENVS=( + ["${HYSTERIA_DIR}/core/scripts/webpanel/.env"]="${BLITZ_DATA_DIR}/webpanel.env" + ["${HYSTERIA_DIR}/core/scripts/normalsub/.env"]="${BLITZ_DATA_DIR}/normalsub.env" + ["${HYSTERIA_DIR}/core/scripts/telegrambot/.env"]="${BLITZ_DATA_DIR}/telegrambot.env" +) +for target in "${!SERVICE_ENVS[@]}"; do + source="${SERVICE_ENVS[$target]}" + [ -e "${target}" ] && [ ! -L "${target}" ] && rm -f "${target}" + [ ! -L "${target}" ] && ln -sf "${source}" "${target}" +done + +# --------------------------------------------------------------------------- +# 3. 
Auto-detect SNI if not provided +# --------------------------------------------------------------------------- +if [ -z "${HYSTERIA_SNI}" ]; then + echo "[entrypoint] HYSTERIA_SNI is empty — auto-detecting from ip.sb..." + HYSTERIA_SNI=$(curl -s -4 --max-time 10 ip.sb 2>/dev/null || true) + if [ -z "${HYSTERIA_SNI}" ]; then + echo "[entrypoint] WARNING: could not detect public IP; using 'localhost' as SNI" + HYSTERIA_SNI="localhost" + fi + echo "[entrypoint] HYSTERIA_SNI=${HYSTERIA_SNI}" +fi + +# --------------------------------------------------------------------------- +# 4. Generate ECDSA TLS certificates — same as bare-metal install (first run) +# --------------------------------------------------------------------------- +if [ ! -f "${BLITZ_DATA_DIR}/ca.crt" ]; then + echo "[entrypoint] Generating ECDSA TLS certificate for CN=${HYSTERIA_SNI}..." + openssl ecparam -genkey -name prime256v1 -out "${BLITZ_DATA_DIR}/ca.key" 2>/dev/null + openssl req -new -x509 -days 36500 \ + -key "${BLITZ_DATA_DIR}/ca.key" \ + -out "${BLITZ_DATA_DIR}/ca.crt" \ + -subj "/CN=${HYSTERIA_SNI}" 2>/dev/null + chmod 640 "${BLITZ_DATA_DIR}/ca.key" "${BLITZ_DATA_DIR}/ca.crt" + echo "[entrypoint] TLS certificate generated." +fi + +# --------------------------------------------------------------------------- +# 5. Generate Hysteria2 config.json using repo template + jq (first run) +# --------------------------------------------------------------------------- +if [ ! -f "${BLITZ_DATA_DIR}/config.json" ]; then + echo "[entrypoint] Generating Hysteria2 config.json..." 
+ + PORT="${HYSTERIA_PORT:-8080}" + SHA256=$(openssl x509 -noout -fingerprint -sha256 -inform pem \ + -in "${BLITZ_DATA_DIR}/ca.crt" | sed 's/.*=//;s///g') + TRAFFIC_SECRET=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || openssl rand -hex 16) + NETWORK_DEF=$(ip route | grep "^default" | awk '{print $5}' | head -n1) + + # Use the repo's config.json template and substitute values with jq, + # exactly as the bare-metal install.sh does. + OBFS_PASS="${OBFS_PASSWORD:-$(openssl rand -base64 24 | tr -dc 'A-Za-z0-9' | head -c 32)}" + jq --arg port "$PORT" \ + --arg sha256 "$SHA256" \ + --arg obfspassword "$OBFS_PASS" \ + --arg UUID "$TRAFFIC_SECRET" \ + --arg networkdef "$NETWORK_DEF" \ + '.listen = ":\($port)" | + .tls.cert = "/etc/hysteria/ca.crt" | + .tls.key = "/etc/hysteria/ca.key" | + .tls.pinSHA256 = $sha256 | + .obfs.salamander.password = $obfspassword | + .trafficStats.secret = $UUID | + .outbounds[0].direct.bindDevice = $networkdef' \ + "${HYSTERIA_DIR}/config.json.template" > "${BLITZ_DATA_DIR}/config.json" + + # Masquerade and OBFS are mutually exclusive. + # If MASQUERADE_URL is set and no explicit OBFS_PASSWORD was provided, replace obfs with masquerade. + if [ -n "${MASQUERADE_URL}" ] && [ -z "${OBFS_PASSWORD}" ]; then + echo "[entrypoint] Configuring masquerade (removing obfs)..." 
+ if [ "${MASQUERADE_URL}" = "string" ]; then + # Return a generic HTTP 502 response (same as bare-metal masquerade.py) + jq 'del(.obfs) | .masquerade = { + "type": "string", + "string": { + "content": "HTTP 502: Bad Gateway", + "headers": {"Content-Type": "text/plain; charset=utf-8", "Server": "Caddy"}, + "statusCode": 502 + } + }' "${BLITZ_DATA_DIR}/config.json" > "${BLITZ_DATA_DIR}/config.json.tmp" \ + && mv "${BLITZ_DATA_DIR}/config.json.tmp" "${BLITZ_DATA_DIR}/config.json" + else + # Proxy mode — forward requests to the specified URL + jq --arg url "${MASQUERADE_URL}" 'del(.obfs) | .masquerade = { + "type": "proxy", + "proxy": {"url": $url} + }' "${BLITZ_DATA_DIR}/config.json" > "${BLITZ_DATA_DIR}/config.json.tmp" \ + && mv "${BLITZ_DATA_DIR}/config.json.tmp" "${BLITZ_DATA_DIR}/config.json" + fi + echo "[entrypoint] Masquerade configured." + fi + + echo "[entrypoint] config.json generated." +fi + +# --------------------------------------------------------------------------- +# 7. Generate .configs.env (first run) +# --------------------------------------------------------------------------- +if [ ! -f "${BLITZ_DATA_DIR}/.configs.env" ]; then + echo "[entrypoint] Generating .configs.env..." + IP4="${HYSTERIA_SNI}" + IP6="${SERVER_IPV6:-}" + cat > "${BLITZ_DATA_DIR}/.configs.env" < "${WEBPANEL_ENV}" <:${WEBPANEL_PORT:-2096}/${ROOT_PATH}/" +fi + +# --------------------------------------------------------------------------- +# 9. Generate normalsub .env (first run, if enabled) +# --------------------------------------------------------------------------- +if [ "${NORMALSUB_ENABLED}" = "true" ]; then + NORMALSUB_ENV="${BLITZ_DATA_DIR}/normalsub.env" + if [ ! -f "${NORMALSUB_ENV}" ]; then + echo "[entrypoint] Generating normalsub .env..." + SUBPATH=$(openssl rand -hex 16) + cat > "${NORMALSUB_ENV}" <:${NORMALSUB_PORT:-2095}/${SUBPATH}/" + fi +fi + +# --------------------------------------------------------------------------- +# 10. 
Generate telegrambot .env (first run, if enabled) +# --------------------------------------------------------------------------- +if [ "${TELEGRAM_ENABLED}" = "true" ]; then + TELEGRAM_ENV="${BLITZ_DATA_DIR}/telegrambot.env" + if [ ! -f "${TELEGRAM_ENV}" ]; then + echo "[entrypoint] Generating telegrambot .env..." + cat > "${TELEGRAM_ENV}" </dev/null | tr -dc '0-9') +if [ -n "${HYSTERIA_LISTEN_PORT}" ]; then + # Open Hysteria UDP port + if ! iptables -C INPUT -p udp --dport "${HYSTERIA_LISTEN_PORT}" -j ACCEPT 2>/dev/null; then + iptables -I INPUT -p udp --dport "${HYSTERIA_LISTEN_PORT}" -j ACCEPT 2>/dev/null && \ + echo "[entrypoint] Firewall: opened UDP port ${HYSTERIA_LISTEN_PORT}" + fi + # Open webpanel TCP port + if ! iptables -C INPUT -p tcp --dport "${WEBPANEL_BIND_PORT}" -j ACCEPT 2>/dev/null; then + iptables -I INPUT -p tcp --dport "${WEBPANEL_BIND_PORT}" -j ACCEPT 2>/dev/null && \ + echo "[entrypoint] Firewall: opened TCP port ${WEBPANEL_BIND_PORT}" + fi +fi + +# --------------------------------------------------------------------------- +# 15. Start supervisord +# --------------------------------------------------------------------------- +echo "[entrypoint] Starting supervisord..." +exec /usr/bin/supervisord -n -c "${SUPERVISOR_CONF}" diff --git a/docker/systemctl-shim.sh b/docker/systemctl-shim.sh new file mode 100644 index 00000000..d7da13dc --- /dev/null +++ b/docker/systemctl-shim.sh @@ -0,0 +1,206 @@ +#!/bin/bash +# systemctl shim for Docker environments. +# Translates systemctl calls into supervisorctl commands so that +# existing Blitz scripts work unmodified inside the Docker container. 
+ +set -euo pipefail + +SUPERVISOR_CONF="/tmp/supervisord.conf" + +# --------------------------------------------------------------------------- +# Service-name mapping: systemd unit → supervisor program name +# --------------------------------------------------------------------------- +# Services managed by supervisord inside this container: +# hysteria-server.service → hysteria-server +# hysteria-auth.service → hysteria-auth +# hysteria-scheduler.service → hysteria-scheduler +# hysteria-webpanel.service → hysteria-webpanel +# hysteria-telegram-bot.service → hysteria-telegrambot +# hysteria-normal-sub.service → hysteria-normalsub +# hysteria-ip-limit.service → hysteria-iplimit +# +# Services that do not apply in Docker (no-op): +# hysteria-caddy.service +# hysteria-caddy-normalsub.service + +map_service() { + local unit="$1" + case "$unit" in + hysteria-server.service) echo "supervisor:hysteria-server" ;; + hysteria-auth.service) echo "supervisor:hysteria-auth" ;; + hysteria-scheduler.service) echo "supervisor:hysteria-scheduler" ;; + hysteria-webpanel.service) echo "supervisor:hysteria-webpanel" ;; + hysteria-telegram-bot.service) echo "supervisor:hysteria-telegrambot" ;; + hysteria-normal-sub.service) echo "supervisor:hysteria-normalsub" ;; + hysteria-caddy-normalsub.service) echo "noop:" ;; + hysteria-caddy.service) echo "noop:" ;; + hysteria-ip-limit.service) echo "supervisor:hysteria-iplimit" ;; + wg-quick@wgcf.service) echo "wg:wgcf" ;; + wg-quick@*.service) echo "wg:${unit#wg-quick@}" ;; + caddy.service) echo "noop:" ;; + warp-svc) echo "noop:" ;; + *) echo "unknown:$unit" ;; + esac +} + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- +supervisor_status() { + local prog="$1" + local st + st=$(supervisorctl -c "$SUPERVISOR_CONF" status "$prog" 2>/dev/null || true) + echo "$st" | grep -q "RUNNING" +} + +# 
---------------------------------------------------------------------------
+# Parse arguments: one systemctl verb (ACTION), option flags, and unit names.
+# ---------------------------------------------------------------------------
+ACTION=""
+QUIET=false
+NOW=false
+UNITS=()
+
+while [[ $# -gt 0 ]]; do
+    case "$1" in
+        is-active|start|stop|restart|enable|disable|reload|daemon-reload|is-enabled|status)
+            ACTION="$1"; shift ;;
+        --quiet|-q)
+            QUIET=true; shift ;;
+        --now)
+            NOW=true; shift ;;
+        --no-pager|--full|-all|--all)
+            shift ;; # ignored
+        list-units)
+            # `systemctl list-units ...` — just output nothing, scripts grep for service names
+            echo ""; exit 0 ;;
+        -*)
+            shift ;; # skip unknown flags
+        *)
+            UNITS+=("$1"); shift ;;
+    esac
+done
+
+# daemon-reload is a no-op in Docker
+if [ "$ACTION" = "daemon-reload" ]; then
+    exit 0
+fi
+
+# Need at least one unit for other actions
+if [ ${#UNITS[@]} -eq 0 ]; then
+    exit 0
+fi
+
+RC=0
+for unit in "${UNITS[@]}"; do
+    mapping=$(map_service "$unit")
+    backend="${mapping%%:*}"
+    target="${mapping#*:}" # for wg units: interface name; "${target%.service}" strips the suffix left by the wg-quick@*.service fallback
+
+    case "$ACTION" in
+        is-active)
+            case "$backend" in
+                supervisor)
+                    if supervisor_status "$target"; then
+                        $QUIET || echo "active"
+                    else
+                        $QUIET || echo "inactive"
+                        RC=3
+                    fi
+                    ;;
+                wg)
+                    if ip link show "${target%.service}" &>/dev/null; then # check the mapped interface, not hardcoded wgcf
+                        $QUIET || echo "active"
+                    else
+                        $QUIET || echo "inactive"
+                        RC=3
+                    fi
+                    ;;
+                noop|*)
+                    $QUIET || echo "inactive"
+                    RC=3
+                    ;;
+            esac
+            ;;
+
+        is-enabled)
+            case "$backend" in
+                supervisor) echo "enabled" ;;
+                *) echo "disabled"; RC=1 ;;
+            esac
+            ;;
+
+        start|restart)
+            case "$backend" in
+                supervisor)
+                    supervisorctl -c "$SUPERVISOR_CONF" "$ACTION" "$target" 2>/dev/null || RC=1
+                    ;;
+                wg)
+                    wg-quick up "${target%.service}" 2>/dev/null || true # honor wg-quick@<iface>, not just wgcf
+                    ;;
+                noop) ;;
+                *) RC=1 ;;
+            esac
+            ;;
+
+        stop)
+            case "$backend" in
+                supervisor)
+                    supervisorctl -c "$SUPERVISOR_CONF" stop "$target" 2>/dev/null || true
+                    ;;
+                wg)
+                    wg-quick down "${target%.service}" 2>/dev/null || true
+                    ;;
+                noop) ;;
+                *) RC=1 ;;
+            esac
+            ;;
+
+        enable)
+            # enable is a no-op, but --now means also start
+            if $NOW; then
+                case "$backend" in
+                    supervisor) supervisorctl -c "$SUPERVISOR_CONF" start "$target" 2>/dev/null || true ;;
+                    wg) wg-quick up "${target%.service}" 2>/dev/null || true ;;
+                    *) ;;
+                esac
+            fi
+            ;;
+
+        disable)
+            # disable is a no-op, but --now means also stop
+            if $NOW; then
+                case "$backend" in
+                    supervisor) supervisorctl -c "$SUPERVISOR_CONF" stop "$target" 2>/dev/null || true ;;
+                    wg) wg-quick down "${target%.service}" 2>/dev/null || true ;;
+                    *) ;;
+                esac
+            fi
+            ;;
+
+        reload)
+            case "$backend" in
+                supervisor)
+                    supervisorctl -c "$SUPERVISOR_CONF" restart "$target" 2>/dev/null || RC=1
+                    ;;
+                *) ;;
+            esac
+            ;;
+
+        status)
+            case "$backend" in
+                supervisor)
+                    supervisorctl -c "$SUPERVISOR_CONF" status "$target" 2>/dev/null || true
+                    ;;
+                *) echo "Unit $unit not found." ;;
+            esac
+            ;;
+
+        *)
+            echo "systemctl shim: unsupported action '$ACTION'" >&2
+            RC=1
+            ;;
+    esac
+done
+
+exit $RC