-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
101 lines (96 loc) · 2.71 KB
/
docker-compose.yml
File metadata and controls
101 lines (96 loc) · 2.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# BigEd CC — Self-Hosted SaaS Deployment
# Usage:
# docker compose up -d # start all services
# docker compose up -d --scale fleet=1 # single fleet instance
# docker compose logs -f fleet # follow fleet logs
# docker compose down # stop all services
#
# For GPU-less deployments, override or remove the NVIDIA device reservation
# on the 'ollama' service (e.g. via a docker-compose.override.yml). NOTE: no
# 'cpu' profile is defined in this file, so '--profile cpu' has no effect.
#
# v0.400.00b — Platform & SaaS foundation + Rust rewrite
#
# The fleet image now includes Rust binaries (biged, biged_bridge.so) built in
# a multi-stage Dockerfile. The Python supervisor remains the default entrypoint;
# override with: command: ["biged", "supervisor"] to use the Rust supervisor.
#
# Tip: add "biged-rs/target" to .dockerignore to speed up builds.
services:
  # Playwright MCP server: browser automation exposed over the MCP protocol.
  playwright-mcp:
    image: mcr.microsoft.com/playwright/mcp:latest
    container_name: playwright-mcp
    # Listen on all container interfaces; host exposure is still restricted to
    # loopback by the port binding below.
    command: ["--port", "8931", "--host", "0.0.0.0", "--allowed-hosts", "*"]
    ports:
      - "127.0.0.1:8931:8931"  # loopback only — not reachable from the network
    volumes:
      - ./education-context:/workspace/education-context:ro  # read-only content mount
    init: true  # run an init as PID 1 so orphaned browser processes get reaped
    restart: unless-stopped
fleet:
build: .
container_name: biged-fleet
ports:
- "127.0.0.1:5555:5555"
- "127.0.0.1:8080:8080"
volumes:
- ./fleet/fleet.toml:/app/fleet/fleet.toml:ro
- ./fleet:/app/fleet
- ./BigEd:/app/BigEd
- fleet-data:/app/fleet/knowledge
- fleet-logs:/app/fleet/logs
- biged-data:/app/data
environment:
- BIGED_PRODUCTION=1
- OLLAMA_HOST=http://ollama:11434
- BIGED_WEB_MODE=1
depends_on:
ollama:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5555/api/fleet/health"]
interval: 30s
timeout: 5s
retries: 3
restart: unless-stopped
ollama:
image: ollama/ollama:latest
container_name: biged-ollama
ports:
- "127.0.0.1:11434:11434"
volumes:
- ollama-models:/root/.ollama
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
interval: 10s
timeout: 5s
retries: 5
restart: unless-stopped
  # Monitoring dashboard for the fleet; all mounts are read-only.
  dashboard:
    build:
      context: .
      dockerfile: Dockerfile.dashboard
    container_name: biged-dashboard
    ports:
      - "127.0.0.1:5556:5555"  # host 5556 -> container 5555 (avoids clash with fleet)
    volumes:
      - ./fleet:/app/fleet:ro  # dashboard never writes fleet state
      - fleet-data:/app/fleet/knowledge:ro
    environment:
      - BIGED_PRODUCTION=1
      - BIGED_WEB_MODE=1
    depends_on:
      fleet:
        condition: service_healthy  # start only after fleet's healthcheck passes
    restart: unless-stopped
# Named volumes — data survives `docker compose down` (unless run with -v).
volumes:
  fleet-data:  # knowledge base, mounted rw by fleet and ro by dashboard
  fleet-logs:
  ollama-models:  # cached model weights
  biged-data: