-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdocker-compose.template.yml
More file actions
255 lines (249 loc) · 9.63 KB
/
docker-compose.template.yml
File metadata and controls
255 lines (249 loc) · 9.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification — docker compose v2 ignores it and warns. Kept here for
# compatibility with legacy docker-compose; safe to drop once v1 is gone.
version: "3.7"
volumes:
  # Caddy TLS certificates and ACME state (mounted by frontend-proxy).
  caddy-data:
{% if testing %}
  # Coverage output shared between instrumented containers during test
  # runs (mounted read-write by the web service below).
  coverage-data:
    name: "{{ prefix }}_coverage_data"
{% endif %}
services:
  # PostgreSQL database backing the web application. Only reachable via
  # the internal web-and-db network (no host ports).
  db:
    init: true
    image: postgres:17.2
    restart: always
{% if not testing %}
    # Persist database files on the host. Not mounted in testing mode,
    # presumably so every test run starts from an empty database.
    volumes:
      - ./data/postgresql-db:/var/lib/postgresql/data
{% endif %}
    environment:
      - POSTGRES_USER=ref
      - POSTGRES_DB=ref
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
      - REAL_HOSTNAME=${REAL_HOSTNAME}
    networks:
      - web-and-db
    cgroup_parent: "{{ cgroup_parent }}-core.slice"
    # Drop every default capability and re-add only what postgres needs.
    cap_drop:
      - ALL
    cap_add:
      # Set owner of DB files to the correct user
      - CAP_FOWNER
      - CAP_CHOWN
      - CAP_DAC_OVERRIDE
      # Drop privileges to postgres user
      - CAP_SETUID
      - CAP_SETGID
{% if binfmt_support %}
  # Privileged helper that registers qemu-user-static binfmt_misc
  # handlers on the host kernel (persistently, with credential
  # passthrough), so containers built for foreign CPU architectures can
  # be executed on this host.
  foreign-arch-runner:
    init: true
    image: multiarch/qemu-user-static
    privileged: true
    command: ["--reset", "--persistent", "yes", "--credential", "yes"]
{% endif %}
  # The Flask web application (built from ./webapp) — the main REF
  # service. Uses the host docker socket (mounted below) to manage
  # per-student exercise containers.
  web:
    init: true
    hostname: web
    security_opt:
      # Needed for mounting overlay inside containers
      - apparmor:unconfined
    environment:
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:?ADMIN_PASSWORD not set}
      - SSH_TO_WEB_KEY=${SSH_TO_WEB_KEY:?SSH_TO_WEB_KEY not set}
      - DEBUG=${DEBUG:-0}
      - MAINTENANCE_ENABLED=${MAINTENANCE_ENABLED:-0}
      - POSTGRES_USER=ref
      - POSTGRES_DB=ref
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
      - SECRET_KEY=${SECRET_KEY:?SECRET_KEY not set}
      - SSH_HOST_PORT=${SSH_HOST_PORT:?SSH_HOST_PORT not set}
      - ADMIN_SSH_KEY=${ADMIN_SSH_KEY}
      - DISABLE_TELEGRAM=${DISABLE_TELEGRAM}
      - DEBUG_TOOLBAR=${DEBUG_TOOLBAR}
      - HOT_RELOADING=${HOT_RELOADING}
      - DISABLE_RESPONSE_CACHING=${DISABLE_RESPONSE_CACHING}
      - RATELIMIT_ENABLED=${RATELIMIT_ENABLED}
      - DOCKER_RESSOURCE_PREFIX=${DOCKER_RESSOURCE_PREFIX:-}
      - INSTANCES_CGROUP_PARENT={{ instances_cgroup_parent }}
      - REAL_HOSTNAME=${REAL_HOSTNAME}
{% if testing %}
      # Enable in-container coverage collection during test runs.
      - COVERAGE_PROCESS_START=/coverage-config/.coveragerc
      - COVERAGE_CONTAINER_NAME=web
{% endif %}
    cap_add:
      # NOTE(review): presumably required for the overlay mounts this
      # container creates (see security_opt above) — confirm.
      - SYS_ADMIN
    build:
      context: ./webapp
      args:
        # Pass the host's docker group id, since we are using the docker
        # socket from the host.
        DOCKER_GROUP_ID: ${DOCKER_GROUP_ID:?DOCKER_GROUP_ID not set}
    volumes:
      # Persistence folder (db, templates, ...)
      # The mounts need to be propagated, thus we can mount mounts created
      # in this container from the host into other containers
      - type: bind
        source: {{ data_path }} # NOTE: Indented with two spaces!!!
        target: /data # NOTE: Indented with two spaces!!!
        bind: # NOTE: Indented with two spaces!!!
          propagation: shared
      # The webinterface, only needed for live updating during development
      - ./webapp/:/app
      # The exercise templates to import
      - {{ exercises_path }}:/exercises
      # Make docker available inside the container
      - /var/run/docker.sock:/var/run/docker.sock
      # Container SSH public keys, bind-mounted into student containers at runtime
      - ./container-keys:/container-keys:ro
      # Source for ref-utils, bind-mounted read-only into student
      # instances so edits on the host apply without rebuilding images.
      - type: bind
        source: {{ ref_utils_path }}
        target: /ref-utils
        read_only: true
{% if testing %}
      - coverage-data:/coverage-data:rw
      - ./coverage:/coverage-config:ro
{% endif %}
    networks:
      - web-host
      - web-and-ssh
      - web-and-db
    depends_on:
      - db
    cgroup_parent: "{{ cgroup_parent }}-core.slice"
  # Caddy reverse proxy that fronts the whole web interface on a single
  # host port. Routes /spa/* to the SPA (vite dev in dev mode, baked
  # static bundle in prod) and /static/* directly from webapp/ref/static;
  # everything else is reverse-proxied to the Flask web container. The
  # SPA build artifact is baked into this image at docker build time via
  # a multi-stage Dockerfile, so prod does not need `vite preview` or a
  # shared volume. Dev selection is done at container start by
  # entrypoint.sh based on $HOT_RELOADING.
  frontend-proxy:
    init: true
    hostname: frontend-proxy
    build:
      context: .
      dockerfile: frontend-proxy/Dockerfile
    environment:
      - HOT_RELOADING=${HOT_RELOADING:-false}
      - TLS_MODE=${TLS_MODE:-off}
      - DOMAIN=${TLS_DOMAIN:-}
      - HTTPS_HOST_PORT=${HTTPS_HOST_PORT:-8443}
      - REDIRECT_HTTP_TO_HTTPS=${TLS_REDIRECT_HTTP:-false}
    volumes:
      # Serve Flask's static assets directly from Caddy, skipping
      # uWSGI. Read-only to keep the proxy sandboxed.
      - ./webapp/ref/static:/srv/flask-static:ro
      # Persist Caddy's TLS certificates and ACME state across
      # container restarts. Essential for acme mode (avoids
      # hitting Let's Encrypt rate limits).
      - caddy-data:/data
    # Host port publishing depends on the TLS mode the template is
    # rendered with; nothing is published in testing mode.
    {%- if not testing %}
    ports:
    {%- if tls_mode == 'off' %}
      - "${HTTP_HOST_PORT}:8000"
    {%- elif tls_mode == 'internal' %}
      - "${HTTP_HOST_PORT}:8080"
      - "${HTTPS_HOST_PORT}:8443"
    {%- elif tls_mode == 'acme' %}
      - "${HTTP_HOST_PORT}:80"
      - "${HTTPS_HOST_PORT}:443"
    {%- endif %}
    {%- endif %}
    networks:
      - web-host
    depends_on:
      - web
    cgroup_parent: "{{ cgroup_parent }}-core.slice"
    # Probe the proxy itself via a known static asset; scheme/port must
    # match the TLS mode selected above.
    healthcheck:
      {%- if tls_mode == 'off' %}
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8000/static/favicon.ico"]
      {%- elif tls_mode == 'internal' %}
      test: ["CMD", "wget", "-q", "--spider", "--no-check-certificate", "https://localhost:8443/static/favicon.ico"]
      {%- elif tls_mode == 'acme' %}
      test: ["CMD", "wget", "-q", "--spider", "--no-check-certificate", "https://localhost:443/static/favicon.ico"]
      {%- endif %}
      interval: 10s
      timeout: 3s
      retries: 5
      start_period: 5s
  # Vue 3 + Vuetify SPA dev server. Only started in the `dev` compose
  # profile (ctrl.sh adds --profile dev when --hot-reloading is set).
  # In dev it runs `vite dev` with HMR against the host bind-mounted
  # source; frontend-proxy reverse-proxies /spa/* to this container's
  # port 5173 (including Vite's HMR websocket). In prod this service
  # is not started at all — frontend-proxy serves the baked SPA bundle.
  spa-frontend:
    init: true
    hostname: spa-frontend
    profiles:
      - dev
    build:
      context: ./spa-frontend
    environment:
      - HOT_RELOADING=${HOT_RELOADING:-false}
    volumes:
      # Bind-mount the host source so Vite sees live edits. The
      # anonymous volume below shields node_modules from the overlay
      # so deps installed at build time remain available.
      - ./spa-frontend/:/spa-frontend
      - /spa-frontend/node_modules
    networks:
      - web-host
    depends_on:
      - web
    cgroup_parent: "{{ cgroup_parent }}-core.slice"
  # Rust-based SSH reverse proxy. Accepts incoming SSH connections and
  # queries the web service (API_BASE_URL over web-and-ssh) for how each
  # connection should be routed.
  ssh-reverse-proxy:
    init: true
    hostname: ssh-reverse-proxy
    # NOTE(review): memlock is fully unbounded here — presumably needed
    # by the proxy (e.g. locking key material); confirm before tightening.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    build:
      context: ./ssh-reverse-proxy
      dockerfile: Dockerfile
    environment:
      - SSH_LISTEN_ADDR=0.0.0.0:2222
      - API_BASE_URL=http://web:8000
      - SSH_TO_WEB_KEY=${SSH_TO_WEB_KEY:?SSH_TO_WEB_KEY not set}
      - CONTAINER_SSH_PORT=13370
      - RUST_LOG=ref_ssh_proxy=info,russh=warn
    volumes:
      # Container SSH keys — same ./container-keys directory the web
      # service mounts, read-only here.
      - ./container-keys:/keys:ro
      # Persistent state directory for the proxy.
      - ./data/ssh-proxy:/data
{% if not testing %}
    ports:
      - "${SSH_HOST_PORT:-2222}:2222"
{% endif %}
    networks:
      - web-and-ssh
      - ssh-and-host
    depends_on:
      - web
    cgroup_parent: "{{ cgroup_parent }}-core.slice"
networks:
  # Network used to connect the webinterface to the host
  web-host:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: "br-{{ 'reft-' + bridge_id + '-wh' if testing else 'whost-ref' }}"
  # Interface between the SSH reverse proxy and the webinterface.
  # This interface is used by the SSH proxy to retrieve information
  # on how an incoming connection should be routed.
  web-and-ssh:
    driver: bridge
    internal: true
    driver_opts:
      com.docker.network.bridge.name: "br-{{ 'reft-' + bridge_id + '-ws' if testing else 'w2ssh-ref' }}"
  # This network connects the SSH reverse proxy to the host.
  ssh-and-host:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: "br-{{ 'reft-' + bridge_id + '-sh' if testing else 'shost-ref' }}"
  # Connect web to postgres
  web-and-db:
    driver: bridge
    internal: true
    driver_opts:
      com.docker.network.bridge.name: "br-{{ 'reft-' + bridge_id + '-wd' if testing else 'w2db-ref' }}"