forked from XposeMarket/SmallClaw
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
157 lines (141 loc) · 6.09 KB
/
docker-compose.yml
File metadata and controls
157 lines (141 loc) · 6.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
# ============================================================
# SmallClaw / LocalClaw – docker-compose.yml
#
# NOTE: the top-level `version:` attribute was removed — it is obsolete
# under the Compose Specification; Compose V2 ignores it and prints a
# warning on every `docker compose` invocation.
#
# Supported providers (set SMALLCLAW_PROVIDER in .env):
#   ollama        – bundled Ollama container (default)
#   lm_studio     – LM Studio on your HOST machine (port 1234)
#   llama_cpp     – llama.cpp server on your HOST machine (port 8080)
#   openai        – OpenAI API key (cloud)
#   openai_codex  – OpenAI OAuth / ChatGPT Plus (cloud)
#
# Quick start:
#   cp .env.example .env     # then edit .env for your provider
#   docker compose up -d     # start everything
#   docker compose logs -f   # follow logs
#   docker compose down      # stop & remove containers
#   docker compose down -v   # also wipe volumes (full reset)
# ============================================================
services:
  # ── Ollama ────────────────────────────────────────────────
  # Only relevant when SMALLCLAW_PROVIDER=ollama.
  # If you're using lm_studio / llama_cpp / openai / openai_codex
  # you can comment out or remove the ollama + model-init services.
  ollama:
    image: ollama/ollama:latest
    container_name: smallclaw-ollama
    restart: unless-stopped
    profiles:
      - ollama  # start only when using: docker compose --profile ollama up
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      # Server bind address (listen on all interfaces inside the container).
      - OLLAMA_HOST=0.0.0.0
    # ── GPU support ──────────────────────────────────────────
    # NVIDIA (requires nvidia-container-toolkit on the host):
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    #
    # AMD / ROCm:
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    healthcheck:
      # The ollama/ollama image does not ship curl, so a curl-based check
      # fails on exec and leaves the service permanently "unhealthy",
      # which in turn blocks model-init (it waits on service_healthy).
      # `ollama list` uses the bundled CLI and exits non-zero until the
      # local API server is accepting requests.
      test: ["CMD", "ollama", "list"]
      interval: 20s
      timeout: 10s
      retries: 5
      start_period: 10s
  # ── Model pull (one-shot init, Ollama only) ──────────────
  # Runs once after the ollama service reports healthy, pulls the
  # configured model into the shared ollama_data volume, then exits
  # (restart: "no" keeps Compose from relaunching it).
  model-init:
    image: ollama/ollama:latest
    container_name: smallclaw-model-init
    profiles:
      - ollama
    depends_on:
      ollama:
        condition: service_healthy
    volumes:
      # Same model store as the ollama service, so pulled blobs are
      # immediately visible to the server.
      - ollama_data:/root/.ollama
    environment:
      # Client-side OLLAMA_HOST: points the `ollama` CLI at the server
      # container (distinct from the server's 0.0.0.0 bind address).
      - OLLAMA_HOST=http://ollama:11434
      - DEFAULT_MODEL=${SMALLCLAW_DEFAULT_MODEL:-qwen3:4b}
    # Folded scalar (>) collapses these lines into one command string.
    # $$ escapes Compose interpolation so the container shell expands
    # $DEFAULT_MODEL at runtime. Do not put comments inside the scalar —
    # they would become part of the command.
    entrypoint: >
      sh -c "
      echo '>>> Pulling model: '$$DEFAULT_MODEL;
      ollama pull $$DEFAULT_MODEL;
      echo '>>> Done.';
      "
    restart: "no"  # quoted: a bare `no` parses as boolean false in YAML 1.1
  # ── SmallClaw Gateway ────────────────────────────────────
  # The main application container, built from the local Dockerfile.
  smallclaw:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: smallclaw-app
    restart: unless-stopped
    ports:
      # Host port is configurable via HOST_PORT; the in-container gateway
      # port is fixed at 18789 (see GATEWAY_PORT below).
      - "${HOST_PORT:-18789}:18789"
    # Allow the container to reach LM Studio / llama.cpp on the HOST.
    # On Linux, host.docker.internal isn't automatically available so we
    # inject it via extra_hosts. On Mac/Windows Docker Desktop it works
    # out of the box, but adding it here doesn't hurt.
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - smallclaw_data:/data
      - smallclaw_workspace:/data/workspace
      # OpenAI Codex OAuth tokens are stored in ~/.localclaw on your host.
      # Mount the directory so tokens survive container restarts and the
      # initial `smallclaw auth login` can be run once on the host.
      # Comment this out if you're not using openai_codex.
      - ${LOCALCLAW_CONFIG_DIR:-~/.localclaw}:/root/.localclaw
    environment:
      - NODE_ENV=production
      - DOCKER_CONTAINER=true
      - GATEWAY_PORT=18789
      - GATEWAY_HOST=0.0.0.0
      - SMALLCLAW_DATA_DIR=/data
      - SMALLCLAW_WORKSPACE_DIR=/data/workspace
      # ── Active provider ──────────────────────────────────
      - SMALLCLAW_PROVIDER=${SMALLCLAW_PROVIDER:-ollama}
      # ── Ollama ───────────────────────────────────────────
      # Points to the bundled container by default.
      # Override in .env: OLLAMA_HOST=http://host.docker.internal:11434
      # to use Ollama running on your host machine instead.
      - OLLAMA_HOST=${OLLAMA_HOST:-http://ollama:11434}
      # ── LM Studio ────────────────────────────────────────
      # Reaches LM Studio running on the host via host.docker.internal.
      - LM_STUDIO_ENDPOINT=${LM_STUDIO_ENDPOINT:-http://host.docker.internal:1234}
      - LM_STUDIO_API_KEY=${LM_STUDIO_API_KEY:-}
      - LM_STUDIO_MODEL=${LM_STUDIO_MODEL:-}
      # ── llama.cpp ────────────────────────────────────────
      - LLAMA_CPP_ENDPOINT=${LLAMA_CPP_ENDPOINT:-http://host.docker.internal:8080}
      - LLAMA_CPP_MODEL=${LLAMA_CPP_MODEL:-}
      # ── OpenAI ───────────────────────────────────────────
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - OPENAI_MODEL=${OPENAI_MODEL:-gpt-4o}
      # ── OpenAI Codex (OAuth) ─────────────────────────────
      # Tokens live in the mounted ~/.localclaw volume above.
      - CODEX_MODEL=${CODEX_MODEL:-gpt-5.3-codex}
    healthcheck:
      # NOTE(review): assumes the image built from the local Dockerfile
      # includes curl AND serves a /health endpoint on 18789 — confirm
      # both, otherwise the container reports unhealthy forever.
      test: ["CMD", "curl", "-f", "http://localhost:18789/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
# ── Named volumes ────────────────────────────────────────────
# `driver: local` is Docker's default; stated explicitly for clarity.
volumes:
  # Ollama model store, shared by the ollama and model-init services.
  ollama_data:
    driver: local
  # Gateway state, mounted at /data in the smallclaw container.
  smallclaw_data:
    driver: local
  # Working files, mounted at /data/workspace in the smallclaw container.
  smallclaw_workspace:
    driver: local