forked from XposeMarket/SmallClaw
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
43 lines (36 loc) · 2.5 KB
/
.env.example
File metadata and controls
43 lines (36 loc) · 2.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# ============================================================
# SmallClaw – environment variables
# Copy this file to .env and customise it.
# docker-compose.yml reads these automatically.
# ============================================================
# ── Active provider ─────────────────────────────────────────
# One of: ollama | lm_studio | llama_cpp | openai | openai_codex
SMALLCLAW_PROVIDER=ollama
# ── Port ────────────────────────────────────────────────────
# Port the SmallClaw gateway will be accessible on the HOST
# The app internally always runs on 18789 inside the container.
# Change HOST_PORT to map it to a different port on your machine.
HOST_PORT=18789
# ── Ollama (default) ────────────────────────────────────────
# Model to auto-pull on first run (only used when SMALLCLAW_PROVIDER=ollama)
SMALLCLAW_DEFAULT_MODEL=qwen3:4b
# Ollama endpoint (leave as-is when using the bundled ollama container)
# Change to http://host.docker.internal:11434 to use Ollama on your host machine
OLLAMA_HOST=http://ollama:11434
# ── LM Studio ───────────────────────────────────────────────
# LM Studio runs on the HOST, not inside Docker.
# Use host.docker.internal to reach it from inside the container.
LM_STUDIO_ENDPOINT=http://host.docker.internal:1234
# LM_STUDIO_API_KEY= # optional – only if you enabled auth in LM Studio
# LM_STUDIO_MODEL= # e.g. mistral-nemo-instruct-2407
# ── llama.cpp ───────────────────────────────────────────────
# llama.cpp server also runs on the HOST.
LLAMA_CPP_ENDPOINT=http://host.docker.internal:8080
# LLAMA_CPP_MODEL= # e.g. Meta-Llama-3-8B-Instruct.Q4_K_M.gguf
# ── OpenAI (API key) ────────────────────────────────────────
OPENAI_API_KEY=
# OPENAI_MODEL=gpt-4o
# ── OpenAI Codex (OAuth / ChatGPT Plus) ─────────────────────
# No key needed – SmallClaw handles the OAuth flow.
# Mount your .smallclaw dir (see docker-compose.yml) so tokens persist.
# CODEX_MODEL=gpt-5.3-codex