-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path.env.example
More file actions
189 lines (158 loc) · 7.42 KB
/
.env.example
File metadata and controls
189 lines (158 loc) · 7.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
# Copy this file to .env and add your Mistral API key
MISTRAL_API_KEY=your_mistral_api_key_here
# Agent loop settings
MAX_ITERATIONS=20
# Total token budget for the agent run (0 presumably disables the budget cap — confirm in the config loader)
MAX_TOKENS_BUDGET=0
# Maximum tokens in the context window sent to the LLM (leaves headroom for the response)
MAX_CONTEXT_TOKENS=28000
# LLM retry settings (exponential back-off; rate-limit responses get +1s penalty)
LLM_RETRY_MAX=3
LLM_RETRY_BASE_DELAY_MS=500
# Per-tool execution timeout in milliseconds (tool promise is raced against this limit)
TOOL_TIMEOUT_MS=30000
# LLM provider settings
LLM_PROVIDER=mistral
LLM_MODEL=
LLM_TEMPERATURE=0.7
# System prompt (optional: path to a .txt or .md file to override the generated prompt)
SYSTEM_PROMPT_PATH=
# Tool security (Task 1.7)
# Set to true to skip all confirmation prompts (useful for CI / non-interactive environments)
AUTO_APPROVE_ALL=false
# Comma-separated list of tool names that are permitted to run (empty = allow all)
TOOL_ALLOWLIST=
# Comma-separated list of tool names that are always blocked regardless of other settings
TOOL_BLOCKLIST=
# Shell command execution tool (Task 2.1)
# Comma-separated list of extra blocked command patterns appended to built-in defaults
SHELL_COMMAND_BLOCKLIST=
# Code execution tool (Task 2.5)
# Timeout in milliseconds for code-run executions (overrides TOOL_TIMEOUT_MS; default: 60s)
EXECUTION_TIMEOUT_MS=60000
# Execution environment label — reserved for future sandboxing support (default: local)
EXECUTION_ENVIRONMENT=local
# Execution sandboxing (Task 4.4)
# "none" runs code directly on the host (default); "docker" isolates execution in a container
SANDBOX_MODE=none
# Docker image used when SANDBOX_MODE=docker (must contain the required interpreter)
SANDBOX_DOCKER_IMAGE=node:20-alpine
# File management tools (Task 2.2)
# Root directory for all file operations; paths outside this are rejected (path traversal prevention)
WORKSPACE_ROOT=
# Instruction files (Task 5.1)
# Root directory for discovering instruction files (.instructions.md, AGENTS.md, etc.)
# Defaults to WORKSPACE_ROOT when not set.
INSTRUCTIONS_ROOT=
# Prompt templates (Task 5.2)
# Directory containing .md/.txt prompt template files loaded into the PromptRegistry on startup.
# Files may include YAML frontmatter with: name, description, variables, tags.
PROMPT_TEMPLATES_DIR=
# Prompt version history (Task 5.3)
# Path to a JSON file where prompt template version history is persisted across restarts.
# Leave empty to disable persistence.
PROMPT_HISTORY_FILE=
# Dynamic prompt context injection (Task 5.4)
# Milliseconds before runtime context (workspace, tools, instructions) is re-built.
# Set to 0 to rebuild context on every LLM call.
PROMPT_CONTEXT_REFRESH_MS=5000
# MCP client integration (Task 2.8)
# JSON array of MCP server configs to connect on startup.
# Each entry: { "name": "...", "transport": "stdio"|"sse", "command": "...", "args": [...], "url": "..." }
# Example: MCP_SERVERS=[{"name":"my-server","transport":"stdio","command":"npx","args":["my-mcp-server"]}]
MCP_SERVERS=
# Security hardening (Task 4.3)
# Maximum file size in bytes for read/write operations (default: 10 MB)
MAX_FILE_SIZE_BYTES=10485760
# Maximum combined stdout+stderr output size in bytes for shell commands (default: 1 MB)
MAX_SHELL_OUTPUT_BYTES=1048576
# Maximum number of tool executions allowed to run concurrently (0 = unlimited)
MAX_CONCURRENT_TOOLS=10
# Comma-separated allowlist of hostnames/IPs for network tool requests (empty = allow all)
NETWORK_ALLOWED_DOMAINS=
# Streaming response support (Task 4.2)
# Set to true to print LLM response tokens as they arrive in the CLI
STREAMING_ENABLED=false
# Observability & Tracing (Task 4.1)
# Set to true to write a JSON trace file per agent invocation
TRACING_ENABLED=false
# Directory where trace JSON files are written (created if missing)
TRACE_OUTPUT_DIR=./traces
# USD cost per input (prompt) token for cost estimation (e.g. 0.000002 = $2/million)
TRACING_COST_PER_INPUT_TOKEN_USD=0
# USD cost per output (completion) token for cost estimation
TRACING_COST_PER_OUTPUT_TOKEN_USD=0
# Logging configuration
LOG_LEVEL=info
LOG_ENABLED=true
# LOG_DESTINATION is used when LOG_FILE is not set: "stdout" (default) or "stderr"
LOG_DESTINATION=stdout
# When set, all log output is written to this file instead of stdout/stderr.
# Recommended for production and interactive CLI use to keep the terminal clean.
# LOG_FILE=./agentloop.log
LOG_NAME=agentloop
LOG_TIMESTAMP=true
# Web search provider
# Select the active search backend: duckduckgo (default) | tavily | langsearch | none
WEB_SEARCH_PROVIDER=duckduckgo
# Tavily search provider (https://tavily.com — requires free/paid account)
# Get your API key at https://app.tavily.com/home
TAVILY_API_KEY=
# Maximum number of results returned per query (default: 5)
TAVILY_MAX_RESULTS=5
# LangSearch provider (https://langsearch.com — requires account)
# Get your API key at https://langsearch.com/
LANGSEARCH_API_KEY=
# Maximum number of results returned per query (default: 5)
LANGSEARCH_MAX_RESULTS=5
# DuckDuckGo search tool
# Maximum number of results returned per query (default: 5)
DUCKDUCKGO_MAX_RESULTS=5
# Minimum delay between outbound DuckDuckGo requests in ms (default: 1000)
DUCKDUCKGO_MIN_DELAY_MS=1000
# Maximum retries for transient DuckDuckGo failures (default: 2)
DUCKDUCKGO_RETRY_MAX=2
# Base delay for exponential back-off between retries in ms (default: 400)
DUCKDUCKGO_RETRY_BASE_DELAY_MS=400
# Additional delay added when a rate-limit response is detected in ms (default: 1000)
DUCKDUCKGO_RATE_LIMIT_PENALTY_MS=1000
# In-memory DuckDuckGo cache TTL in ms (default: 300000 = 5 minutes; 0 disables cache)
DUCKDUCKGO_CACHE_TTL_MS=300000
# Maximum number of cached search queries in memory (default: 128; 0 disables cache)
DUCKDUCKGO_CACHE_MAX_ENTRIES=128
# Serve stale cached search results when upstream search fails (default: true)
DUCKDUCKGO_SERVE_STALE_ON_ERROR=true
# Web fetch tool (Task 2.9a)
# Additional domains to block (comma-separated, merged with built-in blocklist)
WEB_DOMAIN_BLOCKLIST=
# When non-empty, only listed domains are permitted (comma-separated)
WEB_DOMAIN_ALLOWLIST=
# Allow http:// URLs (insecure, off by default)
WEB_ALLOW_HTTP=false
# Max HTTP response body size before aborting (default: 5 MB)
WEB_MAX_RESPONSE_BYTES=5242880
# Max Markdown output length in characters (default: 20000)
WEB_MAX_CONTENT_CHARS=20000
# User-Agent header for fetch requests
WEB_USER_AGENT=AgentLoop/1.0
# Fetch timeout in milliseconds (default: 15s)
WEB_FETCH_TIMEOUT_MS=15000
# Runtime context injection
# When true, the system prompt includes current date/time, OS platform, and Node.js version.
# Set to false for fully deterministic/reproducible runs (e.g. snapshot tests).
RUNTIME_CONTEXT_ENABLED=true
# Interactive UI mode
# "cli" uses the existing readline interface; "tui" launches the Ink-based multi-pane terminal UI.
UI_MODE=cli
# Skills directory — auto-loads *.skill.md files on startup (Task 6.1)
SKILLS_DIR=
# LLM response recording and replay (Task 5.3)
# Set to true to record real LLM API responses as JSON fixture files
RECORD_LLM_RESPONSES=false
# Directory where recorded fixture files are stored and MockChatModel.fromFixture reads from
LLM_FIXTURE_DIR=tests/fixtures/llm-responses
# Agent profiles directory — auto-loads *.agent.json / *.agent.yaml files on startup (Task 7.1)
AGENT_PROFILES_DIR=
# Orchestrator engine
# "default" uses the existing agentic loop (agentExecutor).
# "langgraph" uses the LangGraphJS-based graphExecutor with blocks-plan, fork/join, and replanning.
ORCHESTRATOR=default