forked from SREGym/SREGym
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
47 lines (37 loc) · 1.44 KB
/
.env.example
File metadata and controls
47 lines (37 loc) · 1.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# .env.example — environment-variable template; copy to .env and adjust values.
# Lines starting with # are comments; commented-out keys are optional.
# Submission API
# Host and port the submission API binds to ("0.0.0.0" listens on all interfaces).
API_HOSTNAME="0.0.0.0"
API_PORT="8000"
# MCP Server Setting
MCP_SERVER_PORT=9954
# Whether to expose the MCP server externally — presumably parsed as a boolean
# by the application; confirm which values (True/False/true/false) it accepts.
EXPOSE_SERVER=False
# Maximum number of sessions the cache can hold; the least-recently-used
# session is evicted when the cache overflows.
SESSION_CACHE_SIZE=10000
# Seconds without any interaction with the server after which a session is
# considered inactive; cache entries for inactive sessions are deleted during
# the next cache-mutating operation.
SESSION_TTL=600
# Timeout in seconds for waiting for pods to become ready during deployment.
# Increase this value if you have slow network connections (e.g., first deployment).
# Default: 600 (10 minutes)
# Recommended for slow networks: 1800 (30 minutes)
WAIT_FOR_POD_READY_TIMEOUT=600
# LangGraph Tool Setting
# NOTE(review): ${MCP_SERVER_PORT} is expanded only by loaders that support
# variable interpolation (e.g. python-dotenv) or by a POSIX shell `source`;
# verify the loader used by this project expands it.
MCP_SERVER_URL=http://127.0.0.1:${MCP_SERVER_PORT}
# Agent LLM Config
# Maximum number of retries for API calls
LLM_QUERY_MAX_RETRIES=5
# Initial delay in seconds for retries
LLM_QUERY_INIT_RETRY_DELAY=1
# LLM Judge Model (for evaluating agent diagnoses)
# If not set, defaults to gpt-4o
JUDGE_MODEL_ID="gpt-5"
### LLM API Keys ###
# Uncomment and set the key(s) for the provider(s) you use. The values below
# are placeholders — never commit real credentials; keep them in an untracked .env.
# GEMINI_API_KEY="AIHaveFreeFood_LotsOfIt"
# OPENAI_API_KEY="sk-proj-HaveSleep_LotsOfIt"
# ANTHROPIC_API_KEY="sk-ant-api03-HaveCats_LotsOfIt_Meow"
# AWS_PROFILE="default"
# AWS_DEFAULT_REGION=us-east-2
# WATSONX_API_BASE="https://us-south.ml.cloud.ibm.com"
# WX_PROJECT_ID="HaveIllinisLotsOfIt"
# WATSONX_API_KEY="HaveCornsLotsOfIt"
# AZURE_API_KEY="HaveBluesLotsOfIt"
# AZURE_API_BASE="HaveBasesLotsOfIt"