Skip to content

Commit b1b96df

Browse files
yimothysurbren
and authored
Replace environment variables with configuration file (OpenHands#339)
* Replace environment variables with configuration file * Add config.toml to .gitignore * Remove unused os imports * Update README.md * Update README.md * Update README.md * Fix merge conflict * Fallback to environment variables * Use template file for config.toml * Update config.toml.template * Update config.toml.template --------- Co-authored-by: Robert Brennan <[email protected]>
1 parent b443c0a commit b1b96df

11 files changed

Lines changed: 95 additions & 40 deletions

File tree

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,3 +192,6 @@ yarn-error.log*
192192
.envrc
193193
/workspace
194194
/debug
195+
196+
# configuration
197+
config.toml

README.md

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -26,51 +26,58 @@ First, make sure Docker is running:
2626
```bash
2727
docker ps # this should exit successfully
2828
```
29+
2930
Then pull our latest image [here](https://github.com/opendevin/OpenDevin/pkgs/container/sandbox)
3031
```bash
3132
docker pull ghcr.io/opendevin/sandbox:v0.1
3233
```
34+
35+
Then copy `config.toml.template` to `config.toml`. Add an API key to `config.toml`.
36+
(See below for how to use different models.)
37+
```toml
38+
OPENAI_API_KEY="..."
39+
WORKSPACE_DIR="..."
40+
```
41+
42+
Next, start the backend.
3343
We manage python packages and the virtual environment with `pipenv`.
34-
Make sure python >= 3.10.
44+
Make sure you have python >= 3.10.
3545
```bash
3646
python -m pip install pipenv
3747
pipenv install -v
3848
pipenv shell
3949

40-
export OPENAI_API_KEY="..."
41-
export WORKSPACE_DIR="/path/to/your/project"
4250
python -m pip install -r requirements.txt
4351
uvicorn opendevin.server.listen:app --port 3000
4452
```
4553

46-
Then in a second terminal:
54+
Then, in a second terminal, start the frontend:
4755
```bash
4856
cd frontend
4957
npm install
5058
npm start
5159
```
52-
The virtual environment is now activated and you should see `(OpenDevin)` in front of your cmdline prompt.
5360

5461
### Picking a Model
5562
We use LiteLLM, so you can run OpenDevin with any foundation model, including OpenAI, Claude, and Gemini.
5663
LiteLLM has a [full list of providers](https://docs.litellm.ai/docs/providers).
5764

58-
To change the model, set the `LLM_MODEL` and `LLM_API_KEY` environment variables.
65+
To change the model, set the `LLM_MODEL` and `LLM_API_KEY` in `config.toml`.
5966

6067
For example, to run Claude:
61-
```bash
62-
export LLM_API_KEY="your-api-key"
63-
export LLM_MODEL="claude-3-opus-20240229"
68+
```toml
69+
LLM_API_KEY="your-api-key"
70+
LLM_MODEL="claude-3-opus-20240229"
6471
```
6572

6673
You can also set the base URL for local/custom models:
67-
```bash
68-
export LLM_BASE_URL="https://localhost:3000"
74+
```toml
75+
LLM_BASE_URL="https://localhost:3000"
6976
```
7077

7178
And you can customize which embeddings are used for the vector database storage:
72-
```bash
73-
export LLM_EMBEDDING_MODEL="llama2" # can be "llama2", "openai", "azureopenai", or "local"
79+
```toml
80+
LLM_EMBEDDING_MODEL="llama2" # can be "llama2", "openai", "azureopenai", or "local"
7481
```
7582

7683
### Running the app

agenthub/langchains_agent/utils/memory.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,37 +1,36 @@
1-
import os
2-
31
import chromadb
42
from llama_index.core import Document
53
from llama_index.core.retrievers import VectorIndexRetriever
64
from llama_index.core import VectorStoreIndex
75
from llama_index.vector_stores.chroma import ChromaVectorStore
86

7+
from opendevin import config
98
from . import json
109

11-
embedding_strategy = os.getenv("LLM_EMBEDDING_MODEL", "local")
10+
embedding_strategy = config.get_or_default("LLM_EMBEDDING_MODEL", "local")
1211

1312
# TODO: More embeddings: https://docs.llamaindex.ai/en/stable/examples/embeddings/OpenAI/
1413
# There's probably a more programmatic way to do this.
1514
if embedding_strategy == "llama2":
1615
from llama_index.embeddings.ollama import OllamaEmbedding
1716
embed_model = OllamaEmbedding(
1817
model_name="llama2",
19-
base_url=os.getenv("LLM_BASE_URL", "http://localhost:8000"),
18+
base_url=config.get_or_default("LLM_BASE_URL", "http://localhost:8000"),
2019
ollama_additional_kwargs={"mirostat": 0},
2120
)
2221
elif embedding_strategy == "openai":
2322
from llama_index.embeddings.openai import OpenAIEmbedding
2423
embed_model = OpenAIEmbedding(
25-
base_url=os.getenv("LLM_BASE_URL"),
24+
base_url=config.get_or_error("LLM_BASE_URL"),
2625
)
2726
elif embedding_strategy == "azureopenai":
2827
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding # Need to instruct to set these env variables in documentation
2928
embed_model = AzureOpenAIEmbedding(
3029
model="text-embedding-ada-002",
31-
deployment_name=os.getenv("LLM_DEPLOYMENT_NAME"),
32-
api_key=os.getenv("LLM_API_KEY"),
33-
azure_endpoint=os.getenv("LLM_BASE_URL"),
34-
api_version=os.getenv("LLM_API_VERSION"),
30+
deployment_name=config.get_or_error("LLM_DEPLOYMENT_NAME"),
31+
api_key=config.get_or_error("LLM_API_KEY"),
32+
azure_endpoint=config.get_or_error("LLM_BASE_URL"),
33+
api_version=config.get_or_error("LLM_API_VERSION"),
3534
)
3635
else:
3736
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

agenthub/langchains_agent/utils/prompts.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
1-
import os
2-
31
from typing import List, Dict, Type
42

53
from langchain_core.pydantic_v1 import BaseModel
64
from langchain.prompts import PromptTemplate
75
from langchain_core.output_parsers import JsonOutputParser
86

9-
if os.getenv("DEBUG"):
7+
from opendevin import config
8+
9+
if config.get_or_default("DEBUG", False):
1010
from langchain.globals import set_debug
1111
set_debug(True)
1212

config.toml.template

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
# This is a template. Run `cp config.toml.template config.toml` to use it.
2+
3+
OPENAI_API_KEY="<YOUR OPENAI API KEY>"
4+
WORKSPACE_DIR="./workspace"

evaluation/regression/run_tests.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
1-
import os
21
import sys
32
import pytest
43

4+
from opendevin import config
5+
56
if __name__ == '__main__':
67
args = ['-v', 'evaluation/regression/cases']
78
for arg in sys.argv[1:]:
89
if arg.startswith('--OPENAI_API_KEY='):
9-
os.environ['OPENAI_API_KEY'] = arg.split('=')[1]
10+
config.config['OPENAI_API_KEY'] = arg.split('=')[1]
1011
elif arg.startswith('--model='):
1112
args.append(f'-o model={arg.split('=')[1]}')
1213
pytest.main(args)

opendevin/config.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
import os
2+
import tomllib
3+
4+
from dotenv import load_dotenv
5+
6+
load_dotenv()

# Load config.toml when it exists; otherwise fall back to environment
# variables only. An empty mapping makes every lookup in _get() defer to
# os.environ, so a missing config.toml is not an error (the original
# unconditional open() raised FileNotFoundError on a fresh checkout).
config = {}
if os.path.exists("config.toml"):
    with open("config.toml", "rb") as f:
        config = tomllib.load(f)
10+
11+
def _get(key: str, default):
    """Return the value for *key* from config.toml, falling back to the
    environment and finally to *default*.

    Checks for None/empty explicitly instead of generic truthiness so a
    legitimately falsy configured value (0, False) is honoured rather than
    being silently overridden by the environment or the default.
    """
    value = config.get(key)
    if value is None or value == "":
        value = os.environ.get(key, default)
    return value
16+
17+
def get_or_error(key: str):
    """Return the configured value for *key*.

    Raises:
        KeyError: if *key* is set neither in `config.toml` nor in the
            environment. The check is `is None` rather than truthiness so
            valid falsy settings (0, False) do not spuriously raise.
    """
    value = get_or_none(key)
    if value is None:
        raise KeyError(f"Please set '{key}' in `config.toml` or `.env`.")
    return value
25+
26+
def get_or_default(key: str, default):
    """Look up *key* in the config/environment; return *default* when it
    is configured nowhere."""
    return _get(key, default)
31+
32+
def get_or_none(key: str):
    """Look up *key* in the config/environment; return None when it is
    configured nowhere."""
    return _get(key, None)

opendevin/llm/llm.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,12 @@
44
from litellm import completion as litellm_completion
55
from functools import partial
66

7-
DEFAULT_MODEL = os.getenv("LLM_MODEL", "gpt-4-0125-preview")
8-
DEFAULT_API_KEY = os.getenv("LLM_API_KEY")
9-
DEFAULT_BASE_URL = os.getenv("LLM_BASE_URL")
10-
PROMPT_DEBUG_DIR = os.getenv("PROMPT_DEBUG_DIR", "")
7+
from opendevin import config
8+
9+
DEFAULT_MODEL = config.get_or_default("LLM_MODEL", "gpt-4-0125-preview")
10+
DEFAULT_API_KEY = config.get_or_none("LLM_API_KEY")
11+
DEFAULT_BASE_URL = config.get_or_none("LLM_BASE_URL")
12+
PROMPT_DEBUG_DIR = config.get_or_default("PROMPT_DEBUG_DIR", "")
1113

1214
class LLM:
1315
def __init__(self, model=DEFAULT_MODEL, api_key=DEFAULT_API_KEY, base_url=DEFAULT_BASE_URL, debug_dir=PROMPT_DEBUG_DIR):

opendevin/main.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
import os
21
import asyncio
32
import argparse
43

54
from typing import Type
65

76
import agenthub # noqa F401 (we import this to get the agents registered)
7+
from opendevin import config
88
from opendevin.agent import Agent
99
from opendevin.controller import AgentController
1010
from opendevin.llm.llm import LLM
@@ -35,7 +35,7 @@
3535
parser.add_argument(
3636
"-m",
3737
"--model-name",
38-
default=os.getenv("LLM_MODEL") or "gpt-4-0125-preview",
38+
default=config.get_or_default("LLM_MODEL", "gpt-4-0125-preview"),
3939
type=str,
4040
help="The (litellm) model name to use",
4141
)

opendevin/sandbox/sandbox.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,19 +10,21 @@
1010
import docker
1111
import concurrent.futures
1212

13+
from opendevin import config
14+
1315
InputType = namedtuple("InputType", ["content"])
1416
OutputType = namedtuple("OutputType", ["content"])
1517

16-
DIRECTORY_REWRITE = os.getenv(
18+
DIRECTORY_REWRITE = config.get_or_default(
1719
"DIRECTORY_REWRITE", ""
1820
) # helpful for docker-in-docker scenarios
19-
CONTAINER_IMAGE = os.getenv("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox:v0.1")
21+
CONTAINER_IMAGE = config.get_or_default("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox:v0.1")
2022
# FIXME: On some containers, the devin user doesn't have enough permission, e.g. to install packages
2123
# How do we make this more flexible?
22-
RUN_AS_DEVIN = os.getenv("RUN_AS_DEVIN", "true").lower() != "false"
24+
RUN_AS_DEVIN = config.get_or_default("RUN_AS_DEVIN", "true").lower() != "false"
2325
USER_ID = 1000
24-
if os.getenv("SANDBOX_USER_ID") is not None:
25-
USER_ID = int(os.getenv("SANDBOX_USER_ID", ""))
26+
if config.get_or_none("SANDBOX_USER_ID") is not None:
27+
USER_ID = int(config.get_or_default("SANDBOX_USER_ID", ""))
2628
elif hasattr(os, "getuid"):
2729
USER_ID = os.getuid()
2830

0 commit comments

Comments
 (0)