Skip to content

Commit 688bd2a

Browse files
SmartManoj and neubig
authored
Added local ollama models (OpenHands#2433)
* added local ollama models * add ollama_base_url config * Update listen.py * add docs * Update opendevin/server/listen.py Co-authored-by: Graham Neubig <[email protected]> * lint --------- Co-authored-by: Graham Neubig <[email protected]>
1 parent 6853cbb commit 688bd2a

3 files changed

Lines changed: 19 additions & 1 deletion

File tree

docs/modules/usage/llms/localLLMs.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,11 @@ But when running `docker run`, you'll need to add a few more arguments:
3535
--add-host host.docker.internal:host-gateway \
3636
-e LLM_API_KEY="ollama" \
3737
-e LLM_BASE_URL="http://host.docker.internal:11434" \
38+
-e LLM_OLLAMA_BASE_URL="http://host.docker.internal:11434" \
3839
```
3940

41+
LLM_OLLAMA_BASE_URL is optional. If you set it, it will be used to show the available installed models in the UI.
42+
4043
For example:
4144

4245
```bash

opendevin/core/config.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ class LLMConfig(metaclass=Singleton):
4747
max_output_tokens: The maximum number of output tokens. This is sent to the LLM.
4848
input_cost_per_token: The cost per input token. This will be available in logs for the user to check.
4949
output_cost_per_token: The cost per output token. This will be available in logs for the user to check.
50+
ollama_base_url: The base URL for the OLLAMA API.
5051
"""
5152

5253
model: str = 'gpt-4o'
@@ -71,6 +72,7 @@ class LLMConfig(metaclass=Singleton):
7172
max_output_tokens: int | None = None
7273
input_cost_per_token: float | None = None
7374
output_cost_per_token: float | None = None
75+
ollama_base_url: str | None = None
7476

7577
def defaults_to_dict(self) -> dict:
7678
"""

opendevin/server/listen.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import uuid
44
import warnings
55

6+
import requests
67
from pathspec import PathSpec
78
from pathspec.patterns import GitWildMatchPattern
89

@@ -190,7 +191,7 @@ async def attach_session(request: Request, call_next):
190191
async def websocket_endpoint(websocket: WebSocket):
191192
"""
192193
WebSocket endpoint for receiving events from the client (i.e., the browser).
193-
Once connected, you can send various actions:
194+
Once connected, the client can send various actions:
194195
- Initialize the agent:
195196
session management, and event streaming.
196197
```json
@@ -311,6 +312,18 @@ async def get_litellm_models():
311312
)
312313
bedrock_model_list = bedrock.list_foundation_models()
313314
model_list = litellm_model_list_without_bedrock + bedrock_model_list
315+
ollama_base_url = config.llm.ollama_base_url
316+
if config.llm.model.startswith('ollama'):
317+
if not ollama_base_url:
318+
ollama_base_url = config.llm.base_url
319+
if ollama_base_url:
320+
ollama_url = ollama_base_url.strip('/') + '/api/tags'
321+
try:
322+
ollama_models_list = requests.get(ollama_url, timeout=3).json()['models']
323+
for model in ollama_models_list:
324+
model_list.append('ollama/' + model['name'])
325+
except requests.exceptions.RequestException as e:
326+
logger.error(f'Error getting OLLAMA models: {e}', exc_info=True)
314327

315328
return list(sorted(set(model_list)))
316329

0 commit comments

Comments
 (0)