-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.py
More file actions
98 lines (74 loc) · 2.64 KB
/
config.py
File metadata and controls
98 lines (74 loc) · 2.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
"""
Centralized project configuration.
Allows switching LLM provider, configuring cache and vector DB.
"""
import os
from enum import Enum
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings
class LLMProvider(str, Enum):
    """Closed set of supported LLM backends.

    The string values are what `Settings.llm_provider` parses from the
    environment (e.g. LLM_PROVIDER=openai) and what `get_llm()` dispatches on.
    """
    ANTHROPIC = "anthropic"
    OPENAI = "openai"
    OLLAMA = "ollama"
class Settings(BaseSettings):
    """Application settings via environment variables.

    Each field is populated by pydantic-settings from an environment
    variable of the same (case-insensitive) name, or from the `.env`
    file configured below; the annotated value is the fallback default.
    """
    # LLM
    llm_provider: LLMProvider = Field(default=LLMProvider.ANTHROPIC)
    anthropic_api_key: str = Field(default="")  # required only when provider is anthropic
    openai_api_key: str = Field(default="")     # required only when provider is openai
    ollama_base_url: str = Field(default="http://localhost:11434")

    # Redis
    redis_url: str = Field(default="redis://localhost:6379")
    cache_ttl_seconds: int = Field(default=300)  # 5 minutes

    # Qdrant
    qdrant_url: str = Field(default="http://localhost:6333")
    qdrant_collection: str = Field(default="incidents")

    # Embedding — sentence-transformers model name passed to HuggingFaceEmbeddings
    embedding_model: str = Field(default="all-MiniLM-L6-v2")

    # Application
    reports_dir: str = Field(default="./reports")
    demo_mode: bool = Field(default=False)  # when True, get_llm() returns None (no external calls)
    verbose: bool = Field(default=True)

    # Pre-analysis
    # Minimum cosine/similarity score to treat an incident as a match
    # (assumed — confirm against the vector-search caller).
    similarity_threshold: float = Field(default=0.85)

    class Config:
        # Load overrides from a local .env file when present.
        env_file = ".env"
        env_file_encoding = "utf-8"
# Global instance — import `settings` from this module so the whole
# application shares one parsed configuration.
settings = Settings()
def get_llm(model: Optional[str] = None):
    """Return a chat LLM instance based on the configured provider.

    Supports Anthropic, OpenAI and Ollama.

    Args:
        model: Optional model-name override. When None (the default), the
            previous hard-coded per-provider default is used, so existing
            callers are unaffected:
            anthropic -> "claude-sonnet-4-20250514",
            openai    -> "gpt-4o",
            ollama    -> "llama3".

    Returns:
        A LangChain chat model instance, or None when demo_mode is enabled.

    Raises:
        ValueError: if the configured provider is not supported.
    """
    # Demo mode deliberately short-circuits all external LLM calls.
    if settings.demo_mode:
        return None

    if settings.llm_provider == LLMProvider.ANTHROPIC:
        # Imported lazily so only the active provider's package must be installed.
        from langchain_anthropic import ChatAnthropic
        return ChatAnthropic(
            model=model or "claude-sonnet-4-20250514",
            api_key=settings.anthropic_api_key,
            max_tokens=2048,
        )
    if settings.llm_provider == LLMProvider.OPENAI:
        from langchain_openai import ChatOpenAI
        return ChatOpenAI(
            model=model or "gpt-4o",
            api_key=settings.openai_api_key,
            max_tokens=2048,
        )
    if settings.llm_provider == LLMProvider.OLLAMA:
        from langchain_community.chat_models import ChatOllama
        return ChatOllama(
            model=model or "llama3",
            base_url=settings.ollama_base_url,
        )

    # Defensive: unreachable while llm_provider holds a valid LLMProvider,
    # but guards against future enum members without a branch above.
    raise ValueError(f"Unsupported provider: {settings.llm_provider}")
def get_embeddings():
    """Return the embedding model used to vectorize documents for the vector DB."""
    # Imported here so the (heavy) embeddings dependency is only loaded on use.
    from langchain_community.embeddings import HuggingFaceEmbeddings

    # Run on CPU and L2-normalize the output vectors.
    model_kwargs = {'device': 'cpu'}
    encode_kwargs = {'normalize_embeddings': True}
    return HuggingFaceEmbeddings(
        model_name=settings.embedding_model,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )