-
Notifications
You must be signed in to change notification settings - Fork 29
Expand file tree
/
Copy path.env.example
More file actions
57 lines (48 loc) · 2.42 KB
/
.env.example
File metadata and controls
57 lines (48 loc) · 2.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# Memoria Configuration Template
# Copy this file to .env and fill in your actual values
# Docker user (run containers as current user to avoid permission issues)
# Set to your uid:gid — run: id -u && id -g
DOCKER_UID=1000
DOCKER_GID=1000
# Master key for API authentication (generate with: openssl rand -hex 32)
MEMORIA_MASTER_KEY=your-master-key-here
# Embedding provider configuration
# Option 1: Local embedding (no API key needed, ~900MB download on first use)
# Note: the default Docker image does NOT include `local-embedding` support.
# For Docker, use an HTTP embedding provider or rebuild Memoria with that feature enabled.
# MEMORIA_EMBEDDING_PROVIDER=local
# MEMORIA_EMBEDDING_MODEL=all-MiniLM-L6-v2
# MEMORIA_EMBEDDING_DIM=384
#
# Hugging Face Settings (for Local Provider):
# HF_ENDPOINT=https://hf-mirror.com # Use mirror if official site is slow/blocked
# HF_HUB_OFFLINE=1 # Set to 1 ONLY AFTER model is cached locally
# TRANSFORMERS_OFFLINE=1 # Set to 1 ONLY AFTER model is cached locally
# Option 2: OpenAI official
# MEMORIA_EMBEDDING_PROVIDER=openai
# MEMORIA_EMBEDDING_MODEL=text-embedding-3-small
# MEMORIA_EMBEDDING_DIM=1536
# MEMORIA_EMBEDDING_API_KEY=sk-your-openai-key-here
# Option 3: OpenAI-compatible service (SiliconFlow, Ollama, etc.)
MEMORIA_EMBEDDING_PROVIDER=openai
MEMORIA_EMBEDDING_MODEL=BAAI/bge-m3
MEMORIA_EMBEDDING_DIM=1024
MEMORIA_EMBEDDING_API_KEY=your-embedding-api-key-here
MEMORIA_EMBEDDING_BASE_URL=https://api.siliconflow.cn/v1
# Option 4: Multi-endpoint round-robin (same model, multiple API keys/URLs)
# When set, MEMORIA_EMBEDDING_ENDPOINTS supersedes BASE_URL/API_KEY above.
# All endpoints must serve the same model. Requests are distributed in round-robin order;
# if one endpoint is rate-limited (429) or fails, the next is tried automatically.
# MEMORIA_EMBEDDING_ENDPOINTS=[{"url":"https://api1.example.com/v1","api_key":"sk-key1"},{"url":"https://api2.example.com/v1","api_key":"sk-key2"}]
# MatrixOne database configuration
MATRIXONE_DATA_DIR=./data/matrixone
MATRIXONE_DEBUG_PORT=6060
# Rate limiting — each value is "max_requests,window_seconds" (e.g. 1000,60 = 1000 requests per minute)
MEMORIA_RATE_LIMIT_AUTH_KEYS=1000,60
MEMORIA_RATE_LIMIT_CONSOLIDATE=100,60
MEMORIA_RATE_LIMIT_REFLECT=100,60
MEMORIA_RATE_LIMIT_EXTRACT_ENTITIES=100,60
# Optional: Internal LLM for reflection/entity extraction
# MEMORIA_LLM_API_KEY=your-llm-api-key-here
# MEMORIA_LLM_BASE_URL=https://api.magikcloud.cn/v1
# MEMORIA_LLM_MODEL=ep-deepseek-v3-2-104138