-
Notifications
You must be signed in to change notification settings - Fork 18
Expand file tree
/
Copy pathdocker-compose.yaml
More file actions
131 lines (124 loc) · 2.85 KB
/
docker-compose.yaml
File metadata and controls
131 lines (124 loc) · 2.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
---
# ChatFAQ local development stack: Postgres (pgvector), Redis, Django back end,
# a Ray worker, the SDK state machine, the chat widget, and the admin UI.
# NOTE: the top-level `version` key is informational only in the current
# Compose specification; kept for compatibility with older Compose binaries.
version: "3.8"
services:
  # Postgres with the pgvector extension (embedding storage for RAG).
  postgres:
    image: ankane/pgvector:v0.5.0  # alternative base: postgres:15-alpine
    container_name: postgres
    hostname: postgres
    volumes:
      - chatfaq_db_data:/var/lib/postgresql/data/
    environment:
      - PGDATA=/var/lib/postgresql/data/pgdata/
      - POSTGRES_USER=chatfaq
      - POSTGRES_PASSWORD=chatfaq
      - POSTGRES_DB=chatfaq
    networks:
      - chatfaq_db_net

  # Redis broker/cache; not exposed on the host by default.
  redis:
    image: redis:6.2-alpine
    hostname: redis
    # ports:
    #   - "6379:6379"
    networks:
      - chatfaq_db_net
    # command: redis-server --save 20 1 --loglevel warning --requirepass eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
    # volumes:
    #   - cache:/data

  # Django back end; also hosts the Ray head (8265 = Ray dashboard, 8000 = API).
  back:
    build:
      context: back
      dockerfile: Dockerfile
    image: chatfaq_back
    container_name: chatfaq_back
    hostname: back
    depends_on:
      - postgres
      - redis
      - ray
    ports:
      # Quoted so YAML never misreads digit:digit scalars (1.1 sexagesimal trap).
      - "8265:8265"
      - "8000:8000"
    networks:
      - default
      - chatfaq_db_net
      - chatfaq_back_net
      - chatfaq_state_machine_net
    # volumes:
    #   - ./back/back:/back

  # Ray worker built from the back-end image with the chat-rag extras installed;
  # joins the head at back:6375 and advertises custom task resources.
  ray:
    build:
      context: back
      dockerfile: Dockerfile
      args:
        - INSTALL_CHAT_RAG=true
    container_name: chatfaq_ray
    command: ["/.venv/bin/ray", "start", "--address=back:6375", "--resources={\"tasks\": 100, \"ai_components\": 100}", "--block"]
    networks:
      - default
      - chatfaq_db_net
      - chatfaq_back_net
      - chatfaq_state_machine_net
    environment:
      - CUDA_VISIBLE_DEVICES=0
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits:
          cpus: '4.0'
          memory: 4G
    # extra_hosts:
    #   - host.docker.internal:host-gateway

  # vllm:
  #   image: vllm/vllm-openai:latest
  #   container_name: chatfaq_vllm
  #   command: ["python", "-m", "vllm.entrypoints.openai.api_server", "--model", "Qwen/Qwen1.5-0.5B-Chat", "--port", "5000", "--max-model-len", "8192", "--swap-space", "1"]
  #   ports:
  #     - "5000:5000"
  #   networks:
  #     - default
  #     - chatfaq_back_net

  # SDK-driven finite-state-machine service; source mounted for live reload.
  state-machine:
    build:
      context: sdk
      dockerfile: Dockerfile
    image: chatfaq_fsm
    container_name: chatfaq_fsm
    hostname: fsm
    depends_on:
      - back
    networks:
      - chatfaq_state_machine_net
    volumes:
      - ./sdk/chatfaq_sdk:/chatfaq_sdk
      - ./sdk/examples:/examples

  # Embeddable chat widget front end (host 3003 -> container 3000).
  widget:
    build:
      context: widget
      dockerfile: Dockerfile
    image: chatfaq_widget
    container_name: chatfaq_widget
    depends_on:
      - back
    ports:
      - "3003:3000"
    networks:
      - default

  # Admin UI front end.
  admin:
    build:
      context: admin
      dockerfile: Dockerfile
    image: chatfaq_admin
    container_name: chatfaq_admin
    depends_on:
      - back
    ports:
      - "3000:3000"
    networks:
      - default

volumes:
  chatfaq_db_data:

networks:
  chatfaq_db_net:
  chatfaq_back_net:
  chatfaq_state_machine_net: