# requirements.txt (117 lines, 2.45 KB)
absl-py==2.1.0
accelerate==0.27.2
arrow==1.3.0
bert-score==0.3.13
bitsandbytes==0.43.0
datasets==2.18.0
deepspeed==0.13.1
einops==0.7.0
evaluate==0.4.2
fairscale==0.4.13
fastapi==0.110.0
fastjsonschema==2.19.1
fire==0.5.0
flash-attn==2.5.9.post1
gradio==3.50.2
gradio_client==0.6.1
httpcore==1.0.5
httplib2==0.22.0
httptools==0.6.1
httpx==0.27.0
huggingface-hub==0.23.2
ipython
ipywidgets==8.1.2
json5==0.9.25
jsonpatch==1.33
jsonpointer==2.4
jsonschema==4.21.1
langchain==0.1.13
langchain-community==0.0.34
langchain-core==0.1.45
langchain-text-splitters==0.0.1
langdetect==1.0.9
langsmith==0.1.31
llama-index==0.10.31
llama-index-agent-openai==0.2.3
llama-index-cli==0.1.12
llama-index-core==0.10.31
llama-index-embeddings-langchain==0.1.2
llama-index-embeddings-openai==0.1.8
llama-index-indices-managed-llama-cloud==0.1.5
llama-index-legacy==0.9.48
llama-index-llms-huggingface
llama-index-llms-openai==0.1.16
llama-index-multi-modal-llms-openai==0.1.5
llama-index-program-openai==0.1.6
llama-index-question-gen-openai==0.1.3
llama-index-readers-file==0.1.19
llama-index-readers-llama-parse==0.1.4
llama-parse==0.4.1
llamaindex-py-client==0.1.18
llvmlite==0.42.0
lm-format-enforcer==0.10.1
looseversion==1.3.0
loralib==0.1.2
lxml==4.9.4
matplotlib==3.8.3
mistune==3.0.2
mpi==1.0.0  # NOTE(review): the PyPI package named 'mpi' is an empty stub — likely mpi4py was intended (deepspeed commonly needs it); verify before installing
multidict==6.0.5
multiprocess==0.70.16
ninja==1.11.1.1
numba==0.59.1
numpy==1.26.4
nvidia-cublas-cu12==12.1.3.1
nvidia-cuda-cupti-cu12==12.1.105
nvidia-cuda-nvrtc-cu12==12.1.105
nvidia-cuda-runtime-cu12==12.1.105
nvidia-cudnn-cu12==8.9.2.26
nvidia-cufft-cu12==11.0.2.54
nvidia-curand-cu12==10.3.2.106
nvidia-cusolver-cu12==11.4.5.107
nvidia-cusparse-cu12==12.1.0.106
nvidia-ml-py==12.550.52
nvidia-nccl-cu12==2.20.5
nvidia-nvjitlink-cu12==12.4.99
nvidia-nvtx-cu12==12.1.105
pandas==2.2.1
peft==0.9.0
pillow==10.2.0
protobuf==4.25.3
pyarrow==15.0.1
PyMuPDF==1.23.26
pyparsing==3.1.2
pypdf==4.2.0
PyPDF2  # NOTE(review): unpinned (inconsistent with the rest of this file) and deprecated upstream in favor of pypdf, which is already pinned above — consider pinning or removing
pyramid==2.0.2
python-json-logger==2.0.7
python-multipart==0.0.9
python3-openid==3.2.0
PyYAML==6.0.1
qtconsole==5.5.1
QtPy==2.4.1
ray==2.23.0
regex==2023.12.25
requests==2.31.0
requests-oauthlib==2.0.0
rouge-score==0.1.2
safetensors==0.4.2
scikit-learn==1.4.2
scipy==1.12.0
sentence-transformers==2.7.0
sentencepiece==0.2.0
six
tiktoken==0.6.0
tokenizers==0.19.1
torch==2.3.0
tornado
tqdm
transformers==4.41.2
triton==2.3.0
trl==0.7.11
urllib3==2.2.1
vllm==0.5.0
# git+https://github.com/vllm-project/vllm.git@5d7e3d0176e0dbcf144c64b7d14d996c55e36c50#egg=vllm
wandb==0.17.0
websockets==11.0.3
xformers==0.0.26.post1