pyproject.toml
[tool.poetry]
name = "docker-llm"
version = "0.1.0"
description = "OpenAI API-Compatible Pre-loaded LLM Server."
authors = ["Ivan Gabriele"]
license = "AGPL-3.0-or-later"
readme = "README.md"

[tool.poetry.dependencies]
# https://github.com/pytorch/pytorch/issues/100974#issuecomment-1541856571
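# Pinning the CUDA 11 runtime wheels explicitly (Linux only) appears to be the workaround for the
# torch/Poetry resolution problem discussed in the issue above; the versions match what the
# torch 2.0.1 Linux wheels declare.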
nvidia-cublas-cu11 = { version = "11.10.3.66", platform = 'linux' }
nvidia-cuda-cupti-cu11 = { version = "11.7.101", platform = 'linux' }
nvidia-cuda-nvrtc-cu11 = { version = "11.7.99", platform = 'linux' }
nvidia-cuda-runtime-cu11 = { version = "11.7.99", platform = 'linux' }
nvidia-cudnn-cu11 = { version = "8.5.0.96", platform = 'linux' }
nvidia-cufft-cu11 = { version = "10.9.0.58", platform = 'linux' }
nvidia-curand-cu11 = { version = "10.2.10.91", platform = 'linux' }
nvidia-cusolver-cu11 = { version = "11.4.0.1", platform = 'linux' }
nvidia-cusparse-cu11 = { version = "11.7.4.91", platform = 'linux' }
nvidia-nccl-cu11 = { version = "2.14.3", platform = 'linux' }
nvidia-nvtx-cu11 = { version = "11.7.91", platform = 'linux' }
python = "^3.11.0"
python-dotenv = "^1.0.0"
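# torch 2.0.1 is built against CUDA 11.x, matching the cu11 wheels pinned above; vllm and fschat
# presumably provide the inference engine and chat layer behind the OpenAI-compatible server
# named in the project description.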
torch = "2.0.1"
vllm = "0.2.0"
accelerate = "0.24.0"
fschat = "0.2.31"

[tool.poetry.group.dev.dependencies]
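# Dev-only dependency: the openai client is presumably used to exercise the server's
# OpenAI-compatible API locally; it is not required at runtime.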
openai = "0.28.1"

[build-system]
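# PEP 517 build configuration using the poetry-core backend.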
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"