pyproject.toml
[tool.poetry]
name = "selfie"
version = "0.1.0"
description = "Data mixin for LLMs"
authors = ["Vana <[email protected]>"]
readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.9,<4.0" # >=3.9 for numpy, <4 for llama-index
beautifulsoup4 = "^4.12.3"
colorlog = "^6.8.2"
fastapi = "^0.109.0"
uvicorn = "^0.27.0"
humanize = "^4.9.0"
llama-cpp-python = ">=0.2.26, <0.2.56"
litellm = "1.16.0" # Versions newer than 1.16.0 break macOS builds: https://github.com/BerriAI/litellm/issues/2607
txtai = {version = "^7.0.0", extras = ["pipeline-llm"]}
sse-starlette = "^2.0.0"
llama-index = "^0.10.4"
numpy = "^1.26.4"
html2text = "^2020.1.16"
peewee = "^3.17.0"
pypdf = "^4.0.1"
python-multipart = "^0.0.9"
ngrok = "^1.0.0"
python-dotenv = "^1.0.1"
pillow = "^10.2.0"
pyqt6 = "^6.6.1"
swig = "^4.2.1"
torch = ">=2.0, <2.2.1" # 2.2.1 and 2.2.2 break macOS builds

# Experimental GPU Features
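# These packages are excluded from default installs (optional = true) and only
# pulled in when the "gpu" extra below is selected.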
auto-gptq = { version = "^0.6.0", optional = true }
optimum = { version = "^1.16.2", optional = true }
autoawq = { version = "^0.1.8", optional = true }

[tool.poetry.extras]
gpu = ["auto-gptq", "optimum", "autoawq"]
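# Opt in with: poetry install --extras gpu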

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
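# poetry-core implements the standard PEP 517 build interface, so the package
# can also be built and installed without Poetry itself, e.g. via `pip install .`.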