# local-llm-gpu
# Forked from TransformerOptimus/SuperAGI.
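#
# Runs the SuperAGI stack against a locally hosted LLM served by
# text-generation-webui (super__tgwui) with GPU acceleration:
#   backend         - SuperAGI API            (host port 8001)
#   celery          - background task workers
#   gui             - Next.js frontend        (host port 3000)
#   super__tgwui    - text-generation-webui   (ports 7860/5000/5005/5001)
#   super__redis    - broker for the celery workers
#   super__postgres - application database    (host port 5432)
#
# A typical launch, assuming the file keeps this name:
#   docker compose -f local-llm-gpu up --build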
version: '3.8'
services:
  backend:
    volumes:
      - "./:/app"
    build: .
    ports:
      - "8001:8001"
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network
  celery:
    volumes:
      - "./:/app"
    build:
      context: .
      dockerfile: DockerfileCelery
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network
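  # Note: backend and celery both bind-mount the repository root at /app, so
  # local source changes are picked up by the API and the workers alike.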
  gui:
    build: ./gui
    ports:
      - "3000:3000"
    environment:
      - NEXT_PUBLIC_API_BASE_URL=http://localhost:8001
    networks:
      - super_network
    volumes:
      - ./gui:/app
      - /app/node_modules
      - /app/.next
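  # /app/node_modules and /app/.next are anonymous volumes; they keep the
  # dependencies and Next.js build output produced inside the container from
  # being shadowed by the ./gui bind mount above.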
  super__tgwui:
    build:
      context: .
      target: llama-cublas
      dockerfile: ./tgwui/DockerfileTGWUI
      # args:
      #   - LCL_SRC_DIR=text-generation-webui # Developers - see Dockerfile app_base
    container_name: super__tgwui
    environment:
      - EXTRA_LAUNCH_ARGS="--listen --no-mmap --verbose --extensions openai --auto-devices --n_ctx 1600 --gpu-memory 20 20 --n-gpu-layers 128 --threads 8 --model vicuna-13b-cot.ggmlv3.q8_0.bin"
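    # A rough breakdown of the launch flags above (text-generation-webui CLI;
    # consult its --help output for the authoritative list):
    #   --listen                accept connections from outside the container
    #   --extensions openai     enable the OpenAI-compatible API (port 5001)
    #   --auto-devices          split the model across available devices
    #   --gpu-memory 20 20      per-GPU memory cap in GiB, one value per GPU
    #   --n-gpu-layers 128      model layers offloaded to the GPU(s)
    #   --n_ctx 1600            prompt context length in tokens
    #   --no-mmap               load the model into memory instead of mmap-ing it
    #   --threads 8             CPU threads for non-offloaded work
    #   --model ...             GGML model file, resolved from /app/models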
    ports:
      - 7860:7860 # Default web port
      - 5000:5000 # Default API port
      - 5005:5005 # Default streaming port
      - 5001:5001 # Default OpenAI API extension port
    volumes:
      - ./tgwui/config/loras:/app/loras
      - ./tgwui/config/models:/app/models
      - ./tgwui/config/presets:/app/presets
      - ./tgwui/config/prompts:/app/prompts
      - ./tgwui/config/softprompts:/app/softprompts
      - ./tgwui/config/training:/app/training
      - ./tgwui/config/embeddings:/app/embeddings
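    # The model named in EXTRA_LAUNCH_ARGS (vicuna-13b-cot.ggmlv3.q8_0.bin)
    # must be placed in ./tgwui/config/models on the host; the mount above
    # makes it visible to text-generation-webui at /app/models.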
    logging:
      driver: json-file
      options:
        max-file: "3"   # keep at most 3 rotated log files
        max-size: '10m' # rotate when a log file reaches 10 MB
    networks:
      - super_network
    ### The following lines run the container using the host machine's GPU
    ### resources; comment them out to run without GPU access.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # count: "all"
              device_ids: ['0', '1'] # use either device_ids or count, not both
              capabilities: [gpu]
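    # GPU access here relies on the NVIDIA Container Toolkit being installed
    # on the host. If you change which GPUs are reserved, keep the number of
    # --gpu-memory values in EXTRA_LAUNCH_ARGS in step with the GPUs chosen.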
  super__redis:
    image: "docker.io/library/redis:latest"
    networks:
      - super_network
  super__postgres:
    image: "docker.io/library/postgres:latest"
    environment:
      - POSTGRES_USER=superagi
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=super_agi_main
    volumes:
      - superagi_postgres_data:/var/lib/postgresql/data/
    networks:
      - super_network
    ports:
      - "5432:5432"

networks:
  super_network:
    driver: bridge

volumes:
  superagi_postgres_data: