# docker-bake.hcl - vLLM Docker build configuration
#
# This file lives in the vLLM repo at docker/docker-bake.hcl
#
# Usage:
#   cd docker && docker buildx bake        # Build default target (openai)
#   cd docker && docker buildx bake test   # Build test target
#   docker buildx bake --print             # Show resolved config
#
# Reference: https://docs.docker.com/build/bake/reference/

# Build configuration
variable "MAX_JOBS" {
  default = 16   # parallel compilation jobs for the extension build
}

variable "NVCC_THREADS" {
  default = 8    # threads per nvcc invocation
}

variable "TORCH_CUDA_ARCH_LIST" {
  default = "8.0 8.9 9.0 10.0"   # CUDA compute capabilities to compile kernels for
}

variable "COMMIT" {
  default = ""   # git commit SHA recorded in the image revision label/annotation
}
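
# All of the variables above can be overridden at build time without editing this
# file, either through same-named environment variables or with --set (the values
# below are illustrative):
#   MAX_JOBS=8 NVCC_THREADS=4 docker buildx bake
#   COMMIT=$(git rev-parse HEAD) docker buildx bake
#   docker buildx bake --set openai.args.torch_cuda_arch_list="9.0"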

# Groups
group "default" {
  targets = ["openai"]
}

# Base targets (not built directly; inherited by the build targets below)
target "_common" {
  dockerfile = "docker/Dockerfile"
  context    = "."
  args = {
    max_jobs             = MAX_JOBS
    nvcc_threads         = NVCC_THREADS
    torch_cuda_arch_list = TORCH_CUDA_ARCH_LIST
  }
}
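
# Note: the lowercase keys in _common.args are assumed to match the ARG names
# declared in docker/Dockerfile (max_jobs, nvcc_threads, torch_cuda_arch_list);
# adjust them here if those ARGs are ever renamed.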
target "_labels" {
labels = {
"org.opencontainers.image.source" = "https://github.com/vllm-project/vllm"
"org.opencontainers.image.vendor" = "vLLM"
"org.opencontainers.image.title" = "vLLM"
"org.opencontainers.image.description" = "vLLM: A high-throughput and memory-efficient inference and serving engine for LLMs"
"org.opencontainers.image.licenses" = "Apache-2.0"
"org.opencontainers.image.revision" = COMMIT
}
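  # The "index,manifest:" prefix attaches the revision annotation at both the image
  # index and image manifest levels (Bake's default is manifest only).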
  annotations = [
    "index,manifest:org.opencontainers.image.revision=${COMMIT}",
  ]
}

# Build targets
target "test" {
  inherits = ["_common", "_labels"]
  target   = "test"
  tags     = ["vllm:test"]
  output   = ["type=docker"]
}
target "openai" {
inherits = ["_common", "_labels"]
target = "vllm-openai"
tags = ["vllm:openai"]
output = ["type=docker"]
}
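
# Both targets load the built image into the local Docker daemon (output type=docker).
# To push from CI instead, the output and tags could be overridden on the command
# line, e.g. (registry/repo name is illustrative):
#   docker buildx bake openai \
#     --set openai.output=type=registry \
#     --set openai.tags=ghcr.io/example/vllm:openai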