# cluster.yaml
name: "Clustering test"
on:
push:
branches:
- main
pull_request:
branches: [main]
release:
types:
- published
jobs:
  client-tools:
    name: Stateless zot with shared reliable storage
    runs-on: ubuntu-latest
    # services:
    #   minio:
    #     image: minio/minio:edge-cicd
    #     env:
    #       MINIO_ROOT_USER: minioadmin
    #       MINIO_ROOT_PASSWORD: minioadmin
    #     ports:
    #       - 9000:9000
    #     volumes:
    #       - /tmp/data:/data
    #     options: --name=minio --health-cmd "curl http://localhost:9000/minio/health/live"
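    # The services block above is kept for reference but disabled; the
    # "Setup minio service" step below starts the same minio container by hand,
    # presumably so the job controls its health check and startup ordering.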
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: 1.18.x
      - name: Install dependencies
        run: |
          cd $GITHUB_WORKSPACE
          go get -u github.com/swaggo/swag/cmd/swag
          go mod download
          sudo apt-get update
          sudo apt-get -y install rpm uidmap
          # install skopeo
          . /etc/os-release
          echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
          curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
          sudo apt-get update
          sudo apt-get -y upgrade
          sudo apt-get -y install skopeo
          # install notation
          curl -Lo notation.tar.gz https://github.com/notaryproject/notation/releases/download/v0.7.1-alpha.1/notation_0.7.1-alpha.1_linux_amd64.tar.gz
          sudo tar xvzf notation.tar.gz -C /usr/bin notation
          # install oras
          curl -LO https://github.com/oras-project/oras/releases/download/v0.12.0/oras_0.12.0_linux_amd64.tar.gz
          mkdir -p oras-install/
          tar -zxf oras_0.12.0_*.tar.gz -C oras-install/
          sudo mv oras-install/oras /usr/bin/
          rm -rf oras_0.12.0_*.tar.gz oras-install/
          # install haproxy
          sudo apt-get -y install haproxy
      - name: Setup minio service
        run: |
          docker run -d -p 9000:9000 --name minio \
            -e "MINIO_ACCESS_KEY=minioadmin" \
            -e "MINIO_SECRET_KEY=minioadmin" \
            -v /tmp/data:/data \
            -v /tmp/config:/root/.minio \
            --health-cmd "curl http://localhost:9000/minio/health/live" \
            minio/minio:edge-cicd server /data
      - name: Install py minio
        run: pip3 install minio
      - name: Wait for minio to come up
        run: |
          curl --connect-timeout 5 \
            --max-time 10 \
            --retry 12 \
            --retry-max-time 120 \
            'http://localhost:9000/minio/health/live'
      - name: Create minio bucket
        run: |
          python3 - <<'EOF'
          from minio import Minio
          # create the shared bucket the zot instances will use for storage;
          # any exception here fails the step, so no try/except is needed
          minio = Minio(
              'localhost:9000',
              access_key='minioadmin',
              secret_key='minioadmin',
              secure=False
          )
          minio.make_bucket('zot-storage')
          print(f'{minio.list_buckets()}')
          EOF
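      # zot's s3 storage backend (configured in test/cluster/config-minio.json)
      # is expected to point at this 'zot-storage' bucket; the cleanup step later
      # removes /tmp/data/zot-storage/zot, the bucket's backing directory on disk.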
      - name: Run haproxy
        run: |
          sudo haproxy -d -f examples/cluster/haproxy.cfg -D
          sleep 10
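      # haproxy, presumably per examples/cluster/haproxy.cfg, listens on port 8080
      # and load-balances across the three zot instances on ports 8081-8083; all
      # tests below reach the cluster through http://localhost:8080.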
      - name: Prepare configuration files
        run: |
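          # config-minio.json starts out on port 8081; each sed below bumps the
          # port in place, so the three copies end up listening on 8081, 8082 and
          # 8083 respectively, all sharing the same minio bucket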
          cp test/cluster/config-minio.json test/cluster/config-minio1.json
          sed -i 's/8081/8082/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio2.json
          sed -i 's/8082/8083/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio3.json
      - name: Run push-pull tests
        run: |
          make binary
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run tests
          skopeo --debug copy --format=oci --dest-tls-verify=false docker://ghcr.io/project-zot/golang:1.18 docker://localhost:8080/golang:1.18
          skopeo --debug copy --src-tls-verify=false docker://localhost:8080/golang:1.18 oci:golang:1.18
          echo "{\"name\":\"foo\",\"value\":\"bar\"}" > config.json
          echo "hello world" > artifact.txt
          oras push localhost:8080/hello-artifact:v2 \
            --manifest-config config.json:application/vnd.acme.rocket.config.v1+json \
            artifact.txt:text/plain -d -v
          rm -f artifact.txt # delete the local copy before pulling it back
          oras pull localhost:8080/hello-artifact:v2 -d -v -a
          # the pulled artifact.txt should contain "hello world"
          if ! grep -q "hello world" artifact.txt; then
            killall -r zot-*
            exit 1
          fi
          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
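      # AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY above (and in the benchmark
      # steps below) are the minio credentials; zot's s3 storage layer presumably
      # picks them up via the standard AWS SDK environment variables.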
      - name: Run benchmark with --src-cidr arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run zb with --src-cidr
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-cidr 127.0.0.0/8 http://localhost:8080
          killall -r zot-*
          # clean zot storage
          sudo rm -rf /tmp/data/zot-storage/zot
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
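      # --src-cidr (above) and --src-ips (below) make zb issue requests from a
      # spread of source addresses, presumably so haproxy sees distinct clients
      # and balances the load across all three zot backends.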
      - name: Run benchmark with --src-ips arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run zb with --src-ips
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-ips 127.0.0.2,127.0.0.3,127.0.0.4,127.0.0.5,127.0.0.6,127.0.12.5,127.0.12.6 http://localhost:8080
          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
      # Download previous benchmark result from cache (if it exists)
      - name: Download previous benchmark data
        uses: actions/cache@v1
        with:
          path: ./cache
          key: ${{ runner.os }}-benchmark-stateless-cluster
      # Run `github-action-benchmark` action
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          # What benchmark tool the output came from
          tool: 'customBiggerIsBetter'
          # Where the output from the benchmark tool is stored
          output-file-path: ci-cd.json
          # Where the previous data file is stored
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Upload the updated cache file for the next job by actions/cache