name: E2E Tests

on:
  pull_request:

permissions:
  id-token: write
  contents: read

jobs:
  build-provider-e2e-images:
    name: Build Provider E2E Images
    runs-on: [self-hosted, linux, X64, jammy, large]
    steps:
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          # We run into rate limiting issues if we don't authenticate
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Check out repo
        uses: actions/checkout@v4
      - name: Install requirements
        run: |
          sudo apt update
          sudo apt install -y make docker-buildx
          sudo snap install go --classic --channel=1.22/stable
          sudo snap install kubectl --classic --channel=1.30/stable
      - name: Build provider images
        #run: sudo make docker-build-e2e
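        # NOTE: the local `make docker-build-e2e` build is disabled above; pre-built
        # ci-test images are pulled from ghcr.io and retagged as :dev instead.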
        run: |
          docker pull ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:ci-test
          docker tag ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:ci-test ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:dev
          docker pull ghcr.io/canonical/cluster-api-k8s/controlplane-controller:ci-test
          docker tag ghcr.io/canonical/cluster-api-k8s/controlplane-controller:ci-test ghcr.io/canonical/cluster-api-k8s/controlplane-controller:dev
      - name: Save provider image
        run: |
          sudo docker save -o provider-images.tar ghcr.io/canonical/cluster-api-k8s/controlplane-controller:dev ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:dev
          sudo chmod 775 provider-images.tar
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: e2e-images
          path: |
            provider-images.tar

  build-k8s-snap-e2e-images:
    name: Build K8s Snap E2E Images
    if: false
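    # NOTE: this job is currently disabled via `if: false`; its k8s-snap image
    # artifacts are only consumed by run-e2e-tests when the docker infrastructure
    # is selected, which is also commented out in the matrix below.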
    runs-on: [self-hosted, linux, X64, jammy, large]
    steps:
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          # We run into rate limiting issues if we don't authenticate
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Check out repo
        uses: actions/checkout@v4
      - name: Install requirements
        run: |
          sudo apt update
          sudo apt install -y make docker-buildx
          sudo snap install go --classic --channel=1.22/stable
          sudo snap install kubectl --classic --channel=1.30/stable
      - name: Build k8s-snap images
        working-directory: hack/
        run: |
          ./build-e2e-images.sh
      - name: Save k8s-snap image
        run: |
          sudo docker save -o k8s-snap-image-old.tar k8s-snap:dev-old
          sudo docker save -o k8s-snap-image-new.tar k8s-snap:dev-new
          sudo chmod 775 k8s-snap-image-old.tar
          sudo chmod 775 k8s-snap-image-new.tar
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: e2e-images
          path: |
            k8s-snap-image-old.tar
            k8s-snap-image-new.tar

  run-e2e-tests:
    name: Run E2E Tests
    runs-on: [self-hosted, linux, X64, jammy, xlarge]
    needs: [build-provider-e2e-images]
    strategy:
      max-parallel: 1 # Only one at a time because of AWS resource limitations (e.g. the maximum number of Elastic IPs)
      matrix:
        infra:
          - "aws"
          #- "docker"
        ginkgo_focus:
          #- "KCP remediation"
          #- "MachineDeployment remediation"
          - "Workload cluster creation"
          #- "Workload cluster scaling"
          #- "Workload cluster upgrade"
      # TODO(ben): Remove once all tests are running stably.
      fail-fast: false
    steps:
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          # We run into rate limiting issues if we don't authenticate
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Check out repo
        uses: actions/checkout@v4
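      # Starts a detached tmate session so the runner can be reached over SSH for
      # debugging; `detached: true` keeps it from blocking the rest of the job.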
      - name: Setup tmate session
        uses: canonical/action-tmate@main
        with:
          detached: true
      - name: Install requirements
        run: |
          sudo apt update
          sudo snap install go --classic --channel=1.22/stable
          sudo snap install kubectl --classic --channel=1.31/stable
          sudo apt install -y make
          ./hack/install-aws-nuke.sh
      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: e2e-images
          path: .
      - name: Load provider image
        run: sudo docker load -i provider-images.tar
      - name: Load k8s-snap old image
        if: matrix.infra == 'docker'
        run: |
          sudo docker load -i k8s-snap-image-old.tar
      - name: Load k8s-snap new image
        if: matrix.infra == 'docker' && matrix.ginkgo_focus == 'Workload cluster upgrade'
        run: |
          sudo docker load -i k8s-snap-image-new.tar
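      # kind and the CAPD docker provider use a bridge network named "kind"; it is
      # created up front here so IP masquerading can be enabled explicitly.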
      - name: Create docker network
        run: |
          sudo docker network create kind --driver=bridge -o com.docker.network.bridge.enable_ip_masquerade=true
      - name: Increase inotify watches
        run: |
          # Prevents https://cluster-api.sigs.k8s.io/user/troubleshooting#cluster-api-with-docker----too-many-open-files
          sudo sysctl fs.inotify.max_user_watches=1048576
          sudo sysctl fs.inotify.max_user_instances=8192
      - name: Install clusterawsadm
        if: matrix.infra == 'aws'
        run: |
          curl -L https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.6.1/clusterawsadm-linux-amd64 -o clusterawsadm
          chmod +x ./clusterawsadm
          sudo mv ./clusterawsadm /usr/local/bin
          clusterawsadm version
      - name: Configure AWS Credentials
        id: creds
        if: matrix.infra == 'aws'
        uses: aws-actions/configure-aws-credentials@v4
        with:
          audience: sts.amazonaws.com
          aws-region: us-east-2
          role-to-assume: arn:aws:iam::018302341396:role/GithubOIDC
          role-duration-seconds: 3600
          output-credentials: true
      - name: Set AWS Credentials as Environment Variables
        if: matrix.infra == 'aws'
        run: |
          # Persist the credentials for later steps via GITHUB_ENV, and export them
          # in this shell for the clusterawsadm call below.
          echo "AWS_ACCESS_KEY_ID=${{ steps.creds.outputs.aws-access-key-id }}" >> "$GITHUB_ENV"
          echo "AWS_SECRET_ACCESS_KEY=${{ steps.creds.outputs.aws-secret-access-key }}" >> "$GITHUB_ENV"
          echo "AWS_SESSION_TOKEN=${{ steps.creds.outputs.aws-session-token }}" >> "$GITHUB_ENV"
          export AWS_ACCESS_KEY_ID=${{ steps.creds.outputs.aws-access-key-id }}
          export AWS_SECRET_ACCESS_KEY=${{ steps.creds.outputs.aws-secret-access-key }}
          export AWS_SESSION_TOKEN=${{ steps.creds.outputs.aws-session-token }}
          AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile --region us-east-2)
          echo "AWS_B64ENCODED_CREDENTIALS=$AWS_B64ENCODED_CREDENTIALS" >> "$GITHUB_ENV"
          echo "::add-mask::$AWS_B64ENCODED_CREDENTIALS"
      - name: Run e2e tests
        if: ${{ !(matrix.infra == 'aws' && (matrix.ginkgo_focus == 'KCP remediation' || matrix.ginkgo_focus == 'MachineDeployment remediation')) }}
        run: |
          sudo -E E2E_INFRA=${{ matrix.infra }} GINKGO_FOCUS="${{ matrix.ginkgo_focus }}" SKIP_RESOURCE_CLEANUP=true make test-e2e
      - name: Cleanup AWS account
        if: ${{ always() && matrix.infra == 'aws' }}
        run: |
          aws-nuke run --config ./hack/aws-nuke-config.yaml --force --force-sleep 3 --no-dry-run