From 167296e670b9e6f8a2a6d38bd70ff85e11914d20 Mon Sep 17 00:00:00 2001 From: Alexandre Lamarre Date: Thu, 11 Apr 2024 02:04:11 -0400 Subject: [PATCH] add binary release to gha CI Signed-off-by: Alexandre Lamarre add BRO CI to gha CI Signed-off-by: Alexandre Lamarre rename K8S_VERSION_FROM_DRONE to K8S_VERSION_FROM_CI Signed-off-by: Alexandre Lamarre publish images CI Signed-off-by: Alexandre Lamarre amend CI to use specific kubernetes versions Signed-off-by: Alexandre Lamarre update platform format Signed-off-by: Alexandre Lamarre remove integration tests from make CI Signed-off-by: Alexandre Lamarre [DROP ME] temporarily disable CI action Signed-off-by: Alexandre Lamarre add barebones e2e tests action, with k3d setup Signed-off-by: Alexandre Lamarre modify integration to run off of kube context only e2e test Signed-off-by: Alexandre Lamarre standardize versioning info linkflags Signed-off-by: Alexandre Lamarre temporary changes to e2e gha Signed-off-by: Alexandre Lamarre use k3d inside dapper Signed-off-by: Alexandre Lamarre add kubectl to dapper Signed-off-by: Alexandre Lamarre fix import image in cluster Signed-off-by: Alexandre Lamarre temporarily disable e2e ci because it's useless Signed-off-by: Alexandre Lamarre re-enable CI Signed-off-by: Alexandre Lamarre disable command tracing for setup cluster Signed-off-by: Alexandre Lamarre copy /dist/artifacts to /tmp/dist/artifacts inside dapper container Signed-off-by: Alexandre Lamarre change deploy script basedir in integration script Signed-off-by: Alexandre Lamarre comment out code that handles 'k3s kubectl' Signed-off-by: Alexandre Lamarre change integration path Signed-off-by: Alexandre Lamarre tests should be ../tests Signed-off-by: Alexandre Lamarre force specific kubectl context Signed-off-by: Alexandre Lamarre try and use correct kube context Signed-off-by: Alexandre Lamarre fix order Signed-off-by: Alexandre Lamarre maybe setting up buildx fixes the nodes not coming up Signed-off-by: Alexandre Lamarre 
--- .drone.yml | 8 +-- .github/workflows/ci.yaml | 32 +++++++++ .github/workflows/e2e-test.yaml | 52 +++++++++++++++ .github/workflows/e2e/scripts/install-k3d.sh | 19 ++++++ .github/workflows/publish.yaml | 40 +++++++++++ .github/workflows/release.yaml | 28 ++++++++ .goreleaser.yaml | 46 +++++++++++++ Dockerfile.dapper | 10 ++- main.go | 10 ++- scripts/build | 6 +- scripts/ci | 5 ++ scripts/cleanup-cluster.sh | 4 ++ scripts/deploy | 70 ++++++++++---------- scripts/integration | 70 +++++++++++--------- scripts/package | 9 ++- scripts/setup-cluster.sh | 45 +++++++++++++ scripts/version | 1 + 17 files changed, 373 insertions(+), 82 deletions(-) create mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/e2e-test.yaml create mode 100755 .github/workflows/e2e/scripts/install-k3d.sh create mode 100644 .github/workflows/publish.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 .goreleaser.yaml create mode 100755 scripts/cleanup-cluster.sh create mode 100755 scripts/setup-cluster.sh diff --git a/.drone.yml b/.drone.yml index 30e58b83..b6f0575d 100644 --- a/.drone.yml +++ b/.drone.yml @@ -10,7 +10,7 @@ steps: - name: build image: rancher/dapper:v0.6.0 commands: - - export K8S_VERSION_FROM_DRONE="v1.24" + - export K8S_VERSION_FROM_CI="v1.24" - dapper ci volumes: - name: docker @@ -21,7 +21,7 @@ steps: - name: build_no_psp image: rancher/dapper:v0.6.0 commands: - - export K8S_VERSION_FROM_DRONE="stable" + - export K8S_VERSION_FROM_CI="stable" - dapper ci volumes: - name: docker @@ -129,7 +129,7 @@ steps: - name: build image: rancher/dapper:v0.6.0 commands: - - export K8S_VERSION_FROM_DRONE="v1.24" + - export K8S_VERSION_FROM_CI="v1.24" - dapper ci volumes: - name: docker @@ -138,7 +138,7 @@ steps: - name: build_no_psp image: rancher/dapper:v0.6.0 commands: - - export K8S_VERSION_FROM_DRONE="stable" + - export K8S_VERSION_FROM_CI="stable" - dapper ci volumes: - name: docker diff --git a/.github/workflows/ci.yaml 
b/.github/workflows/ci.yaml new file mode 100644 index 00000000..dc4d9637 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,32 @@ +name: Backup Restore CI + +on: + push: + paths-ignore: + - 'docs/**' + - '*.md' + - '.gitignore' + - 'CODEOWNERS' + - 'LICENSE' + +jobs: + build: + name : CI + runs-on : ubuntu-latest + strategy: + matrix: + K8S_VERSION_FROM_CI : ["v1.24","stable"] + include: + - platform: linux/amd64 + - platform: linux/arm64 + steps: + - name : Checkout repository + uses : actions/checkout@v4 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name : Setup environment variables + run : echo "K8S_VERSION_FROM_CI=${{ matrix.K8S_VERSION_FROM_CI }}" >> $GITHUB_ENV + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name : CI + run : make ci diff --git a/.github/workflows/e2e-test.yaml b/.github/workflows/e2e-test.yaml new file mode 100644 index 00000000..21c01e83 --- /dev/null +++ b/.github/workflows/e2e-test.yaml @@ -0,0 +1,52 @@ +# name: Backup Restore CI + +# on: +# push: +# paths-ignore: +# - 'docs/**' +# - '*.md' +# - '.gitignore' +# - 'CODEOWNERS' +# - 'LICENSE' + +# env : +# K3D_VERSION : v5.4.6 +# CLUSTER_NAME : backup-restore + +# jobs: +# e2e: +# name : e2e-test +# runs-on : ubuntu-latest +# strategy: +# matrix: +# K8S_VERSION_FROM_CI : ["v1.24","stable"] +# include: +# - platform: linux/amd64 +# - platform: linux/arm64 +# steps: +# - name : Checkout repository +# uses : actions/checkout@v4 +# - name: Set up Docker Buildx +# uses: docker/setup-buildx-action@v3 +# - name : Install K3D +# run : ./.github/workflows/e2e/scripts/install-k3d.sh +# - name : Install minio client +# run : | +# curl https://dl.min.io/client/mc/release/linux-amd64/mc > mc +# mv mc /usr/local/bin/ +# - name : Setup environment variables +# run : echo "K8S_VERSION_FROM_CI=${{ matrix.K8S_VERSION_FROM_CI }}" >> $GITHUB_ENV +# - name : CI +# run : make ci +# - name : Setup Cluster +# run : 
./.github/workflows/e2e/scripts/setup-cluster.sh +# - name : Cluster info +# run: | +# kubectl cluster-info --context k3d-${{ env.CLUSTER_NAME }} +# kubectl config use-context k3d-${{ env.CLUSTER_NAME }} +# kubectl get nodes -o wide +# - name : Import BRO images +# run : k3d image import ${{ env.REPO }}/rancher/backup-restore-operator:${{ env.TAG}} -c ${{ env.CLUSTER_NAME }} +# - name : e2e test +# run : ./scripts/integration + diff --git a/.github/workflows/e2e/scripts/install-k3d.sh b/.github/workflows/e2e/scripts/install-k3d.sh new file mode 100755 index 00000000..fa33d6dd --- /dev/null +++ b/.github/workflows/e2e/scripts/install-k3d.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e +set -x + +K3D_URL=https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh +DEFAULT_K3D_VERSION=v5.4.6 + +install_k3d(){ + local k3dVersion=${K3D_VERSION:-${DEFAULT_K3D_VERSION}} + echo -e "Downloading k3d@${k3dVersion} see: ${K3D_URL}" + curl --silent --fail ${K3D_URL} | TAG=${k3dVersion} bash +} + + + +install_k3d + +k3d version \ No newline at end of file diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 00000000..64c2d71e --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,40 @@ +name : Publish Images + +on: + push: + tags: + - "*" + +env: + REGISTRY: docker.io + REPO : rancher + +jobs: + push: + name : Build and push BRO images + runs-on : ubuntu-latest + steps: + - name : Checkout repository + uses: actions/checkout@v4 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name : CI + run : make ci + # setup tag name + - if: ${{ startsWith(github.ref, 'refs/tags/') }} + run: | + echo TAG_NAME=$(echo $GITHUB_REF | sed -e "s|refs/tags/||") >> $GITHUB_ENV + - name: Build and push BRO image + uses: 
docker/build-push-action@v5 + with: + context: . + file: ./package/Dockerfile + push: true + tags: ${{ env.REGISTRY }}/${{ env.REPO }}/backup-restore-operator:${{ env.TAG_NAME }} + platforms: linux/amd64,linux/arm64 \ No newline at end of file diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..7501d625 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,28 @@ +name: goreleaser + +on: + push: + tags: + - '*' + +permissions: + contents: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - run: git fetch --force --tags + - uses: actions/setup-go@v5 + with: + go-version: 1.22 + - uses: goreleaser/goreleaser-action@v5 + with: + distribution: goreleaser + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 00000000..69a27ff0 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,46 @@ +# This is an example .goreleaser.yml file with some sensible defaults. +# Make sure to check the documentation at https://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. 
+ - go mod tidy +builds: + - id: backup-restore-operator + main: ./main.go + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + binary: backup-restore-operator + ldflags: + - -extldflags + - -static + - -s + flags: + - -trimpath + env: + - CGO_ENABLED=0 +# same archives as opentelemetry-collector releases +archives: + - id: backup-restore-operator + builds: + - backup-restore-operator + name_template: '{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}' +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + +# The lines beneath this are called `modelines`. See `:help modeline` +# Feel free to remove those if you don't want/use them. +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +# vim: set ts=2 sw=2 tw=0 fo=cnqoj \ No newline at end of file diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 8e950c0d..aeda2e0a 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -21,11 +21,11 @@ ENV K3S_BINARY_amd64=k3s \ K3S_BINARY_arm64=k3s-arm64 \ K3S_BINARY=K3S_BINARY_${ARCH} -ARG K8S_VERSION_FROM_DRONE -ENV K8S_VERSION $K8S_VERSION_FROM_DRONE +ARG K8S_VERSION_FROM_CI +ENV K8S_VERSION $K8S_VERSION_FROM_CI RUN if [ -z "${K8S_VERSION}" ]; then export K8S_VERSION="v1.24" && echo $(date +%s%N); fi -# ENV K8S_VERSION="${K8S_VERSION_FROM_DRONE:-'v1.24'}" +# ENV K8S_VERSION="${K8S_VERSION_FROM_CI:-'v1.24'}" RUN echo "${K8S_VERSION}" @@ -36,6 +36,10 @@ RUN if [ "${ARCH}" != "s390x" ]; then \ curl -sL https://dl.min.io/client/mc/release/linux-${ARCH}/mc > /usr/local/bin/mc && \ chmod +x /usr/local/bin/mc; \ fi +RUN curl --fail https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=v5.4.6 K3D_INSTALL_DIR="/usr/local/bin" bash +RUN curl -LO --fail "https://dl.k8s.io/release/$(curl -L -s 
https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" && \ + chmod +x kubectl && \ + mv kubectl /usr/local/bin/kubectl ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS CROSS_ARCH USE_DOCKER_BUILDX ENV DAPPER_SOURCE /go/src/github.com/rancher/backup-restore-operator/ diff --git a/main.go b/main.go index a96795e3..256e07d1 100644 --- a/main.go +++ b/main.go @@ -34,8 +34,12 @@ const ( ) var ( - Version = "v0.0.0-dev" - GitCommit = "HEAD" + version = "v0.0.0-dev" + commit = "HEAD" + date = "1970-01-01T00:00:00Z" +) + +var ( LocalBackupStorageLocation = "/var/lib/backups" // local within the pod, this is the mountPath for PVC KubeConfig string OperatorPVEnabled string @@ -81,7 +85,7 @@ func main() { logrus.Tracef("Loglevel set to [%v]", logrus.TraceLevel) } - logrus.Infof("Starting backup-restore controller version %s (%s)", Version, GitCommit) + logrus.Infof("Starting backup-restore controller version %s (%s), built at : %s", version, commit, date) ctx := signals.SetupSignalContext() restKubeConfig, err := kubeconfig.GetNonInteractiveClientConfig(KubeConfig).ClientConfig() if err != nil { diff --git a/scripts/build b/scripts/build index c78fbd16..cb34378e 100755 --- a/scripts/build +++ b/scripts/build @@ -9,8 +9,10 @@ mkdir -p bin if [ "$(uname)" = "Linux" ]; then OTHER_LINKFLAGS="-extldflags -static -s" fi -LINKFLAGS="-X main.Version=$VERSION" -LINKFLAGS="-X main.GitCommit=$COMMIT $LINKFLAGS" + +LINKFLAGS="-X main.version=$VERSION" +LINKFLAGS="-X main.commit=$COMMIT $LINKFLAGS" +LINKFLAGS="-X main.date=$DATE $LINKFLAGS" ARCHES=( "$ARCH" ) diff --git a/scripts/ci b/scripts/ci index d3828726..b25b6da8 100755 --- a/scripts/ci +++ b/scripts/ci @@ -3,6 +3,8 @@ set -e cd $(dirname $0) +export CLUSTER_NAME="backup-restore-operator" + ./build ./test ./validate @@ -10,4 +12,7 @@ cd $(dirname $0) ./package ./chart/test ./hull +# integration tests +./cleanup-cluster.sh +./setup-cluster.sh ./integration diff --git a/scripts/cleanup-cluster.sh b/scripts/cleanup-cluster.sh new 
file mode 100755 index 00000000..1362fbf8 --- /dev/null +++ b/scripts/cleanup-cluster.sh @@ -0,0 +1,4 @@ +set -e +set -x + +k3d cluster delete ${CLUSTER_NAME} || true \ No newline at end of file diff --git a/scripts/deploy b/scripts/deploy index b04912eb..73d4354b 100755 --- a/scripts/deploy +++ b/scripts/deploy @@ -41,11 +41,11 @@ EOF ) KUBECTL_CMD="kubectl" - if command -v k3s &> /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi helm repo add minio https://charts.min.io/ helm repo update @@ -66,11 +66,11 @@ EOF list_minio_files() { KUBECTL_CMD="kubectl" - if command -v k3s &> /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi local POD_NAME POD_NAME=$("${KUBECTL_CMD}" get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") @@ -88,11 +88,11 @@ list_minio_files() { retrieve_minio_files() { KUBECTL_CMD="kubectl" - if command -v k3s &> /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi local POD_NAME POD_NAME=$("${KUBECTL_CMD}" get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") @@ -115,11 +115,11 @@ retrieve_minio_files() { copy_minio_files() { KUBECTL_CMD="kubectl" - if command -v k3s &> /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi local POD_NAME POD_NAME=$("${KUBECTL_CMD}" get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") @@ -142,11 +142,11 @@ copy_minio_files() { reset_minio_bucket() { KUBECTL_CMD="kubectl" - if command -v k3s &> 
/dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi local POD_NAME POD_NAME=$("${KUBECTL_CMD}" get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") @@ -190,11 +190,11 @@ deploy_backup_restore() { create_backup() { KUBECTL_CMD="kubectl" - if command -v k3s &> /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi if [[ "$1" = "insecure" ]]; then ${KUBECTL_CMD} create -f - < /dev/null ; then - KUBECTL_CMD="k3s kubectl" - else - check_kubeconfig - fi + # if command -v k3s &> /dev/null ; then + # KUBECTL_CMD="k3s kubectl" + # else + check_kubeconfig + # fi if [[ "$insecure" = "true" ]]; then ${KUBECTL_CMD} create -f - < /tmp/k3s.log 2>&1 & -k3s_pid=$! +# cd $(dirname $0)/.. -export KUBECONFIG=/etc/rancher/k3s/k3s.yaml +# k3s server --disable servicelb --disable traefik --disable local-storage --disable metrics-server > /tmp/k3s.log 2>&1 & +# k3s_pid=$! -echo_with_time 'Waiting for node to be ready ...' -time timeout 300 bash -c 'while ! (k3s kubectl wait --for condition=ready node/$(hostname) 2>/dev/null); do sleep 5; done' -time timeout 300 bash -c 'while ! (k3s kubectl --namespace kube-system rollout status --timeout 10s deploy/coredns 2>/dev/null); do sleep 5; done' +# export KUBECONFIG=/etc/rancher/k3s/k3s.yaml -echo_with_time "using kubectl and kubernetes versions:" -k3s kubectl version +# echo_with_time 'Waiting for node to be ready ...' +# time timeout 300 bash -c 'while ! (k3s kubectl wait --for condition=ready node/$(hostname) 2>/dev/null); do sleep 5; done' +# time timeout 300 bash -c 'while ! 
(k3s kubectl --namespace kube-system rollout status --timeout 10s deploy/coredns 2>/dev/null); do sleep 5; done' -k3s kubectl get nodes -o wide --show-labels +# echo_with_time "using kubectl and kubernetes versions:" +# k3s kubectl version -docker image save rancher/backup-restore-operator:$TAG -o /tmp/bro.img +# k3s kubectl get nodes -o wide --show-labels -k3s ctr images import /tmp/bro.img +# docker image save rancher/backup-restore-operator:$TAG -o /tmp/bro.img -ls -la ./dist/artifacts +# k3s ctr images import /tmp/bro.img + +## == End of cluster specific setup code == + +ls -la /tmp/dist/artifacts # In case short commit only conists of numbers, it is regarded valid by Helm when packaging # Or if a tag is set (if its a (pre) release) @@ -38,16 +42,16 @@ else HELM_CHART_VERSION=$HELM_VERSION_DEV fi -helm install rancher-backup-crd ./dist/artifacts/rancher-backup-crd-$HELM_CHART_VERSION.tgz -n cattle-resources-system --create-namespace --wait -helm install rancher-backup ./dist/artifacts/rancher-backup-$HELM_CHART_VERSION.tgz -n cattle-resources-system --set image.tag=$TAG --set imagePullPolicy=IfNotPresent +helm install rancher-backup-crd /tmp/dist/artifacts/rancher-backup-crd-$HELM_CHART_VERSION.tgz -n cattle-resources-system --create-namespace --wait +helm install rancher-backup /tmp/dist/artifacts/rancher-backup-$HELM_CHART_VERSION.tgz -n cattle-resources-system --set image.tag=$TAG --set imagePullPolicy=IfNotPresent -time timeout 300 bash -c 'while ! (k3s kubectl --namespace cattle-resources-system rollout status --timeout 10s deploy/rancher-backup 2>/dev/null); do sleep 5; done' +time timeout 300 bash -c 'while ! (kubectl --namespace cattle-resources-system rollout status --timeout 10s deploy/rancher-backup 2>/dev/null); do sleep 5; done' -k3s kubectl get pods -n cattle-resources-system +kubectl get pods -n cattle-resources-system -time timeout 300 bash -c 'while ! 
(k3s kubectl --namespace cattle-resources-system rollout status --timeout 10s deploy/rancher-backup 2>/dev/null); do sleep 5; done' +time timeout 300 bash -c 'while ! (kubectl --namespace cattle-resources-system rollout status --timeout 10s deploy/rancher-backup 2>/dev/null); do sleep 5; done' -k3s kubectl get pods -n cattle-resources-system +kubectl get pods -n cattle-resources-system # Minio not available for s390x, only test on amd64 and arm64 if [ "$ARCH" = "s390x" ]; then @@ -56,10 +60,10 @@ if [ "$ARCH" = "s390x" ]; then fi #Deploy Minio -./scripts/deploy minio +./deploy minio -export POD_NAME=$(k3s kubectl get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") -k3s kubectl port-forward $POD_NAME 9000 --namespace minio & +export POD_NAME=$(kubectl get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward $POD_NAME 9000 --namespace minio & sleep 10 mkdir -p $HOME/.mc/certs/CAs @@ -89,15 +93,15 @@ for BACKUP in rancherbackups-insecure rancherbackups; do if [[ $BACKUP = "rancherbackups-insecure" ]]; then # Backup without CA / insecure TLS configured - ./scripts/deploy create-backup-insecure + ./deploy create-backup-insecure BACKUPRS_NAME="s3-recurring-backup-insecure" else - ./scripts/deploy create-backup + ./deploy create-backup BACKUPRS_NAME="s3-recurring-backup" fi - time timeout 60 bash -c 'while ! (k3s kubectl wait --for condition=ready backup.resources.cattle.io/'"${BACKUPRS_NAME}"' 2>/dev/null); do k3s kubectl get backup.resources.cattle.io -A; k3s kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=-1; sleep 2; done' + time timeout 60 bash -c 'while ! 
(kubectl wait --for condition=ready backup.resources.cattle.io/'"${BACKUPRS_NAME}"' 2>/dev/null); do kubectl get backup.resources.cattle.io -A; kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=-1; sleep 2; done' mc ls --quiet --no-color "miniolocal/${BACKUP}" FIRSTBACKUP=$(mc ls --quiet --no-color miniolocal/${BACKUP} | awk '{ print $NF }') @@ -119,20 +123,20 @@ for BACKUP in rancherbackups-insecure rancherbackups; do fi done # Disable the recurring back-ups by deleting the backup CRD - k3s kubectl delete "backup.resources.cattle.io/${BACKUPRS_NAME}" + kubectl delete "backup.resources.cattle.io/${BACKUPRS_NAME}" done # Restore resource with spec.preserveUnknownFields # https://github.com/rancher/backup-restore-operator/issues/186 -cd tests/files/preserve-unknown-fields +cd ../tests/files/preserve-unknown-fields tar cvzf /tmp/preserve-unknown-fields.tar.gz -- * cd - mc cp --quiet --no-color /tmp/preserve-unknown-fields.tar.gz miniolocal/rancherbackups mc ls --quiet --no-color miniolocal/rancherbackups -k3s kubectl create -f - </dev/null); do k3s kubectl get restore.resources.cattle.io -A; k3s kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=15; sleep 5; done' +time timeout 60 bash -c 'while ! 
(kubectl wait --for condition=ready restore.resources.cattle.io/restore-preserve-unknown-fields 2>/dev/null); do kubectl get restore.resources.cattle.io -A; kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=15; sleep 5; done' # Restore resource with metadata.deletionGracePeriodSeconds # https://github.com/rancher/backup-restore-operator/issues/188 -cd tests/files/deletion-grace-period-seconds +cd ../tests/files/deletion-grace-period-seconds tar cvzf /tmp/deletion-grace-period-seconds.tar.gz -- * cd - mc cp --quiet --no-color /tmp/deletion-grace-period-seconds.tar.gz miniolocal/rancherbackups @@ -165,7 +169,7 @@ mc ls --quiet --no-color miniolocal/rancherbackups for i in $(seq 1 2); do echo "Running restore #${i} with resource having metadata.deletionGracePeriodSeconds" - k3s kubectl create -f - </dev/null); do k3s kubectl get restore.resources.cattle.io -A; k3s kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=15; sleep 5; done' - k3s kubectl delete restore.resources.cattle.io/restore-deletion-grace-period-seconds + time timeout 60 bash -c 'while ! (kubectl wait --for condition=ready restore.resources.cattle.io/restore-deletion-grace-period-seconds 2>/dev/null); do kubectl get restore.resources.cattle.io -A; kubectl -n cattle-resources-system logs -l app.kubernetes.io/name=rancher-backup --tail=15; sleep 5; done' + kubectl delete restore.resources.cattle.io/restore-deletion-grace-period-seconds done diff --git a/scripts/package b/scripts/package index e0a4b80f..cee50339 100755 --- a/scripts/package +++ b/scripts/package @@ -19,10 +19,15 @@ if [ -e ${DOCKERFILE}.${ARCH} ]; then fi if [[ ${USE_DOCKER_BUILDX} -eq 1 ]]; then - docker buildx build --build-arg K8S_VERSION_FROM_DRONE="${K8S_VERSION}" --platform linux/amd64 -f ${DOCKERFILE} . -t ${IMAGE} + docker buildx build --build-arg K8S_VERSION_FROM_CI="${K8S_VERSION}" --platform linux/amd64 -f ${DOCKERFILE} . 
-t ${IMAGE} else - docker build --build-arg K8S_VERSION_FROM_DRONE="${K8S_VERSION}" -f ${DOCKERFILE} -t ${IMAGE} . + docker build --build-arg K8S_VERSION_FROM_CI="${K8S_VERSION}" -f ${DOCKERFILE} -t ${IMAGE} . fi echo Built ${IMAGE} ./scripts/package-helm + +echo "Copying built artifacts to dapper artifacts" + +mkdir -p /tmp/dist/artifacts +cp -rT ./dist/artifacts/ /tmp/dist/artifacts \ No newline at end of file diff --git a/scripts/setup-cluster.sh b/scripts/setup-cluster.sh new file mode 100755 index 00000000..e0c08472 --- /dev/null +++ b/scripts/setup-cluster.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -e + +source $(dirname $0)/version + +# waits until all nodes are ready +wait_for_nodes(){ + echo "wait until all agents are ready" + while : + do + readyNodes=1 + statusList=$(kubectl get nodes --no-headers | awk '{ print $2}') + # shellcheck disable=SC2162 + while read status + do + if [ "$status" == "NotReady" ] || [ "$status" == "" ] + then + readyNodes=0 + break + fi + done <<< "$(echo -e "$statusList")" + # all nodes are ready; exit + if [[ $readyNodes == 1 ]] + then + break + fi + sleep 1 + done +} + +k3d cluster create ${CLUSTER_NAME} + + +wait_for_nodes + +echo "${CLUSTER_NAME} ready" + +kubectl cluster-info --context k3d-${CLUSTER_NAME} +kubectl config use-context k3d-${CLUSTER_NAME} +kubectl get nodes -o wide + +IMAGE=${REPO}/backup-restore-operator:${TAG} + +k3d image import ${IMAGE} -c ${CLUSTER_NAME} diff --git a/scripts/version b/scripts/version index c43d1cc8..b9ccb28d 100755 --- a/scripts/version +++ b/scripts/version @@ -7,6 +7,7 @@ fi COMMIT=$(git rev-parse --short HEAD) GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)} +DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then VERSION=$GIT_TAG