OPSEXP-2411 Drop privileged for traefik containers (#1213) #1652
---
name: Helm (Enterprise)
on:
  pull_request:
    branches:
      - master
      - release/**
      - next/**
    paths:
      - helm/**
      - test/postman/helm/**
      - .github/workflows/helm*
      - .github/actions/charts-as-json/**
      - test/enterprise-integration-test-values.yaml
  push:
    branches:
      - master
      - release/**
concurrency:
  group: helm-ent-${{ github.head_ref || github.ref_name }}
  cancel-in-progress: true
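# Two jobs: build_vars computes the matrix of chart/values combinations to test,
# and helm_integration deploys each combination on the cluster provisioned by the
# setup-kind action and runs the integration test suites against it.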
jobs:
  build_vars:
    runs-on: ubuntu-latest
    if: >-
      github.event_name == 'push'
      || (
        ! github.event.repository.fork
        && github.actor != 'dependabot[bot]'
      )
    outputs:
      ver_json: ${{ steps.app_versions.outputs.json }}
    steps:
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
        with:
          fetch-depth: 0
      - name: Get charts
        id: getcharts
        uses: ./.github/actions/charts-as-json
        with:
          charts-root: helm
      - name: Select ACS enterprise versions excluding any deprecated versions
        id: app_versions
        env:
          JSON: ${{ toJSON(fromJSON(steps.getcharts.outputs.all)) }}
          JQ_FILTER: >-
            [inputs | .charts[] | {name: .name, values: .values[]}
            | del(. | select(.values=="community_values.yaml"))
            | del(. | select(.values=="7.1.N_values.yaml"))
            | del(. | select(.values=="7.2.N_values.yaml"))
            | select(.name=="alfresco-content-services")]
        run: |
          echo "${JSON}" | jq -nc '${{ env.JQ_FILTER }}'
          VERS=$(echo "${JSON}" | jq -nc '${{ env.JQ_FILTER }}')
          echo "json=$VERS" >> "$GITHUB_OUTPUT"
  helm_integration:
    runs-on: alfrescoPub-ubuntu2204-16G-4CPU
    timeout-minutes: 12
    needs:
      - build_vars
    name: ${{ matrix.values }} on ${{ matrix.name }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJSON(needs.build_vars.outputs.ver_json) }}
    steps:
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: "3.14.3"
      - name: Login to Docker Hub
        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: Login to Quay.io
        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_PASSWORD }}
      - name: Setup cluster
        uses: Alfresco/alfresco-build-tools/.github/actions/[email protected]
        with:
          ingress-nginx-ref: controller-v1.8.2
          metrics: "true"
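      # Snippet annotations are not allowed by default on the ingress-nginx controller;
      # the patch below enables them, presumably because the chart's ingress resources
      # rely on snippet annotations.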
      - name: Set nginx ingress config
        run: >-
          kubectl -n ingress-nginx patch cm ingress-nginx-controller
          -p '{"data": {"allow-snippet-annotations":"true"}}'
      - name: Create registries auth secret
        run: >-
          kubectl create secret generic regcred
          --from-file=.dockerconfigjson=$HOME/.docker/config.json
          --type=kubernetes.io/dockerconfigjson
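      # ${VALUES_FILE:2:1} extracts the minor-version digit from values file names of
      # the form 7.x.N_values.yaml, so the cgroup v2 workaround values are only added
      # for ACS 7 releases with a minor version of 2 or lower.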
      - name: Check if cgroup v2 workaround is needed
        if: startsWith(matrix.values, '7.')
        env:
          VALUES_FILE: ${{ matrix.values }}
        id: cgroupv2-workaround-extra-values
        run: |
          if [ "${VALUES_FILE:2:1}" -le 2 ]; then
            echo "Enabling cgroup v2 workaround"
            echo "helm_install_params=--values test/cgroup-v2-workaround-values.yaml" >> "$GITHUB_OUTPUT"
          fi
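      # The default and pre-release values files also get the autoscaling test values,
      # letting the repository scale up to 2 replicas so clustering is exercised; the
      # renditions tests are skipped on those runs because they are flaky when clustered.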
      - name: Check if we want additional helm customizations
        id: configurable-extra-values
        run: |
          if [ "${{ matrix.values }}" = "values.yaml" ] || [ "${{ matrix.values }}" = "pre-release_values.yaml" ]; then
            echo "Enabling clustered tests via auto-scaling: 2 replicas max"
            echo "Renditions tests will be skipped because they are particularly flaky on clustered runs"
            echo "helm_install_params=--values test/autoscaling-hpa-test-values.yaml \
              --set dtas.additionalArgs[0]='-k not test_renditions'" >> "$GITHUB_OUTPUT"
          fi
      - name: Add dependency chart repos
        run: |
          helm repo add self https://alfresco.github.io/alfresco-helm-charts/
          helm repo add elastic https://helm.elastic.co/
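      # The install layers values in order: the matrix values file, the shared
      # enterprise integration test values, then any extra values emitted by the
      # cgroup v2 and autoscaling steps above (later files override earlier ones).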
      - name: Helm install
        run: >-
          helm dep build ./helm/alfresco-content-services &&
          helm install acs ./helm/alfresco-content-services
          --set global.search.sharedSecret="$(openssl rand -hex 24)"
          --set global.known_urls=http://localhost
          --set global.alfrescoRegistryPullSecrets=regcred
          --values helm/${{ matrix.name }}/${{ matrix.values }}
          --values test/enterprise-integration-test-values.yaml
          ${{ steps.cgroupv2-workaround-extra-values.outputs.helm_install_params }}
          ${{ steps.configurable-extra-values.outputs.helm_install_params }}
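      # Wait up to 7 minutes for all Deployments to become Available; for matrix
      # entries that are not ACS 7.x, also wait for the Search Enterprise reindexing
      # job to complete before running any tests.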
      - name: Watch Helm deployment
        env:
          VALUES_FILE: ${{ matrix.values }}
        run: |
          kubectl get pods --watch &
          KWPID=$!
          kubectl wait --timeout=7m --all=true --for=condition=Available deploy && kill $KWPID
          if [ "${VALUES_FILE:0:2}" != "7." ]; then
            echo -n "Waiting for ESC Reindexing job to complete... "
            kubectl wait --timeout=5m --for=condition=complete job/acs-alfresco-search-enterprise-reindexing
            echo "Completed."
          fi
      - name: Spit cluster status after install
        if: always()
        run: |
          helm ls --all-namespaces --all
          helm status acs --show-resources
          kubectl describe pod
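      # The Postman collection is run through the ingress on localhost; the run is
      # retried up to 5 times with a 1-minute timeout, presumably to absorb transient
      # failures while the deployment is still warming up.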
      - name: Run Newman tests
        uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0
        id: newman
        with:
          timeout_minutes: 1
          retry_wait_seconds: 20
          max_attempts: 5
          command: >-
            docker run --network=host
            -v $(pwd)/test/postman:/etc/postman
            -t postman/newman run /etc/postman/helm/acs-test-helm-collection.json
            --global-var protocol=http --global-var url=localhost
      - name: Spit cluster status after newman tests
        if: always() && steps.newman.outcome != 'skipped'
        run: |
          kubectl get all --all-namespaces
          kubectl describe pod
          kubectl events --for deployment/acs-alfresco-repository
      - name: Run helm test
        id: helm_test
        run: helm test acs
      - name: Spit cluster status after helm test
        if: always() && steps.helm_test.outcome != 'skipped'
        run: |
          kubectl logs -l app.kubernetes.io/component=dtas --tail=-1
          kubectl get all --all-namespaces
          kubectl describe pod
          kubectl events --for deployment/acs-alfresco-repository
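      # On autoscaling runs the repository is expected to have scaled to 2 replicas by
      # the end of helm test. The HPA scale-down stabilization window is then patched
      # down to 20s (the Kubernetes default is 300s) so the wait for a scale down back
      # to 1 replica fits within the 2-minute timeout.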
      - name: Check HPA behavior during tests
        id: hpa_check
        if: always() && (matrix.values == 'values.yaml' || matrix.values == 'pre-release_values.yaml') && steps.helm_test.outcome != 'skipped'
        run: |
          echo "Checking current repo hpa status"
          kubectl get hpa acs-alfresco-repository
          echo -n "Repository should have 2 replicas after Helm test... "
          SETSIZE=$(kubectl get deployment/acs-alfresco-repository -o jsonpath='{@.spec.replicas}')
          if [ "$SETSIZE" -ne 2 ]; then
            echo "Got $SETSIZE instead"
            exit 1
          else
            echo "Got $SETSIZE"
            echo -n "Now waiting for scale down (stabilization window shortened to speed it up)... "
            kubectl patch hpa acs-alfresco-repository \
              -p '{"spec": {"behavior": {"scaleDown": {"stabilizationWindowSeconds": 20}}}}'
            kubectl wait --timeout=2m --for=jsonpath='spec.replicas'=1 deployment/acs-alfresco-repository
            echo "Scale down completed"
          fi
      - name: Spit cluster status after HPA check
        if: always() && steps.hpa_check.outcome != 'skipped'
        run: |
          kubectl get all --all-namespaces
          kubectl describe pod
          kubectl events --for deployment/acs-alfresco-repository