Commit b392fed

Test pull requests using github actions

aledbf committed Jul 3, 2020
1 parent 57d1eb6 commit b392fed
Showing 18 changed files with 213 additions and 55 deletions.
117 changes: 117 additions & 0 deletions .github/workflows/ci.yaml
@@ -0,0 +1,117 @@
name: CI

on:
  pull_request:
    branches:
      - "*"
  push:
    branches:
      - master

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest

    steps:
      - name: Set up Go 1.14
        uses: actions/setup-go@v1
        with:
          go-version: 1.14
        id: go

      - name: Checkout
        uses: actions/checkout@v1

      - name: Set up Docker Buildx
        id: buildx
        uses: crazy-max/ghaction-docker-buildx@v1
        with:
          buildx-version: latest
          qemu-version: latest

      - name: Available platforms
        run: echo ${{ steps.buildx.outputs.platforms }}

      - name: Prepare Host
        run: |
          sudo apt-get -qq update || true
          sudo apt-get install -y pigz
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl
          chmod +x ./kubectl
          sudo mv ./kubectl /usr/local/bin/kubectl

      - name: Build images
        env:
          TAG: 1.0.0-dev
          ARCH: amd64
          REGISTRY: ingress-controller
        run: |
          echo "building images..."
          make clean-image build image
          make -C test/e2e-image image
          echo "creating images cache..."
          docker save \
            nginx-ingress-controller:e2e \
            ingress-controller/nginx-ingress-controller:1.0.0-dev \
            | pigz > docker.tar.gz

      - name: cache
        uses: actions/upload-artifact@v2
        with:
          name: docker.tar.gz
          path: docker.tar.gz

  kubernetes:
    name: Kubernetes
    runs-on: ubuntu-latest
    needs: build
    strategy:
      matrix:
        k8s: [v1.14.10, v1.15.11, v1.16.9, v1.17.5, v1.18.4]

    steps:
      - name: Checkout
        uses: actions/checkout@v1

      - name: cache
        uses: actions/download-artifact@v2
        with:
          name: docker.tar.gz

      - name: Create Kubernetes ${{ matrix.k8s }} cluster
        id: kind
        uses: engineerd/[email protected]
        with:
          version: v0.8.1
          config: test/e2e/kind.yaml
          image: kindest/node:${{ matrix.k8s }}

      # Delete the uploaded images artifact once this job has downloaded it;
      # failOnError is false because a parallel matrix job may already have removed it.
      - uses: geekyeggo/delete-artifact@v1
        with:
          name: docker.tar.gz
          failOnError: false

      - name: Prepare cluster for testing
        id: local-path
        run: |
          kubectl version
          echo
          echo "installing helm 3..."
          curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash

      - name: Load images from cache
        run: |
          echo "loading docker images..."
          pigz -dc docker.tar.gz | docker load

      - name: Run e2e tests
        env:
          KIND_CLUSTER_NAME: kind
          SKIP_CLUSTER_CREATION: true
          SKIP_IMAGE_CREATION: true
        run: |
          kind get kubeconfig > $HOME/.kube/kind-config-kind
          make kind-e2e-test
2 changes: 1 addition & 1 deletion Makefile
@@ -33,7 +33,7 @@ TAG ?= $(shell cat VERSION)
# Allow limiting the scope of the e2e tests. By default run everything
FOCUS ?= .*
# number of parallel test
-E2E_NODES ?= 10
+E2E_NODES ?= 8
# run e2e test suite with tests that check for memory leaks? (default is false)
E2E_CHECK_LEAKS ?=
1 change: 1 addition & 0 deletions test/e2e/annotations/affinitymode.go
@@ -104,6 +104,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
			assert.Nil(ginkgo.GinkgoT(), err)
+			framework.Sleep()

			response = request.WithCookies(cookies).Expect()
			newHostName := getHostnameFromResponseBody(response.Body().Raw())
			assert.Equal(ginkgo.GinkgoT(), originalHostName, newHostName,
6 changes: 5 additions & 1 deletion test/e2e/annotations/auth.go
@@ -470,6 +470,8 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
				return strings.Contains(server, "location /bar")
			})
		}
+
+		framework.Sleep()
	})

	ginkgo.It("should return status code 200 when signed in after auth backend is deleted ", func() {

@@ -482,6 +484,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

		err := f.DeleteDeployment(framework.HTTPBinService)
		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

		f.HTTPTestClient().
			GET(fooPath).

@@ -501,6 +504,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

		err := f.DeleteDeployment(framework.HTTPBinService)
		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

		f.HTTPTestClient().
			GET(fooPath).

@@ -516,7 +520,6 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
			WithBasicAuth("user", "password").
			Expect().
			Status(http.StatusInternalServerError)
-
	})

	ginkgo.It("should deny login for different servers", func() {

@@ -530,6 +533,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

		err := f.DeleteDeployment(framework.HTTPBinService)
		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

		ginkgo.By("receiving an internal server error without cache on thisHost location /bar")
		f.HTTPTestClient().
2 changes: 1 addition & 1 deletion test/e2e/annotations/influxdb.go
@@ -72,7 +72,7 @@ var _ = framework.DescribeAnnotation("influxdb-*", func() {
			Expect().
			Status(http.StatusOK)

-		time.Sleep(10 * time.Second)
+		framework.Sleep(10 * time.Second)

		var measurements string
		var err error
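The recurring change in these test files is the swap from raw time.Sleep to framework.Sleep, which the diff calls both with and without a duration. A minimal sketch of such a variadic helper, assuming a one-second default (the real framework.Sleep implementation is not part of this diff):

package framework

import "time"

// Sleep pauses the current test, falling back to a default when no
// duration is passed. Sketch only: the one-second default is an
// assumption, as the actual implementation is not shown in this commit.
func Sleep(duration ...time.Duration) {
	sleepFor := 1 * time.Second // assumed default
	if len(duration) > 0 {
		sleepFor = duration[0]
	}

	time.Sleep(sleepFor)
}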
2 changes: 1 addition & 1 deletion test/e2e/annotations/serversnippet.go
@@ -31,7 +31,7 @@ var _ = framework.DescribeAnnotation("server-snippet", func() {
		f.NewEchoDeployment()
	})

-	ginkgo.It(`add valid directives to server via server snippet"`, func() {
+	ginkgo.It(`add valid directives to server via server snippet`, func() {
		host := "serversnippet.foo.com"
		annotations := map[string]string{
			"nginx.ingress.kubernetes.io/server-snippet": `
3 changes: 0 additions & 3 deletions test/e2e/defaultbackend/custom_default_backend.go
@@ -42,13 +42,10 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
			args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
			deployment.Spec.Template.Spec.Containers[0].Args = args
			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
-			framework.Sleep()
			return err
		})
		assert.Nil(ginkgo.GinkgoT(), err, "updating deployment")

-		framework.Sleep()
-
		f.WaitForNginxServer("_",
			func(server string) bool {
				return strings.Contains(server, `set $proxy_upstream_name "upstream-default-backend"`)
50 changes: 42 additions & 8 deletions test/e2e/framework/framework.go
@@ -17,7 +17,9 @@ import (
	"context"
	"crypto/tls"
	"fmt"
+	"net"
	"net/http"
+	"os/exec"
	"strings"
	"time"

@@ -106,17 +108,16 @@ func (f *Framework) BeforeEach() {
	err = f.newIngressController(f.Namespace, f.BaseName)
	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")

-	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
-		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
-	})
-	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+	f.WaitForNginxListening(80)
}

// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
	defer func(kubeClient kubernetes.Interface, ns string) {
-		err := deleteKubeNamespace(kubeClient, ns)
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		go func() {
+			err := deleteKubeNamespace(kubeClient, ns)
+			assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		}()
	}(f.KubeClientSet, f.Namespace)

	if !ginkgo.CurrentGinkgoTestDescription().Failed {

@@ -205,7 +206,7 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
	err := wait.PollImmediate(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
-	Sleep()
+	Sleep(1 * time.Second)
}

// WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration

@@ -473,17 +474,45 @@ func (f *Framework) newTestClient(config *tls.Config) *httpexpect.Expect {
	})
}

+// WaitForNginxListening waits until NGINX starts accepting connections on a port
+func (f *Framework) WaitForNginxListening(port int) {
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+
+	podIP := f.GetNginxIP()
+	err = wait.Poll(500*time.Millisecond, DefaultTimeout, func() (bool, error) {
+		conn, err := net.Dial("tcp", fmt.Sprintf("%v:%v", podIP, port))
+		if err != nil {
+			return false, nil
+		}
+
+		defer conn.Close()
+
+		return true, nil
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress controller pod listening on port 80")
+}
+
// UpdateDeployment runs the given updateFunc on the deployment and waits for it to be updated
func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name string, replicas int, updateFunc func(d *appsv1.Deployment) error) error {
	deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}

+	rolloutStatsCmd := fmt.Sprintf("%v --namespace %s rollout status deployment/%s -w --timeout 5m", KubectlPath, namespace, deployment.Name)
+
	if updateFunc != nil {
		if err := updateFunc(deployment); err != nil {
			return err
		}
+
+		err = exec.Command("bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
	}

	if *deployment.Spec.Replicas != int32(replicas) {

@@ -492,6 +521,11 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
		if err != nil {
			return errors.Wrapf(err, "scaling the number of replicas to %v", replicas)
		}
+
+		err = exec.Command("/bin/bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
	}

	err = waitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{

@@ -528,7 +562,7 @@ func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name st
		return err
	}

-	Sleep()
+	Sleep(1 * time.Second)
	return nil
}
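UpdateDeployment now blocks on kubectl rollout status after applying changes, which is why several tests above could drop their explicit sleeps. A standalone sketch of that wait pattern (the kubectl path, namespace, and deployment name here are placeholder values, not part of the commit):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// waitForRollout shells out to kubectl and blocks until the rollout
// completes or the five-minute timeout expires, mirroring the command
// string UpdateDeployment builds above.
func waitForRollout(kubectlPath, namespace, deployment string) error {
	cmd := fmt.Sprintf("%v --namespace %s rollout status deployment/%s -w --timeout 5m",
		kubectlPath, namespace, deployment)
	return exec.Command("/bin/bash", "-c", cmd).Run()
}

func main() {
	// Placeholder values; point these at a real cluster to try it.
	if err := waitForRollout("kubectl", "default", "echo"); err != nil {
		log.Fatalf("rollout did not complete: %v", err)
	}
}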
7 changes: 4 additions & 3 deletions test/e2e/framework/k8s.go
@@ -98,7 +98,7 @@ func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingre
	}

	// updating an ingress requires a reload.
-	Sleep()
+	Sleep(1 * time.Second)

	return ing
}

@@ -129,7 +129,7 @@ func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Depl

// waitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
		if err != nil {
			return false, nil

@@ -152,7 +152,7 @@ func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration,

// waitForPodsDeleted waits for a given amount of time until a group of Pods are deleted in the given namespace.
func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duration, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
		if err != nil {
			return false, nil

@@ -161,6 +161,7 @@ func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duratio
		if len(pl.Items) == 0 {
			return true, nil
		}
+
		return false, nil
	})
}
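The switch from wait.PollImmediate to wait.Poll is subtle: PollImmediate runs the condition once before sleeping, while Poll waits one full interval first, giving freshly created pods a moment to appear before the first List call. A self-contained sketch of the difference, using the same k8s.io/apimachinery wait package the framework imports:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()

	// wait.Poll sleeps one interval before the first condition check,
	// so this prints an elapsed time of roughly two seconds.
	_ = wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) {
		fmt.Printf("Poll: first check after %v\n", time.Since(start))
		return true, nil
	})

	start = time.Now()

	// wait.PollImmediate runs the condition right away, so this prints
	// an elapsed time close to zero.
	_ = wait.PollImmediate(2*time.Second, 10*time.Second, func() (bool, error) {
		fmt.Printf("PollImmediate: first check after %v\n", time.Since(start))
		return true, nil
	})
}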
1 change: 0 additions & 1 deletion test/e2e/gracefulshutdown/shutdown.go
@@ -71,7 +71,6 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
			return err
		})
-
		assert.Nil(ginkgo.GinkgoT(), err)

		annotations := map[string]string{
6 changes: 3 additions & 3 deletions test/e2e/leaks/lua_ssl.go
@@ -44,7 +44,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
		iterations := 10

		ginkgo.By("Waiting a minute before starting the test")
-		time.Sleep(1 * time.Minute)
+		framework.Sleep(1 * time.Minute)

		for iteration := 1; iteration <= iterations; iteration++ {
			ginkgo.By(fmt.Sprintf("Running iteration %v", iteration))

@@ -64,7 +64,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
			p.Close()

			ginkgo.By("waiting one minute before next iteration")
-			time.Sleep(1 * time.Minute)
+			framework.Sleep(1 * time.Minute)
		}
	})
})

@@ -116,7 +116,7 @@ func run(host string, f *framework.Framework) pool.WorkFunc {
		ginkgo.By(fmt.Sprintf("\tcreating ingress for host %v", host))
		privisionIngress(host, f)

-		time.Sleep(100 * time.Millisecond)
+		framework.Sleep(100 * time.Millisecond)

		ginkgo.By(fmt.Sprintf("\tchecking ingress for host %v", host))
		checkIngress(host, f)
(Diff truncated: the remaining 7 of the 18 changed files are not shown.)
