Test pull requests using github actions

commit b392fed580 (parent 57d1eb6aa3)

18 changed files with 214 additions and 56 deletions
.github/workflows/ci.yaml (new file, 117 additions)
@@ -0,0 +1,117 @@
+name: CI
+
+on:
+  pull_request:
+    branches:
+      - "*"
+  push:
+    branches:
+      - master
+
+jobs:
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+
+      - name: Set up Go 1.14
+        uses: actions/setup-go@v1
+        with:
+          go-version: 1.14
+        id: go
+
+      - name: Checkout
+        uses: actions/checkout@v1
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: crazy-max/ghaction-docker-buildx@v1
+        with:
+          buildx-version: latest
+          qemu-version: latest
+
+      - name: Available platforms
+        run: echo ${{ steps.buildx.outputs.platforms }}
+
+      - name: Prepare Host
+        run: |
+          sudo apt-get -qq update || true
+          sudo apt-get install -y pigz
+          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl
+          chmod +x ./kubectl
+          sudo mv ./kubectl /usr/local/bin/kubectl
+
+      - name: Build images
+        env:
+          TAG: 1.0.0-dev
+          ARCH: amd64
+          REGISTRY: ingress-controller
+        run: |
+          echo "building images..."
+          make clean-image build image
+          make -C test/e2e-image image
+
+          echo "creating images cache..."
+          docker save \
+            nginx-ingress-controller:e2e \
+            ingress-controller/nginx-ingress-controller:1.0.0-dev \
+            | pigz > docker.tar.gz
+
+      - name: cache
+        uses: actions/upload-artifact@v2
+        with:
+          name: docker.tar.gz
+          path: docker.tar.gz
+
+  kubernetes:
+    name: Kubernetes
+    runs-on: ubuntu-latest
+    needs: build
+    strategy:
+      matrix:
+        k8s: [v1.14.10, v1.15.11, v1.16.9, v1.17.5, v1.18.4]
+
+    steps:
+
+      - name: Checkout
+        uses: actions/checkout@v1
+
+      - name: cache
+        uses: actions/download-artifact@v2
+        with:
+          name: docker.tar.gz
+
+      - name: Create Kubernetes ${{ matrix.k8s }} cluster
+        id: kind
+        uses: engineerd/setup-kind@v0.4.0
+        with:
+          version: v0.8.1
+          config: test/e2e/kind.yaml
+          image: kindest/node:${{ matrix.k8s }}
+
+      # delete-artifact
+      - uses: geekyeggo/delete-artifact@v1
+        with:
+          name: docker.tar.gz
+          failOnError: false
+
+      - name: Prepare cluster for testing
+        id: local-path
+        run: |
+          kubectl version
+          echo
+          echo "installing helm 3..."
+          curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+
+      - name: Load images from cache
+        run: |
+          echo "loading docker images..."
+          pigz -dc docker.tar.gz | docker load
+
+      - name: Run e2e tests
+        env:
+          KIND_CLUSTER_NAME: kind
+          SKIP_CLUSTER_CREATION: true
+          SKIP_IMAGE_CREATION: true
+        run: |
+          kind get kubeconfig > $HOME/.kube/kind-config-kind
+          make kind-e2e-test
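The workflow is split into a build job and a fan-out test job: build compiles the controller and e2e-runner images once, compresses them with docker save | pigz into docker.tar.gz, and publishes that as a shared artifact; the kubernetes job then runs once per kind node version in the matrix, downloads the artifact, loads the images, and executes the suite against a cluster that already exists, which is why SKIP_CLUSTER_CREATION and SKIP_IMAGE_CREATION are set to true.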
Makefile (2 changed lines)
@@ -33,7 +33,7 @@ TAG ?= $(shell cat VERSION)
 # Allow limiting the scope of the e2e tests. By default run everything
 FOCUS ?= .*
 # number of parallel test
-E2E_NODES ?= 10
+E2E_NODES ?= 8
 # run e2e test suite with tests that check for memory leaks? (default is false)
 E2E_CHECK_LEAKS ?=
@@ -104,6 +104,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
 			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)
 			framework.Sleep()
+
 			response = request.WithCookies(cookies).Expect()
 			newHostName := getHostnameFromResponseBody(response.Body().Raw())
 			assert.Equal(ginkgo.GinkgoT(), originalHostName, newHostName,
@@ -470,6 +470,8 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
 				return strings.Contains(server, "location /bar")
 			})
 		}
+
+		framework.Sleep()
 	})

 	ginkgo.It("should return status code 200 when signed in after auth backend is deleted ", func() {
@@ -482,6 +484,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		f.HTTPTestClient().
 			GET(fooPath).
@@ -501,6 +504,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		f.HTTPTestClient().
 			GET(fooPath).
@@ -516,7 +520,6 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
 			WithBasicAuth("user", "password").
 			Expect().
 			Status(http.StatusInternalServerError)
-
 	})

 	ginkgo.It("should deny login for different servers", func() {
@@ -530,6 +533,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		ginkgo.By("receiving an internal server error without cache on thisHost location /bar")
 		f.HTTPTestClient().
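Each of these specs deletes the httpbin auth backend and immediately asserts on the controller's response; the added framework.Sleep() gives the controller time to observe the endpoint change before the request is issued, so the assertion is not racing the config sync.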
@@ -72,7 +72,7 @@ var _ = framework.DescribeAnnotation("influxdb-*", func() {
 			Expect().
 			Status(http.StatusOK)

-		time.Sleep(10 * time.Second)
+		framework.Sleep(10 * time.Second)

 		var measurements string
 		var err error
@@ -31,7 +31,7 @@ var _ = framework.DescribeAnnotation("server-snippet", func() {
 		f.NewEchoDeployment()
 	})

-	ginkgo.It(`add valid directives to server via server snippet"`, func() {
+	ginkgo.It(`add valid directives to server via server snippet`, func() {
 		host := "serversnippet.foo.com"
 		annotations := map[string]string{
 			"nginx.ingress.kubernetes.io/server-snippet": `
@@ -42,13 +42,10 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
 		args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
 		deployment.Spec.Template.Spec.Containers[0].Args = args
 		_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
-		framework.Sleep()
 		return err
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "updating deployment")

-	framework.Sleep()
-
 	f.WaitForNginxServer("_",
 		func(server string) bool {
 			return strings.Contains(server, `set $proxy_upstream_name "upstream-default-backend"`)
@@ -17,7 +17,9 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
+	"net"
 	"net/http"
+	"os/exec"
 	"strings"
 	"time"

@@ -106,17 +108,16 @@ func (f *Framework) BeforeEach() {
 	err = f.newIngressController(f.Namespace, f.BaseName)
 	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")

-	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
-		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
-	})
-	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+	f.WaitForNginxListening(80)
 }

 // AfterEach deletes the namespace, after reading its events.
 func (f *Framework) AfterEach() {
 	defer func(kubeClient kubernetes.Interface, ns string) {
-		err := deleteKubeNamespace(kubeClient, ns)
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		go func() {
+			err := deleteKubeNamespace(kubeClient, ns)
+			assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		}()
 	}(f.KubeClientSet, f.Namespace)

 	if !ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||||
func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
|
func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
|
||||||
err := wait.PollImmediate(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
|
err := wait.PollImmediate(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
|
||||||
assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
|
assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
|
||||||
Sleep()
|
Sleep(1 * time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
|
// WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
|
||||||
|
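Both arities of the Sleep helper appear in this diff: specs call framework.Sleep() bare, while the framework itself now passes an explicit 1 * time.Second. The helper's definition is not part of this commit view; a minimal sketch consistent with those call sites would be a variadic function with a package default (the default value here is an assumption):

// Sketch only, not the commit's actual code: a variadic Sleep that
// falls back to a package-level default when no duration is given.
func Sleep(duration ...time.Duration) {
	sleepFor := 1 * time.Second // assumed default
	if len(duration) != 0 {
		sleepFor = duration[0]
	}

	time.Sleep(sleepFor)
}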
@@ -473,6 +474,27 @@ func (f *Framework) newTestClient(config *tls.Config) *httpexpect.Expect {
 	})
 }

+// WaitForNginxListening waits until NGINX starts accepting connections on a port
+func (f *Framework) WaitForNginxListening(port int) {
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+
+	podIP := f.GetNginxIP()
+	err = wait.Poll(500*time.Millisecond, DefaultTimeout, func() (bool, error) {
+		conn, err := net.Dial("tcp", fmt.Sprintf("%v:%v", podIP, port))
+		if err != nil {
+			return false, nil
+		}
+
+		defer conn.Close()
+
+		return true, nil
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress controller pod listening on port 80")
+}
+
 // UpdateDeployment runs the given updateFunc on the deployment and waits for it to be updated
 func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name string, replicas int, updateFunc func(d *appsv1.Deployment) error) error {
 	deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
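WaitForNginxListening replaces the pods-ready check that BeforeEach used to do inline: it first waits for the controller pod, then polls a raw TCP dial until NGINX actually answers. A hypothetical call site in a spec (f being the per-test framework instance) looks like:

	f.WaitForNginxListening(80) // block until the controller accepts connections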
@@ -480,10 +502,17 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
 		return err
 	}

+	rolloutStatsCmd := fmt.Sprintf("%v --namespace %s rollout status deployment/%s -w --timeout 5m", KubectlPath, namespace, deployment.Name)
+
 	if updateFunc != nil {
 		if err := updateFunc(deployment); err != nil {
 			return err
 		}
+
+		err = exec.Command("bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
 	}

 	if *deployment.Spec.Replicas != int32(replicas) {
@@ -492,6 +521,11 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
 		if err != nil {
 			return errors.Wrapf(err, "scaling the number of replicas to %v", replicas)
 		}
+
+		err = exec.Command("/bin/bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
 	}

 	err = waitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
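With rolloutStatsCmd in place, UpdateDeployment shells out to kubectl rollout status -w --timeout 5m after both mutation paths (the updateFunc branch above and the replica-scaling branch below), so the helper returns only once the new ReplicaSet has fully rolled out rather than after a fixed sleep.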
@@ -528,7 +562,7 @@ func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name st
 		return err
 	}

-	Sleep()
+	Sleep(1 * time.Second)
 	return nil
 }
@@ -98,7 +98,7 @@ func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingre
 	}

 	// updating an ingress requires a reload.
-	Sleep()
+	Sleep(1 * time.Second)

 	return ing
 }

@@ -129,7 +129,7 @@ func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Depl

 // waitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
 func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
 		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
 		if err != nil {
 			return false, nil

@@ -152,7 +152,7 @@ func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration,

 // waitForPodsDeleted waits for a given amount of time until a group of Pods are deleted in the given namespace.
 func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duration, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
 		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
 		if err != nil {
 			return false, nil

@@ -161,6 +161,7 @@ func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duratio
 		if len(pl.Items) == 0 {
 			return true, nil
 		}
+
 		return false, nil
 	})
 }
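wait.Poll and wait.PollImmediate differ only in when the first check runs: PollImmediate evaluates the condition right away, while Poll sleeps one interval first, giving freshly mutated pods a grace period before the readiness probe starts. A simplified sketch of the Poll behaviour (not the real k8s.io/apimachinery implementation):

// Simplified semantics sketch; the real library adds contexts and jitter.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		time.Sleep(interval) // Poll waits first; PollImmediate would check first
		if done, err := condition(); done || err != nil {
			return err
		}
	}
	return fmt.Errorf("timed out after %v", timeout)
}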
@@ -71,7 +71,6 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
 		_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
 		return err
 	})
-
 	assert.Nil(ginkgo.GinkgoT(), err)

 	annotations := map[string]string{
@@ -44,7 +44,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
 		iterations := 10

 		ginkgo.By("Waiting a minute before starting the test")
-		time.Sleep(1 * time.Minute)
+		framework.Sleep(1 * time.Minute)

 		for iteration := 1; iteration <= iterations; iteration++ {
 			ginkgo.By(fmt.Sprintf("Running iteration %v", iteration))

@@ -64,7 +64,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
 			p.Close()

 			ginkgo.By("waiting one minute before next iteration")
-			time.Sleep(1 * time.Minute)
+			framework.Sleep(1 * time.Minute)
 		}
 	})
 })

@@ -116,7 +116,7 @@ func run(host string, f *framework.Framework) pool.WorkFunc {
 		ginkgo.By(fmt.Sprintf("\tcreating ingress for host %v", host))
 		privisionIngress(host, f)

-		time.Sleep(100 * time.Millisecond)
+		framework.Sleep(100 * time.Millisecond)

 		ginkgo.By(fmt.Sprintf("\tchecking ingress for host %v", host))
 		checkIngress(host, f)
@@ -99,7 +99,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, replicas, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		time.Sleep(waitForLuaSync)
+		framework.Sleep(waitForLuaSync)

 		f.HTTPTestClient().
 			GET("/").

@@ -117,7 +117,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, 0, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		time.Sleep(waitForLuaSync)
+		framework.Sleep(waitForLuaSync)

 		f.HTTPTestClient().
 			GET("/").

@@ -142,7 +142,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		time.Sleep(waitForLuaSync)
+		framework.Sleep(waitForLuaSync)

 		resp = f.HTTPTestClient().
 			GET("/").

@@ -155,7 +155,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		time.Sleep(waitForLuaSync)
+		framework.Sleep(waitForLuaSync)

 		resp = f.HTTPTestClient().
 			GET("/").

@@ -210,8 +210,6 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		framework.Sleep()
-
 		output, err = f.ExecIngressPod(curlCmd)
 		assert.Nil(ginkgo.GinkgoT(), err)
 		assert.Equal(ginkgo.GinkgoT(), output, `{"controllerPodsCount":3}`)
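All remaining time.Sleep calls in the suite are swapped for framework.Sleep, so every fixed delay goes through a single helper that can be tuned in one place; conversely, the bare framework.Sleep() after scaling the controller to 3 replicas can be dropped because UpdateDeployment now waits on the rollout itself.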
@@ -37,15 +37,13 @@ cleanup() {

 trap cleanup EXIT

+export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ingress-nginx-dev}
+
 if ! command -v kind --version &> /dev/null; then
   echo "kind is not installed. Use the package manager or visit the official site https://kind.sigs.k8s.io/"
   exit 1
 fi

-if ! command -v ginkgo &> /dev/null; then
-  go get github.com/onsi/ginkgo/ginkgo
-fi
-
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

 # Use 1.0.0-dev to make sure we use the latest configuration in the helm template

@@ -53,29 +51,35 @@ export TAG=1.0.0-dev
 export ARCH=${ARCH:-amd64}
 export REGISTRY=ingress-controller

-export K8S_VERSION=${K8S_VERSION:-v1.18.4@sha256:d8ff5fc405fc679dd3dd0cccc01543ba4942ed90823817d2e9e2c474a5343c4f}
-
 export DOCKER_CLI_EXPERIMENTAL=enabled

-export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ingress-nginx-dev}
+export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/kind-config-$KIND_CLUSTER_NAME}"

-echo "[dev-env] creating Kubernetes cluster with kind"
-
-export KUBECONFIG="${HOME}/.kube/kind-config-${KIND_CLUSTER_NAME}"
-kind create cluster \
-  --verbosity=${KIND_LOG_LEVEL} \
-  --name ${KIND_CLUSTER_NAME} \
-  --config ${DIR}/kind.yaml \
-  --retain \
-  --image "kindest/node:${K8S_VERSION}"
-
-echo "Kubernetes cluster:"
-kubectl get nodes -o wide
+if [ "${SKIP_CLUSTER_CREATION:-false}" = "false" ]; then
+  echo "[dev-env] creating Kubernetes cluster with kind"
+
+  export K8S_VERSION=${K8S_VERSION:-v1.18.4@sha256:d8ff5fc405fc679dd3dd0cccc01543ba4942ed90823817d2e9e2c474a5343c4f}
+
+  kind create cluster \
+    --verbosity=${KIND_LOG_LEVEL} \
+    --name ${KIND_CLUSTER_NAME} \
+    --config ${DIR}/kind.yaml \
+    --retain \
+    --image "kindest/node:${K8S_VERSION}"
+
+  echo "Kubernetes cluster:"
+  kubectl get nodes -o wide
+fi

-echo "[dev-env] building image"
-
-make -C ${DIR}/../../ clean-image build image
-make -C ${DIR}/../e2e-image image
+if [ "${SKIP_IMAGE_CREATION:-false}" = "false" ]; then
+  if ! command -v ginkgo &> /dev/null; then
+    go get github.com/onsi/ginkgo/ginkgo
+  fi
+
+  echo "[dev-env] building image"
+  make -C ${DIR}/../../ clean-image build image
+  make -C ${DIR}/../e2e-image image
+fi

 #make -C ${DIR}/../../images/fastcgi-helloserver/ build image
 #make -C ${DIR}/../../images/echo/ image
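The two new guards are what let CI reuse work done elsewhere: the workflow sets SKIP_CLUSTER_CREATION=true because engineerd/setup-kind already created the cluster, and SKIP_IMAGE_CREATION=true because the images arrive pre-built in the docker.tar.gz artifact. Local runs leave both unset and keep the old create-then-build behaviour.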
@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"strings"

+	"github.com/gavv/httpexpect/v2"
 	"github.com/onsi/ginkgo"
 	"github.com/stretchr/testify/assert"
 	core "k8s.io/api/core/v1"

@@ -172,7 +173,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() {
 			GET("/get").
 			WithHeader("Host", host).
 			Expect().
-			Status(http.StatusBadGateway)
+			StatusRange(httpexpect.Status5xx)
 	})

 	ginkgo.It("should return 200 for service type=ExternalName using a port name", func() {
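StatusRange asserts on a status class rather than a single code, so the spec now accepts whichever 5xx the controller emits for a broken ExternalName upstream (502 or 503, depending on version). A minimal hedged sketch of the same assertion outside the framework (the endpoint, hostname, and reporter are assumptions):

// Any 5xx passes; a 200 would fail the assertion.
e := httpexpect.New(ginkgo.GinkgoT(), "http://127.0.0.1:8080") // assumed endpoint
e.GET("/get").
	WithHeader("Host", "svc-no-upstream.foo.com"). // assumed hostname
	Expect().
	StatusRange(httpexpect.Status5xx)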
@@ -178,6 +178,7 @@ var _ = framework.DescribeSetting("[Security] global-auth-url", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		f.HTTPTestClient().
 			GET(barPath).
@@ -40,7 +40,7 @@ const (
 var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies", func() {
 	f := framework.NewDefaultFramework("pod-security-policies")

-	ginkgo.BeforeEach(func() {
+	ginkgo.It("should be running with a Pod Security Policy", func() {
 		psp := createPodSecurityPolicy()
 		_, err := f.KubeClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
 		if !k8sErrors.IsAlreadyExists(err) {

@@ -73,10 +73,10 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies", func(
 		})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating ingress controller deployment flags")

-		f.NewEchoDeployment()
-	})
+		f.WaitForNginxListening(80)

-	ginkgo.It("should be running with a Pod Security Policy", func() {
+		f.NewEchoDeployment()
+
 		f.WaitForNginxConfiguration(
 			func(cfg string) bool {
 				return strings.Contains(cfg, "server_tokens on")

@@ -125,5 +125,4 @@ func createPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy {
 			},
 		},
 	}
-
 }
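Folding the setup out of ginkgo.BeforeEach and into the single It spec means the PodSecurityPolicy, the RBAC change, and the controller restart happen exactly once, in the only spec that needs them, and the spec now waits for NGINX to be listening before deploying the echo backend.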
@@ -96,6 +96,8 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies with vo
 		})
 		assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller deployment")

+		f.WaitForNginxListening(80)
+
 		f.NewEchoDeployment()

 		f.WaitForNginxConfiguration(
@@ -80,7 +80,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 		})

 		framework.Logf("waiting for leader election and initial status update")
-		time.Sleep(30 * time.Second)
+		framework.Sleep(30 * time.Second)

 		err = cmd.Process.Kill()
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error terminating kubectl proxy")

@@ -91,7 +91,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 		ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{}
 		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).UpdateStatus(context.TODO(), ing, metav1.UpdateOptions{})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error cleaning Ingress status")
-		time.Sleep(10 * time.Second)
+		framework.Sleep(10 * time.Second)

 		err = f.KubeClientSet.CoreV1().
 			ConfigMaps(f.Namespace).