diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 000000000..c0932f2f3
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,117 @@
+name: CI
+
+on:
+  pull_request:
+    branches:
+      - "*"
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up Go 1.14
+        uses: actions/setup-go@v1
+        with:
+          go-version: 1.14
+        id: go
+
+      - name: Checkout
+        uses: actions/checkout@v1
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: crazy-max/ghaction-docker-buildx@v1
+        with:
+          buildx-version: latest
+          qemu-version: latest
+
+      - name: Available platforms
+        run: echo ${{ steps.buildx.outputs.platforms }}
+
+      - name: Prepare Host
+        run: |
+          sudo apt-get -qq update || true
+          sudo apt-get install -y pigz
+          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl
+          chmod +x ./kubectl
+          sudo mv ./kubectl /usr/local/bin/kubectl
+
+      - name: Build images
+        env:
+          TAG: 1.0.0-dev
+          ARCH: amd64
+          REGISTRY: ingress-controller
+        run: |
+          echo "building images..."
+          make clean-image build image
+          make -C test/e2e-image image
+
+          echo "creating images cache..."
+          docker save \
+            nginx-ingress-controller:e2e \
+            ingress-controller/nginx-ingress-controller:1.0.0-dev \
+            | pigz > docker.tar.gz
+
+      - name: cache
+        uses: actions/upload-artifact@v2
+        with:
+          name: docker.tar.gz
+          path: docker.tar.gz
+
+  kubernetes:
+    name: Kubernetes
+    runs-on: ubuntu-latest
+    needs: build
+    strategy:
+      matrix:
+        k8s: [v1.14.10, v1.15.11, v1.16.9, v1.17.5, v1.18.4]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v1
+
+      - name: cache
+        uses: actions/download-artifact@v2
+        with:
+          name: docker.tar.gz
+
+      - name: Create Kubernetes ${{ matrix.k8s }} cluster
+        id: kind
+        uses: engineerd/setup-kind@v0.4.0
+        with:
+          version: v0.8.1
+          config: test/e2e/kind.yaml
+          image: kindest/node:${{ matrix.k8s }}
+
+      # delete-artifact
+      - uses: geekyeggo/delete-artifact@v1
+        with:
+          name: docker.tar.gz
+          failOnError: false
+
+      - name: Prepare cluster for testing
+        id: local-path
+        run: |
+          kubectl version
+          echo
+          echo "installing helm 3..."
+          curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+
+      - name: Load images from cache
+        run: |
+          echo "loading docker images..."
+          pigz -dc docker.tar.gz | docker load
+
+      - name: Run e2e tests
+        env:
+          KIND_CLUSTER_NAME: kind
+          SKIP_CLUSTER_CREATION: true
+          SKIP_IMAGE_CREATION: true
+        run: |
+          kind get kubeconfig > $HOME/.kube/kind-config-kind
+          make kind-e2e-test
diff --git a/Makefile b/Makefile
index aa3e80772..580a7b6dc 100644
--- a/Makefile
+++ b/Makefile
@@ -33,7 +33,7 @@ TAG ?= $(shell cat VERSION)
 # Allow limiting the scope of the e2e tests. By default run everything
 FOCUS ?= .*
 # number of parallel test
-E2E_NODES ?= 10
+E2E_NODES ?= 8
 # run e2e test suite with tests that check for memory leaks? (default is false)
 E2E_CHECK_LEAKS ?=
diff --git a/test/e2e/annotations/affinitymode.go b/test/e2e/annotations/affinitymode.go
index 362c68ae7..7c5ccb49c 100644
--- a/test/e2e/annotations/affinitymode.go
+++ b/test/e2e/annotations/affinitymode.go
@@ -104,6 +104,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
 			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)
 			framework.Sleep()
+			response = request.WithCookies(cookies).Expect()
 			newHostName := getHostnameFromResponseBody(response.Body().Raw())
 			assert.Equal(ginkgo.GinkgoT(), originalHostName, newHostName,
diff --git a/test/e2e/annotations/auth.go b/test/e2e/annotations/auth.go
index 11e4807b9..2d2c2ea0f 100644
--- a/test/e2e/annotations/auth.go
+++ b/test/e2e/annotations/auth.go
@@ -470,6 +470,8 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
 					return strings.Contains(server, "location /bar")
 				})
 		}
+
+		framework.Sleep()
 	})

 	ginkgo.It("should return status code 200 when signed in after auth backend is deleted ", func() {
@@ -482,6 +484,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		f.HTTPTestClient().
 			GET(fooPath).
@@ -501,6 +504,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		f.HTTPTestClient().
 			GET(fooPath).
@@ -516,7 +520,6 @@ var _ = framework.DescribeAnnotation("auth-*", func() {
 			WithBasicAuth("user", "password").
 			Expect().
 			Status(http.StatusInternalServerError)
-
 	})

 	ginkgo.It("should deny login for different servers", func() {
@@ -530,6 +533,7 @@ var _ = framework.DescribeAnnotation("auth-*", func() {

 		err := f.DeleteDeployment(framework.HTTPBinService)
 		assert.Nil(ginkgo.GinkgoT(), err)
+		framework.Sleep()

 		ginkgo.By("receiving an internal server error without cache on thisHost location /bar")
 		f.HTTPTestClient().
diff --git a/test/e2e/annotations/influxdb.go b/test/e2e/annotations/influxdb.go
index 769e49bb0..fd8504fd2 100644
--- a/test/e2e/annotations/influxdb.go
+++ b/test/e2e/annotations/influxdb.go
@@ -72,7 +72,7 @@ var _ = framework.DescribeAnnotation("influxdb-*", func() {
 			Expect().
 			Status(http.StatusOK)

-		time.Sleep(10 * time.Second)
+		framework.Sleep(10 * time.Second)

 		var measurements string
 		var err error
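Note: the time.Sleep to framework.Sleep swaps above (and throughout this patch) rely on the framework exposing a variadic sleep helper with a default duration. A minimal sketch of such a helper, assuming a one-second default (the real default lives in the framework package, not in this patch):

	package framework

	import "time"

	// Sleep pauses the current goroutine. With no argument it falls back to a
	// package default, so call sites can write Sleep() after a config reload
	// or Sleep(10 * time.Second) when a longer wait is required.
	func Sleep(duration ...time.Duration) {
		sleepFor := 1 * time.Second // assumed default
		if len(duration) != 0 {
			sleepFor = duration[0]
		}

		time.Sleep(sleepFor)
	}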
diff --git a/test/e2e/annotations/serversnippet.go b/test/e2e/annotations/serversnippet.go
index 1e0455f59..8a4f25ea4 100644
--- a/test/e2e/annotations/serversnippet.go
+++ b/test/e2e/annotations/serversnippet.go
@@ -31,7 +31,7 @@ var _ = framework.DescribeAnnotation("server-snippet", func() {
 		f.NewEchoDeployment()
 	})

-	ginkgo.It(`add valid directives to server via server snippet"`, func() {
+	ginkgo.It(`add valid directives to server via server snippet`, func() {
 		host := "serversnippet.foo.com"
 		annotations := map[string]string{
 			"nginx.ingress.kubernetes.io/server-snippet": `
diff --git a/test/e2e/defaultbackend/custom_default_backend.go b/test/e2e/defaultbackend/custom_default_backend.go
index 30c946a91..f3c06f73b 100644
--- a/test/e2e/defaultbackend/custom_default_backend.go
+++ b/test/e2e/defaultbackend/custom_default_backend.go
@@ -42,13 +42,10 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
 			args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
 			deployment.Spec.Template.Spec.Containers[0].Args = args
 			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
-			framework.Sleep()
 			return err
 		})
 		assert.Nil(ginkgo.GinkgoT(), err, "updating deployment")

-		framework.Sleep()
-
 		f.WaitForNginxServer("_",
 			func(server string) bool {
 				return strings.Contains(server, `set $proxy_upstream_name "upstream-default-backend"`)
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index b33e7ccb3..aeed17c19 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -17,7 +17,9 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
+	"net"
 	"net/http"
+	"os/exec"
 	"strings"
 	"time"

@@ -106,17 +108,16 @@ func (f *Framework) BeforeEach() {
 	err = f.newIngressController(f.Namespace, f.BaseName)
 	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")

-	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
-		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
-	})
-	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+	f.WaitForNginxListening(80)
 }

 // AfterEach deletes the namespace, after reading its events.
 func (f *Framework) AfterEach() {
 	defer func(kubeClient kubernetes.Interface, ns string) {
-		err := deleteKubeNamespace(kubeClient, ns)
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		go func() {
+			err := deleteKubeNamespace(kubeClient, ns)
+			assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+		}()
 	}(f.KubeClientSet, f.Namespace)

 	if !ginkgo.CurrentGinkgoTestDescription().Failed {
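Note: deleting the namespace in a goroutine (above) keeps teardown from blocking the next spec, but an assert.Nil that fires after the spec has finished can no longer fail it. A hedged alternative sketch that logs instead of asserting; deleteKubeNamespace and Logf are the framework's existing helpers:

	defer func(kubeClient kubernetes.Interface, ns string) {
		go func() {
			// Best-effort cleanup: the spec is already over when this runs,
			// so log a failure rather than asserting on it.
			if err := deleteKubeNamespace(kubeClient, ns); err != nil {
				Logf("failed deleting namespace %v: %v", ns, err)
			}
		}()
	}(f.KubeClientSet, f.Namespace)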
@@ -205,7 +206,7 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
 func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
 	err := wait.PollImmediate(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
-	Sleep()
+	Sleep(1 * time.Second)
 }

 // WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
@@ -473,6 +474,27 @@ func (f *Framework) newTestClient(config *tls.Config) *httpexpect.Expect {
 	})
 }

+// WaitForNginxListening waits until NGINX starts accepting connections on a port
+func (f *Framework) WaitForNginxListening(port int) {
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
+
+	podIP := f.GetNginxIP()
+	err = wait.Poll(500*time.Millisecond, DefaultTimeout, func() (bool, error) {
+		conn, err := net.Dial("tcp", fmt.Sprintf("%v:%v", podIP, port))
+		if err != nil {
+			return false, nil
+		}
+
+		defer conn.Close()
+
+		return true, nil
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress controller pod listening on port 80")
+}
+
 // UpdateDeployment runs the given updateFunc on the deployment and waits for it to be updated
 func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name string, replicas int, updateFunc func(d *appsv1.Deployment) error) error {
 	deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -480,10 +502,17 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, name
 		return err
 	}

+	rolloutStatsCmd := fmt.Sprintf("%v --namespace %s rollout status deployment/%s -w --timeout 5m", KubectlPath, namespace, deployment.Name)
+
 	if updateFunc != nil {
 		if err := updateFunc(deployment); err != nil {
 			return err
 		}
+
+		err = exec.Command("bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
 	}

 	if *deployment.Spec.Replicas != int32(replicas) {
@@ -492,6 +521,11 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, name
 		if err != nil {
 			return errors.Wrapf(err, "scaling the number of replicas to %v", replicas)
 		}
+
+		err = exec.Command("/bin/bash", "-c", rolloutStatsCmd).Run()
+		if err != nil {
+			return err
+		}
 	}

 	err = waitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
@@ -528,7 +562,7 @@ func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name st
 		return err
 	}

-	Sleep()
+	Sleep(1 * time.Second)

 	return nil
 }
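Note: the two rollout-status calls added above invoke the shell inconsistently ("bash" vs "/bin/bash"), and Run() discards kubectl's output, so a failed rollout surfaces only as "exit status 1". A sketch of a variant that keeps the output for the error message (illustrative, not part of the patch):

	out, err := exec.Command("bash", "-c", rolloutStatsCmd).CombinedOutput()
	if err != nil {
		return fmt.Errorf("waiting for deployment rollout: %v: %s", err, out)
	}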
diff --git a/test/e2e/framework/k8s.go b/test/e2e/framework/k8s.go
index 95c8635e6..258cc070e 100644
--- a/test/e2e/framework/k8s.go
+++ b/test/e2e/framework/k8s.go
@@ -98,7 +98,7 @@ func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingre
 	}

 	// updating an ingress requires a reload.
-	Sleep()
+	Sleep(1 * time.Second)

 	return ing
 }
@@ -129,7 +129,7 @@ func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Depl

 // waitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
 func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
 		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
 		if err != nil {
 			return false, nil
@@ -152,7 +152,7 @@ func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration,

 // waitForPodsDeleted waits for a given amount of time until a group of Pods are deleted in the given namespace.
 func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duration, namespace string, opts metav1.ListOptions) error {
-	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
+	return wait.Poll(Poll, timeout, func() (bool, error) {
 		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
 		if err != nil {
 			return false, nil
@@ -161,6 +161,7 @@ func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duratio
 		if len(pl.Items) == 0 {
 			return true, nil
 		}
+
 		return false, nil
 	})
 }
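Note: the switch from wait.PollImmediate to wait.Poll above is subtle: PollImmediate runs the condition before the first tick, while Poll waits one full interval first, which gives just-created or just-deleted pods time to reach the API server before the initial List. A self-contained sketch of the difference:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		start := time.Now()

		// Poll: the first check runs after one interval (about 1s here).
		_ = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {
			fmt.Printf("Poll fired after %v\n", time.Since(start))
			return true, nil
		})

		start = time.Now()

		// PollImmediate: the first check runs right away (about 0s).
		_ = wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) {
			fmt.Printf("PollImmediate fired after %v\n", time.Since(start))
			return true, nil
		})
	}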
diff --git a/test/e2e/gracefulshutdown/shutdown.go b/test/e2e/gracefulshutdown/shutdown.go
index e1f41a2cc..5ed1f93e7 100644
--- a/test/e2e/gracefulshutdown/shutdown.go
+++ b/test/e2e/gracefulshutdown/shutdown.go
@@ -71,7 +71,6 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
 			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
 			return err
 		})
-
 		assert.Nil(ginkgo.GinkgoT(), err)

 		annotations := map[string]string{
diff --git a/test/e2e/leaks/lua_ssl.go b/test/e2e/leaks/lua_ssl.go
index e6a0372db..4374c4c04 100644
--- a/test/e2e/leaks/lua_ssl.go
+++ b/test/e2e/leaks/lua_ssl.go
@@ -44,7 +44,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
 		iterations := 10

 		ginkgo.By("Waiting a minute before starting the test")
-		time.Sleep(1 * time.Minute)
+		framework.Sleep(1 * time.Minute)

 		for iteration := 1; iteration <= iterations; iteration++ {
 			ginkgo.By(fmt.Sprintf("Running iteration %v", iteration))
@@ -64,7 +64,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
 			p.Close()

 			ginkgo.By("waiting one minute before next iteration")
-			time.Sleep(1 * time.Minute)
+			framework.Sleep(1 * time.Minute)
 		}
 	})
@@ -116,7 +116,7 @@ func run(host string, f *framework.Framework) pool.WorkFunc {
 		ginkgo.By(fmt.Sprintf("\tcreating ingress for host %v", host))
 		privisionIngress(host, f)
-		time.Sleep(100 * time.Millisecond)
+		framework.Sleep(100 * time.Millisecond)

 		ginkgo.By(fmt.Sprintf("\tchecking ingress for host %v", host))
 		checkIngress(host, f)
diff --git a/test/e2e/lua/dynamic_configuration.go b/test/e2e/lua/dynamic_configuration.go
index e7363403c..a207d1573 100644
--- a/test/e2e/lua/dynamic_configuration.go
+++ b/test/e2e/lua/dynamic_configuration.go
@@ -99,7 +99,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, replicas, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)

-			time.Sleep(waitForLuaSync)
+			framework.Sleep(waitForLuaSync)

 			f.HTTPTestClient().
 				GET("/").
@@ -117,7 +117,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 			err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, 0, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)

-			time.Sleep(waitForLuaSync)
+			framework.Sleep(waitForLuaSync)

 			f.HTTPTestClient().
 				GET("/").
@@ -142,7 +142,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)

-			time.Sleep(waitForLuaSync)
+			framework.Sleep(waitForLuaSync)

 			resp = f.HTTPTestClient().
 				GET("/").
@@ -155,7 +155,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 			err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 			assert.Nil(ginkgo.GinkgoT(), err)

-			time.Sleep(waitForLuaSync)
+			framework.Sleep(waitForLuaSync)

 			resp = f.HTTPTestClient().
 				GET("/").
@@ -210,8 +210,6 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil)
 		assert.Nil(ginkgo.GinkgoT(), err)

-		framework.Sleep()
-
 		output, err = f.ExecIngressPod(curlCmd)
 		assert.Nil(ginkgo.GinkgoT(), err)
 		assert.Equal(ginkgo.GinkgoT(), output, `{"controllerPodsCount":3}`)
diff --git a/test/e2e/run.sh b/test/e2e/run.sh
index c4216914c..2d3b51b99 100755
--- a/test/e2e/run.sh
+++ b/test/e2e/run.sh
@@ -37,15 +37,13 @@ cleanup() {
 }
 trap cleanup EXIT

+export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ingress-nginx-dev}
+
 if ! command -v kind --version &> /dev/null; then
   echo "kind is not installed. Use the package manager or visit the official site https://kind.sigs.k8s.io/"
   exit 1
 fi

-if ! command -v ginkgo &> /dev/null; then
-  go get github.com/onsi/ginkgo/ginkgo
-fi
-
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

 # Use 1.0.0-dev to make sure we use the latest configuration in the helm template
@@ -53,29 +51,35 @@ export TAG=1.0.0-dev
 export ARCH=${ARCH:-amd64}
 export REGISTRY=ingress-controller

-export K8S_VERSION=${K8S_VERSION:-v1.18.4@sha256:d8ff5fc405fc679dd3dd0cccc01543ba4942ed90823817d2e9e2c474a5343c4f}
-
 export DOCKER_CLI_EXPERIMENTAL=enabled

-export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ingress-nginx-dev}
+export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/kind-config-$KIND_CLUSTER_NAME}"

-echo "[dev-env] creating Kubernetes cluster with kind"
+if [ "${SKIP_CLUSTER_CREATION:-false}" = "false" ]; then
+  echo "[dev-env] creating Kubernetes cluster with kind"

-export KUBECONFIG="${HOME}/.kube/kind-config-${KIND_CLUSTER_NAME}"
-kind create cluster \
-  --verbosity=${KIND_LOG_LEVEL} \
-  --name ${KIND_CLUSTER_NAME} \
-  --config ${DIR}/kind.yaml \
-  --retain \
-  --image "kindest/node:${K8S_VERSION}"
+  export K8S_VERSION=${K8S_VERSION:-v1.18.4@sha256:d8ff5fc405fc679dd3dd0cccc01543ba4942ed90823817d2e9e2c474a5343c4f}

-echo "Kubernetes cluster:"
-kubectl get nodes -o wide
+  kind create cluster \
+    --verbosity=${KIND_LOG_LEVEL} \
+    --name ${KIND_CLUSTER_NAME} \
+    --config ${DIR}/kind.yaml \
+    --retain \
+    --image "kindest/node:${K8S_VERSION}"

-echo "[dev-env] building image"
+  echo "Kubernetes cluster:"
+  kubectl get nodes -o wide
+fi

-make -C ${DIR}/../../ clean-image build image
-make -C ${DIR}/../e2e-image image
+if [ "${SKIP_IMAGE_CREATION:-false}" = "false" ]; then
+  if ! command -v ginkgo &> /dev/null; then
+    go get github.com/onsi/ginkgo/ginkgo
+  fi
+
+  echo "[dev-env] building image"
+  make -C ${DIR}/../../ clean-image build image
+  make -C ${DIR}/../e2e-image image
+fi

 #make -C ${DIR}/../../images/fastcgi-helloserver/ build image
 #make -C ${DIR}/../../images/echo/ image
diff --git a/test/e2e/servicebackend/service_externalname.go b/test/e2e/servicebackend/service_externalname.go
index 6369862dd..0738c744f 100644
--- a/test/e2e/servicebackend/service_externalname.go
+++ b/test/e2e/servicebackend/service_externalname.go
@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"strings"

+	"github.com/gavv/httpexpect/v2"
 	"github.com/onsi/ginkgo"
 	"github.com/stretchr/testify/assert"
 	core "k8s.io/api/core/v1"
@@ -172,7 +173,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() {
 			GET("/get").
 			WithHeader("Host", host).
 			Expect().
-			Status(http.StatusBadGateway)
+			StatusRange(httpexpect.Status5xx)
 	})

 	ginkgo.It("should return 200 for service type=ExternalName using a port name", func() {
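Note: the service_externalname.go change above loosens an exact-status assertion. httpexpect's StatusRange matches the whole status class, so the test passes whether NGINX answers 502 or 503 for a vanished ExternalName backend. Roughly:

	// Before: only exactly 502 Bad Gateway passes.
	Expect().Status(http.StatusBadGateway)

	// After: any 5xx response (500-599) passes.
	Expect().StatusRange(httpexpect.Status5xx)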
diff --git a/test/e2e/settings/global_external_auth.go b/test/e2e/settings/global_external_auth.go
index 570517809..d8159ad93 100755
--- a/test/e2e/settings/global_external_auth.go
+++ b/test/e2e/settings/global_external_auth.go
@@ -178,6 +178,7 @@ var _ = framework.DescribeSetting("[Security] global-auth-url", func() {

 			err := f.DeleteDeployment(framework.HTTPBinService)
 			assert.Nil(ginkgo.GinkgoT(), err)
+			framework.Sleep()

 			f.HTTPTestClient().
 				GET(barPath).
diff --git a/test/e2e/settings/pod_security_policy.go b/test/e2e/settings/pod_security_policy.go
index c99edc5d5..c35ce53c8 100644
--- a/test/e2e/settings/pod_security_policy.go
+++ b/test/e2e/settings/pod_security_policy.go
@@ -40,7 +40,7 @@ const (
 var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies", func() {
 	f := framework.NewDefaultFramework("pod-security-policies")

-	ginkgo.BeforeEach(func() {
+	ginkgo.It("should be running with a Pod Security Policy", func() {
 		psp := createPodSecurityPolicy()
 		_, err := f.KubeClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
 		if !k8sErrors.IsAlreadyExists(err) {
@@ -73,10 +73,10 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies", func(
 		})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating ingress controller deployment flags")

-		f.NewEchoDeployment()
-	})
+		f.WaitForNginxListening(80)
+
+		f.NewEchoDeployment()

-	ginkgo.It("should be running with a Pod Security Policy", func() {
 		f.WaitForNginxConfiguration(
 			func(cfg string) bool {
 				return strings.Contains(cfg, "server_tokens on")
@@ -125,5 +125,4 @@ func createPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy {
 			},
 		},
 	}
-
 }
diff --git a/test/e2e/settings/pod_security_policy_volumes.go b/test/e2e/settings/pod_security_policy_volumes.go
index 6f3006ff7..ca6783f86 100644
--- a/test/e2e/settings/pod_security_policy_volumes.go
+++ b/test/e2e/settings/pod_security_policy_volumes.go
@@ -96,6 +96,8 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies with vo
 		})
 		assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller deployment")

+		f.WaitForNginxListening(80)
+
 		f.NewEchoDeployment()

 		f.WaitForNginxConfiguration(
diff --git a/test/e2e/status/update.go b/test/e2e/status/update.go
index 1a9f19102..f18ff0686 100644
--- a/test/e2e/status/update.go
+++ b/test/e2e/status/update.go
@@ -80,7 +80,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 		})

 		framework.Logf("waiting for leader election and initial status update")
-		time.Sleep(30 * time.Second)
+		framework.Sleep(30 * time.Second)

 		err = cmd.Process.Kill()
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error terminating kubectl proxy")
@@ -91,7 +91,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 		ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{}
 		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).UpdateStatus(context.TODO(), ing, metav1.UpdateOptions{})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error cleaning Ingress status")
-		time.Sleep(10 * time.Second)
+		framework.Sleep(10 * time.Second)

 		err = f.KubeClientSet.CoreV1().
 			ConfigMaps(f.Namespace).