Remove unused variables and verbose e2e logs
parent 47b5e20a88
commit 10dcf0db15
35 changed files with 331 additions and 427 deletions

Makefile (66 lines changed)
```diff
@@ -29,22 +29,11 @@ SHELL=/bin/bash -o pipefail -o errexit
 # Use the 0.0 tag for testing, it shouldn't clobber any release builds
 TAG ?= 0.33.0
-
-# Use docker to run makefile tasks
-USE_DOCKER ?= true
-
-# Disable run docker tasks if running in prow.
-# only checks the existence of the variable, not the value.
-ifdef DIND_TASKS
-USE_DOCKER=false
-endif
 
 # e2e settings
 # Allow limiting the scope of the e2e tests. By default run everything
 FOCUS ?= .*
 # number of parallel test
-E2E_NODES ?= 10
-# slow test only if takes > 50s
-SLOW_E2E_THRESHOLD ?= 50
+E2E_NODES ?= 6
 # run e2e test suite with tests that check for memory leaks? (default is false)
 E2E_CHECK_LEAKS ?=
 
@@ -84,8 +73,7 @@ clean-image: ## Removes local image
 	@docker rmi -f $(REGISTRY)/nginx-ingress-controller:$(TAG) || true
 
 .PHONY: build
-build: check-go-version ## Build ingress controller, debug tool and pre-stop hook.
-ifeq ($(USE_DOCKER), true)
+build: ## Build ingress controller, debug tool and pre-stop hook.
 	@build/run-in-docker.sh \
 		PKG=$(PKG) \
 		ARCH=$(ARCH) \
@@ -94,13 +82,9 @@ ifeq ($(USE_DOCKER), true)
 		TAG=$(TAG) \
 		GOBUILD_FLAGS=$(GOBUILD_FLAGS) \
 		build/build.sh
-else
-	@build/build.sh
-endif
 
 .PHONY: build-plugin
-build-plugin: check-go-version ## Build ingress-nginx krew plugin.
-ifeq ($(USE_DOCKER), true)
+build-plugin: ## Build ingress-nginx krew plugin.
 	@build/run-in-docker.sh \
 		PKG=$(PKG) \
 		ARCH=$(ARCH) \
@@ -109,9 +93,6 @@ ifeq ($(USE_DOCKER), true)
 		TAG=$(TAG) \
 		GOBUILD_FLAGS=$(GOBUILD_FLAGS) \
 		build/build-plugin.sh
-else
-	@build/build-plugin.sh
-endif
 
 .PHONY: clean
 clean: ## Remove .gocache directory.
@@ -119,16 +100,11 @@ clean: ## Remove .gocache directory.
 
 .PHONY: static-check
 static-check: ## Run verification script for boilerplate, codegen, gofmt, golint, lualint and chart-lint.
-ifeq ($(USE_DOCKER), true)
 	@build/run-in-docker.sh \
 		hack/verify-all.sh
-else
-	@hack/verify-all.sh
-endif
 
 .PHONY: test
-test: check-go-version ## Run go unit tests.
-ifeq ($(USE_DOCKER), true)
+test: ## Run go unit tests.
 	@build/run-in-docker.sh \
 		PKG=$(PKG) \
 		ARCH=$(ARCH) \
@@ -137,32 +113,21 @@ ifeq ($(USE_DOCKER), true)
 		TAG=$(TAG) \
 		GOBUILD_FLAGS=$(GOBUILD_FLAGS) \
 		build/test.sh
-else
-	@build/test.sh
-endif
 
 .PHONY: lua-test
 lua-test: ## Run lua unit tests.
-ifeq ($(USE_DOCKER), true)
 	@build/run-in-docker.sh \
 		BUSTED_ARGS=$(BUSTED_ARGS) \
 		build/test-lua.sh
-else
-	@build/test-lua.sh
-endif
 
 .PHONY: e2e-test
-e2e-test: check-go-version ## Run e2e tests (expects access to a working Kubernetes cluster).
+e2e-test: ## Run e2e tests (expects access to a working Kubernetes cluster).
 	@build/run-e2e-suite.sh
 
 .PHONY: e2e-test-binary
-e2e-test-binary: check-go-version ## Build ginkgo binary for e2e tests.
-ifeq ($(USE_DOCKER), true)
+e2e-test-binary: ## Build binary for e2e tests.
 	@build/run-in-docker.sh \
 		ginkgo build ./test/e2e
-else
-	@ginkgo build ./test/e2e
-endif
 
 .PHONY: print-e2e-suite
 print-e2e-suite: e2e-test-binary ## Prints information about the suite of e2e tests.
@@ -170,7 +135,7 @@ print-e2e-suite: e2e-test-binary ## Prints information about the suite of e2e tests.
 	hack/print-e2e-suite.sh
 
 .PHONY: cover
-cover: check-go-version ## Run go coverage unit tests.
+cover: ## Run go coverage unit tests.
 	@build/cover.sh
 	echo "Uploading coverage results..."
 	@curl -s https://codecov.io/bash | bash
@@ -187,7 +152,7 @@ check_dead_links: ## Check if the documentation contains dead links.
 	--allow-redirect $(shell find $$PWD -mindepth 1 -name "*.md" -printf '%P\n' | grep -v vendor | grep -v Changelog.md)
 
 .PHONY: dev-env
-dev-env: check-go-version ## Starts a local Kubernetes cluster using kind, building and deploying the ingress controller.
+dev-env: ## Starts a local Kubernetes cluster using kind, building and deploying the ingress controller.
 	@build/dev-env.sh
 
 .PHONY: dev-env-stop
@@ -202,7 +167,7 @@ live-docs: ## Build and launch a local copy of the documentation website in http
 		squidfunk/mkdocs-material:5.2.3
 
 .PHONY: misspell
-misspell: check-go-version ## Check for spelling errors.
+misspell: ## Check for spelling errors.
 	@go get github.com/client9/misspell/cmd/misspell
 	misspell \
 		-locale US \
@@ -210,7 +175,7 @@ misspell: check-go-version ## Check for spelling errors.
 		cmd/* internal/* deploy/* docs/* design/* test/* README.md
 
 .PHONY: kind-e2e-test
-kind-e2e-test: check-go-version ## Run e2e tests using kind.
+kind-e2e-test: ## Run e2e tests using kind.
 	@test/e2e/run.sh
 
 .PHONY: kind-e2e-chart-tests
@@ -221,20 +186,9 @@ kind-e2e-chart-tests: ## Run helm chart e2e tests
 run-ingress-controller: ## Run the ingress controller locally using a kubectl proxy connection.
 	@build/run-ingress-controller.sh
 
-.PHONY: check-go-version
-check-go-version:
-ifeq ($(USE_DOCKER), true)
-	@build/run-in-docker.sh \
-		hack/check-go-version.sh
-else
-	@hack/check-go-version.sh
-endif
-
 .PHONY: ensure-buildx
 ensure-buildx:
 ifeq ($(DIND_TASKS),)
 	./hack/init-buildx.sh
 endif
 
 .PHONY: show-version
 show-version:
```
```diff
@@ -29,7 +29,6 @@ BGREEN='\e[32m'
 declare -a mandatory
 mandatory=(
   E2E_NODES
-  SLOW_E2E_THRESHOLD
 )
 
 missing=false
@@ -77,6 +76,5 @@ kubectl run --rm \
   --env="E2E_NODES=${E2E_NODES}" \
   --env="FOCUS=${FOCUS}" \
   --env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \
-  --env="SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD}" \
   --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \
   e2e --image=nginx-ingress-controller:e2e
```
```diff
@@ -37,6 +37,7 @@ trap cleanup EXIT
 E2E_IMAGE=${E2E_IMAGE:-gcr.io/k8s-staging-ingress-nginx/e2e-test-runner:v20200627-ingress-nginx-2.9.0-9-ga003eabd5}
 
 DOCKER_OPTS=${DOCKER_OPTS:-}
+DOCKER_IN_DOCKER_ENABLED=${DOCKER_IN_DOCKER_ENABLED:-}
 
 KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd -P)
 
@@ -51,17 +52,21 @@ fi
 # create output directory as current user to avoid problem with docker.
 mkdir -p "${KUBE_ROOT}/bin" "${KUBE_ROOT}/bin/${ARCH}"
 
-docker run \
-  --tty \
-  --rm \
-  ${DOCKER_OPTS} \
-  -e GOCACHE="/go/src/${PKG}/.cache" \
-  -e DIND_TASKS=0 \
-  -v "${HOME}/.kube:${HOME}/.kube" \
-  -v "${KUBE_ROOT}:/go/src/${PKG}" \
-  -v "${KUBE_ROOT}/bin/${ARCH}:/go/bin/linux_${ARCH}" \
-  -v "/var/run/docker.sock:/var/run/docker.sock" \
-  -v "${INGRESS_VOLUME}:/etc/ingress-controller/" \
-  -w "/go/src/${PKG}" \
-  -u $(id -u ${USER}):$(id -g ${USER}) \
-  ${E2E_IMAGE} /bin/bash -c "${FLAGS}"
+if [[ "$DOCKER_IN_DOCKER_ENABLED" == "true" ]]; then
+  /bin/bash -c "${FLAGS}"
+else
+  docker run \
+    --tty \
+    --rm \
+    ${DOCKER_OPTS} \
+    -e GOCACHE="/go/src/${PKG}/.cache" \
+    -e DOCKER_IN_DOCKER_ENABLED="true" \
+    -v "${HOME}/.kube:${HOME}/.kube" \
+    -v "${KUBE_ROOT}:/go/src/${PKG}" \
+    -v "${KUBE_ROOT}/bin/${ARCH}:/go/bin/linux_${ARCH}" \
+    -v "/var/run/docker.sock:/var/run/docker.sock" \
+    -v "${INGRESS_VOLUME}:/etc/ingress-controller/" \
+    -w "/go/src/${PKG}" \
+    -u $(id -u ${USER}):$(id -g ${USER}) \
+    ${E2E_IMAGE} /bin/bash -c "${FLAGS}"
+fi
```
````diff
@@ -74,10 +74,10 @@ To find the registry simply run: `docker system info | grep Registry`
 The e2e test image can also be built through the Makefile.
 
 ```console
-$ make -C test/e2e-image image
+$ make -C test/e2e-image build
 ```
 
 Then you can load the docker image using kind:
 
 ```console
 $ kind load docker-image --name="ingress-nginx-dev" nginx-ingress-controller:e2e
 ```
````
go.sum (19 lines changed)

```diff
@@ -89,6 +89,7 @@ github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlR
 github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
 github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
@@ -102,12 +103,15 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -132,6 +136,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QL
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k=
 github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
@@ -338,9 +343,12 @@ github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -363,6 +371,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
 github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
 github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -583,6 +592,7 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -628,6 +638,7 @@ github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLY
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -652,6 +663,7 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
 github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -666,7 +678,9 @@ github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZ
 github.com/zakjan/cert-chain-resolver v0.0.0-20200409100953-fa92b0b5236f h1:HLON7COPorM4TiXxq3waC3MvwowdrnB37hpHpnjzHXU=
 github.com/zakjan/cert-chain-resolver v0.0.0-20200409100953-fa92b0b5236f/go.mod h1:KNkcm66cr4ilOiEcjydK+tc2ShPUhqmuoXCljXUBPu8=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -882,6 +896,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=
 gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -926,6 +941,7 @@ k8s.io/component-base v0.18.4 h1:Kr53Fp1iCGNsl9Uv4VcRvLy7YyIqi9oaJOQ7SXtKI98=
 k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk=
+k8s.io/cri-api v0.18.4 h1:9TUrAJ/+JjUXNtCc8MLXoEQ9XTZCbm0GTYnQPOssg8o=
 k8s.io/cri-api v0.18.4/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s=
 k8s.io/csi-translation-lib v0.18.4 h1:nn5FktCrz3L/gf/v7k3m6rRXmZd/KSlePBhLT2dWHb8=
 k8s.io/csi-translation-lib v0.18.4/go.mod h1:FTci2m8/3oN8E+8OyblBXei8w4mwbiH4boNPeob4piE=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
@@ -940,7 +956,9 @@ k8s.io/kube-controller-manager v0.18.4/go.mod h1:GrY1S0F7zA0LQlt0ApOLt4iMpphKTk3
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/kube-proxy v0.18.4/go.mod h1:h2c+ckQC1XpybDs53mWhLCvvM6txduWVLPQwwvGqR9M=
+k8s.io/kube-scheduler v0.18.4 h1:pXlUSGJ7Ih4AapVnoEHnindPjJ15avwOwmrQswFwtzA=
 k8s.io/kube-scheduler v0.18.4/go.mod h1:vRFb/8Yi7hh670beaPrXttMpjt7H8EooDkgwFm8ts4k=
+k8s.io/kubectl v0.18.4 h1:l9DUYPTEMs1+qNtoqPpTyaJOosvj7l7tQqphCO1K52s=
 k8s.io/kubectl v0.18.4/go.mod h1:EzB+nfeUWk6fm6giXQ8P4Fayw3dsN+M7Wjy23mTRtB0=
 k8s.io/kubelet v0.18.4/go.mod h1:D0V9JYaTJRF+ry+9JfnM4uyg3ySRLQ02XjfQ5f2u4CM=
 k8s.io/kubernetes v1.18.4 h1:AYtJ24PIT91P1K8ekCrvay8LK8WctWhC5+NI0HZ8sqE=
@@ -965,6 +983,7 @@ mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskX
 pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732 h1:SAElp8THCfmBdM+4lmWX5gebiSSkEr7PAYDVF91qpfg=
 pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732/go.mod h1:lpvCfhqEHNJSSpG5R5A2EgsVzG8RTt4RfPoQuRAcDmg=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
 sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM=
 sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo=
```
```diff
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ -n "$DEBUG" ]; then
-  set -x
-fi
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-MINIMUM_GO_VERSION=go1.13
-
-if [[ -z "$(command -v go)" ]]; then
-  echo "
-Can't find 'go' in PATH, please fix and retry.
-See http://golang.org/doc/install for installation instructions.
-"
-  exit 1
-fi
-
-IFS=" " read -ra go_version <<< "$(go version)"
-
-if [[ "${MINIMUM_GO_VERSION}" != $(echo -e "${MINIMUM_GO_VERSION}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
-  echo "
-Detected go version: ${go_version[*]}.
-ingress-nginx requires ${MINIMUM_GO_VERSION} or greater.
-
-Please install ${MINIMUM_GO_VERSION} or later.
-"
-  exit 1
-fi
```
```diff
@@ -16,15 +16,14 @@
 
 set -o errexit
 set -o nounset
 set -o pipefail
 
 SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
 CODEGEN_VERSION=$(grep 'k8s.io/code-generator' go.sum | awk '{print $2}' | sed 's/\/go.mod//g' | head -1)
 CODEGEN_PKG=$(echo `go env GOPATH`"/pkg/mod/k8s.io/code-generator@${CODEGEN_VERSION}")
 
 if [[ ! -d ${CODEGEN_PKG} ]]; then
-  echo "${CODEGEN_PKG} is missing. Running 'go mod download'."
-  go mod download
+  echo "${CODEGEN_PKG} is missing. Running 'go mod download'."
+  go mod download
 fi
 
 # Ensure we can execute.
@@ -42,4 +41,4 @@ ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \
   k8s.io/ingress-nginx/internal k8s.io/ingress-nginx/internal \
   .:ingress \
   --output-base "$(dirname ${BASH_SOURCE})/../../.." \
-  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate/boilerplate.go.txt
+  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate/boilerplate.go.txt
```
```diff
@@ -51,7 +51,7 @@ if $SILENT ; then
   echo "Running in the silent mode, run with -v if you want to see script logs."
 fi
 
-EXCLUDE="verify-all.sh verify-codegen.sh"
+EXCLUDE="verify-all.sh"
 
 ret=0
 for t in `ls $KUBE_ROOT/hack/verify-*.sh`
```
```diff
@@ -19,14 +19,12 @@ set -o nounset
 set -o pipefail
 
 SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
-SCRIPT_BASE=${SCRIPT_ROOT}/../..
 
 DIFFROOT="${SCRIPT_ROOT}/internal"
-TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/internal"
-_tmp="${SCRIPT_ROOT}/_tmp"
+TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp"
 
 cleanup() {
-  rm -rf "${_tmp}"
+  rm -rf "${TMP_DIFFROOT}"
 }
 trap "cleanup" EXIT SIGINT
 
@@ -38,10 +36,10 @@ cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
 "${SCRIPT_ROOT}/hack/update-codegen.sh"
 echo "diffing ${DIFFROOT} against freshly generated codegen"
 ret=0
-diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
+diff -Naupr --no-dereference "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=1
 
 cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
-if [[ $ret -eq 0 ]]
-then
+if [[ $ret -eq 0 ]]; then
   echo "${DIFFROOT} up to date."
 else
   echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh"
```
```diff
@@ -1,24 +1,8 @@
 .PHONY: all
 all: image
 
 DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
 
-# Use docker to run makefile tasks
-USE_DOCKER ?= true
-
-# Disable run docker tasks if running in prow.
-# only checks the existence of the variable, not the value.
-ifdef DIND_TASKS
-USE_DOCKER=false
-endif
-
 .PHONY: image
 image:
-ifeq ($(USE_DOCKER), true)
-	@$(DIR)/../../build/run-in-docker.sh make e2e-test-binary
-else
-	@make -C $(DIR)/../../ e2e-test-binary
-endif
+	make -C $(DIR)/../../ e2e-test-binary
 
 	cp $(DIR)/../e2e/e2e.test .
 	cp $(DIR)/../e2e/wait-for-nginx.sh .
@@ -30,7 +14,9 @@ endif
 	docker build \
 		--tag nginx-ingress-controller:e2e .
 
-.PHONY: clean
 clean:
 	rm -rf _cache e2e.test kubectl cluster ginkgo
 	docker rmi -f nginx-ingress-controller:e2e || true
+
+
+.PHONY: image clean
```
```diff
@@ -19,48 +19,37 @@ set -e
 NC='\e[0m'
 BGREEN='\e[32m'
 
-SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-50}
+SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-5}
 FOCUS=${FOCUS:-.*}
 E2E_NODES=${E2E_NODES:-5}
 E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-""}
 
 if [ ! -f "${HOME}/.kube/config" ]; then
   kubectl config set-cluster dev --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt --embed-certs=true --server="https://kubernetes.default/"
   kubectl config set-credentials user --token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
   kubectl config set-context default --cluster=dev --user=user
   kubectl config use-context default
 fi
 
 ginkgo_args=(
   "-randomizeSuites"
   "-randomizeAllSpecs"
   "-flakeAttempts=2"
   "-p"
   "-trace"
   "-progress"
   "-slowSpecThreshold=${SLOW_E2E_THRESHOLD}"
   "-r"
   "-succinct"
   "-timeout=45m" # Suite timeout should be lower than Prow job timeout to avoid abrupt termination
 )
 
 echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
 ginkgo "${ginkgo_args[@]}" \
-  -focus="${FOCUS}" \
-  -skip="\[Serial\]|\[MemoryLeak\]" \
-  -nodes="${E2E_NODES}" \
+  -focus="${FOCUS}" \
+  -skip="\[Serial\]|\[MemoryLeak\]" \
+  -nodes="${E2E_NODES}" \
   /e2e.test
 
 echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="\[Serial\]" \
-  -skip="\[MemoryLeak\]" \
-  -nodes=1 \
+ginkgo "${ginkgo_args[@]}" \
+  -focus="\[Serial\]" \
+  -skip="\[MemoryLeak\]" \
   /e2e.test
 
 if [[ ${E2E_CHECK_LEAKS} != "" ]]; then
   echo -e "${BGREEN}Running e2e test suite with tests that check for memory leaks...${NC}"
-  ginkgo "${ginkgo_args[@]}" \
-    -focus="\[MemoryLeak\]" \
-    -skip="\[Serial\]" \
-    -nodes=1 \
+  ginkgo "${ginkgo_args[@]}" \
+    -focus="\[MemoryLeak\]" \
+    -skip="\[Serial\]" \
   /e2e.test
 fi
```
```diff
@@ -86,7 +86,7 @@ var _ = framework.DescribeAnnotation("affinity session-cookie-name", func() {
 
 			_, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
 			assert.Nil(ginkgo.GinkgoT(), err, "updating ingress")
-			time.Sleep(5 * time.Second)
+			framework.Sleep()
 
 			f.HTTPTestClient().
 				GET("/").
```
```diff
@@ -21,7 +21,6 @@ import (
 	"net/http"
 	"reflect"
 	"strings"
-	"time"
 
 	"github.com/onsi/ginkgo"
 	"github.com/stretchr/testify/assert"
@@ -104,7 +103,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
 				replicas = replicas + 1
 				err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 				assert.Nil(ginkgo.GinkgoT(), err)
-				time.Sleep(3 * time.Second)
+				framework.Sleep()
 				response = request.WithCookies(cookies).Expect()
 				newHostName := getHostnameFromResponseBody(response.Body().Raw())
 				assert.Equal(ginkgo.GinkgoT(), originalHostName, newHostName,
@@ -116,7 +115,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
 				replicas = 0
 				err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 				assert.Nil(ginkgo.GinkgoT(), err)
-				time.Sleep(5 * time.Second)
+				framework.Sleep()
 
 				// validate, there is no backend to serve the request
 				response = request.WithCookies(cookies).Expect().Status(http.StatusServiceUnavailable)
@@ -125,13 +124,13 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
 				replicas = 2
 				err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
 				assert.Nil(ginkgo.GinkgoT(), err)
-				time.Sleep(5 * time.Second)
+				framework.Sleep()
 
 				// wait brand new backends to spawn
 				response = request.WithCookies(cookies).Expect()
 				try := 0
 				for (response.Raw().StatusCode == http.StatusServiceUnavailable) && (try < 30) {
-					time.Sleep(5 * time.Second)
+					framework.Sleep()
 					response = request.WithCookies(cookies).Expect()
 					try++
 				}
```
```diff
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
-	"time"
 
 	"github.com/onsi/ginkgo"
 	"github.com/stretchr/testify/assert"
@@ -81,7 +80,7 @@ var _ = framework.DescribeAnnotation("from-to-www-redirect", func() {
 				ing.Spec.TLS[0].SecretName,
 				ing.Namespace)
 			assert.Nil(ginkgo.GinkgoT(), err)
-			time.Sleep(5 * time.Second)
+			framework.Sleep()
 
 			f.WaitForNginxServer(toHost,
 				func(server string) bool {
```
```diff
@@ -77,7 +77,7 @@ var _ = framework.DescribeAnnotation("influxdb-*", func() {
 	var measurements string
 	var err error
 
-	err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+	err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
 		measurements, err = extractInfluxDBMeasurements(f)
 		if err != nil {
 			return false, nil
```
```diff
@@ -39,7 +39,7 @@ func startIngress(f *framework.Framework, annotations map[string]string) map[str
 		return strings.Contains(server, fmt.Sprintf("server_name %s ;", host))
 	})
 
-	err := wait.PollImmediate(framework.Poll, framework.DefaultTimeout, func() (bool, error) {
+	err := wait.Poll(framework.Poll, framework.DefaultTimeout, func() (bool, error) {
 
 		resp := f.HTTPTestClient().
 			GET("/").
```
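Several hunks in this commit swap `wait.PollImmediate` for `wait.Poll`. In k8s.io/apimachinery's `wait` package, `PollImmediate` evaluates the condition once before the first interval elapses, while `Poll` always waits one full interval before the first check — so the switch adds a small delay before the condition is first tried. A minimal standalone sketch of the difference (the always-true condition is hypothetical, just to expose the timing):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// wait.Poll sleeps one interval *before* the first evaluation, so even
	// an immediately-true condition returns only after roughly one second.
	err := wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
		return true, nil
	})
	fmt.Println(err, time.Since(start)) // <nil> ~1s

	start = time.Now()
	// wait.PollImmediate evaluates the condition first, so this returns at once.
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		return true, nil
	})
	fmt.Println(err, time.Since(start)) // <nil> ~0s
}
```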
```diff
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
-	"time"
 
 	"github.com/onsi/ginkgo"
 	"github.com/stretchr/testify/assert"
@@ -43,12 +42,12 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
 		args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
 		deployment.Spec.Template.Spec.Containers[0].Args = args
 		_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
-		time.Sleep(5 * time.Second)
+		framework.Sleep()
 		return err
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "updating deployment")
 
-	time.Sleep(5 * time.Second)
+	framework.Sleep()
 
 	f.WaitForNginxServer("_",
 		func(server string) bool {
```
```diff
@@ -59,6 +59,8 @@ var _ = framework.IngressNginxDescribe("[Default Backend]", func() {
 			{"basic HTTPS POST request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTPS, "POST", "/demo", http.StatusNotFound},
 		}
 
+		framework.Sleep()
+
 		for _, test := range testCases {
 			ginkgo.By(test.Name)
 
```
```diff
@@ -75,7 +75,7 @@ func (f *Framework) NewNewFastCGIHelloServerDeploymentWithReplicas(replicas int3
 
 	d := f.EnsureDeployment(deployment)
 
-	err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")
```
```diff
@@ -24,6 +24,9 @@ import (
 	"github.com/gavv/httpexpect/v2"
 	"github.com/onsi/ginkgo"
 	"github.com/pkg/errors"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -37,6 +40,7 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/klog"
+	kubeframework "k8s.io/kubernetes/test/e2e/framework"
 )
 
 // RequestScheme define a scheme used in a test request.
@@ -72,22 +76,8 @@ type Framework struct {
 func NewDefaultFramework(baseName string) *Framework {
 	defer ginkgo.GinkgoRecover()
 
-	kubeConfig, err := restclient.InClusterConfig()
-	if err != nil {
-		panic(err.Error())
-	}
-	assert.Nil(ginkgo.GinkgoT(), err, "creating kubernetes API client configuration")
-
-	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
-	assert.Nil(ginkgo.GinkgoT(), err, "creating Kubernetes API client")
-
-	_, isIngressV1Ready := k8s.NetworkingIngressAvailable(kubeClient)
-
 	f := &Framework{
-		BaseName:         baseName,
-		KubeConfig:       kubeConfig,
-		KubeClientSet:    kubeClient,
-		IsIngressV1Ready: isIngressV1Ready,
+		BaseName: baseName,
 	}
 
 	ginkgo.BeforeEach(f.BeforeEach)
```
```diff
@@ -98,63 +88,74 @@ func NewDefaultFramework(baseName string) *Framework {
 
 // BeforeEach gets a client and makes a namespace.
 func (f *Framework) BeforeEach() {
-	ingressNamespace, err := CreateKubeNamespace(f.BaseName, f.KubeClientSet)
-	assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")
+	var err error
 
-	f.Namespace = ingressNamespace
+	if f.KubeClientSet == nil {
+		f.KubeConfig, err = kubeframework.LoadConfig()
+		assert.Nil(ginkgo.GinkgoT(), err, "loading a kubernetes client configuration")
+		f.KubeClientSet, err = kubernetes.NewForConfig(f.KubeConfig)
+		assert.Nil(ginkgo.GinkgoT(), err, "creating a kubernetes client")
+
+		_, isIngressV1Ready := k8s.NetworkingIngressAvailable(f.KubeClientSet)
+		f.IsIngressV1Ready = isIngressV1Ready
+	}
+
+	f.Namespace, err = CreateKubeNamespace(f.BaseName, f.KubeClientSet)
+	assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")
 
 	err = f.newIngressController(f.Namespace, f.BaseName)
 	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")
 
-	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
 		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
-
-	// wait before any request
-	time.Sleep(5 * time.Second)
 }
 
 // AfterEach deletes the namespace, after reading its events.
 func (f *Framework) AfterEach() {
-	if ginkgo.CurrentGinkgoTestDescription().Failed {
-		pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-		if err != nil {
-			Logf("Unexpected error searching for ingress controller pod: %v", err)
-			return
-		}
+	defer func(kubeClient kubernetes.Interface, ns string) {
+		err := deleteKubeNamespace(kubeClient, ns)
+		assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+	}(f.KubeClientSet, f.Namespace)
 
-		cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf")
-		o, err := f.ExecCommand(pod, cmd)
-		if err != nil {
-			Logf("Unexpected error obtaining nginx.conf file: %v", err)
-			return
-		}
-
-		ginkgo.By("Dumping NGINX configuration after failure")
-		Logf("%v", o)
-
-		log, err := f.NginxLogs()
-		if err != nil {
-			Logf("Unexpected error obtaining NGINX logs: %v", err)
-			return
-		}
-
-		ginkgo.By("Dumping NGINX logs")
-		Logf("%v", log)
-
-		o, err = f.NamespaceContent()
-		if err != nil {
-			Logf("Unexpected error obtaining namespace information: %v", err)
-			return
-		}
-
-		ginkgo.By("Dumping namespace content")
-		Logf("%v", o)
+	if !ginkgo.CurrentGinkgoTestDescription().Failed {
+		return
 	}
 
-	err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace)
-	assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
+	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
+	if err != nil {
+		Logf("Unexpected error searching for ingress controller pod: %v", err)
+		return
+	}
+
+	cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf")
+	o, err := f.ExecCommand(pod, cmd)
+	if err != nil {
+		Logf("Unexpected error obtaining nginx.conf file: %v", err)
+		return
+	}
+
+	ginkgo.By("Dumping NGINX configuration after failure")
+	Logf("%v", o)
+
+	log, err := f.NginxLogs()
+	if err != nil {
+		Logf("Unexpected error obtaining NGINX logs: %v", err)
+		return
+	}
+
+	ginkgo.By("Dumping NGINX logs")
+	Logf("%v", log)
+
+	o, err = f.NamespaceContent()
+	if err != nil {
+		Logf("Unexpected error obtaining namespace information: %v", err)
+		return
+	}
+
+	ginkgo.By("Dumping namespace content")
+	Logf("%v", o)
 }
 
 // IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
```
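The reworked `AfterEach` defers the namespace deletion so it runs even when a test fails, after the NGINX configuration, logs, and namespace content have been dumped. It leans on a standard Go property: arguments to a deferred call are evaluated when the `defer` statement executes, which is why `f.KubeClientSet` and `f.Namespace` are passed in explicitly rather than captured by the closure. A minimal illustration of that property (hypothetical values, unrelated to the framework):

```go
package main

import "fmt"

func main() {
	msg := "before"
	// Arguments to a deferred call are evaluated at the point of the
	// defer statement, not when the deferred function finally runs.
	defer func(m string) { fmt.Println("deferred saw:", m) }(msg)
	msg = "after"
	fmt.Println("current value:", msg)
	// Output:
	// current value: after
	// deferred saw: before
}
```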
```diff
@@ -188,20 +189,10 @@ func (f *Framework) GetNginxIP() string {
 }
 
 // GetNginxPodIP returns the IP addresses of the running pods
-func (f *Framework) GetNginxPodIP() []string {
-	e, err := f.KubeClientSet.
-		CoreV1().
-		Endpoints(f.Namespace).
-		Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
-	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX IP address")
-	eips := make([]string, 0)
-	for _, s := range e.Subsets {
-		for _, a := range s.Addresses {
-			eips = append(eips, a.IP)
-		}
-	}
-
-	return eips
+func (f *Framework) GetNginxPodIP() string {
+	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
+	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX Pod")
+	return pod.Status.PodIP
 }
 
 // GetURL returns the URL should be used to make a request to NGINX
```
```diff
@@ -214,7 +205,7 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
 func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
 	err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
-	time.Sleep(5 * time.Second)
+	Sleep()
 }
 
 // WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
```
```diff
@@ -334,13 +325,15 @@ func (f *Framework) SetNginxConfigMapData(cmData map[string]string) {
 
 	cfgMap.Data = cmData
 
-	_, err = f.KubeClientSet.
-		CoreV1().
-		ConfigMaps(f.Namespace).
-		Update(context.TODO(), cfgMap, metav1.UpdateOptions{})
-	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
+	fn := func() {
+		_, err = f.KubeClientSet.
+			CoreV1().
+			ConfigMaps(f.Namespace).
+			Update(context.TODO(), cfgMap, metav1.UpdateOptions{})
+		assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
+	}
 
-	time.Sleep(5 * time.Second)
+	f.waitForReload(fn)
 }
 
 // CreateConfigMap creates a new configmap in the current namespace
```
```diff
@@ -363,13 +356,60 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 
 	config.Data[key] = value
 
-	_, err = f.KubeClientSet.
-		CoreV1().
-		ConfigMaps(f.Namespace).
-		Update(context.TODO(), config, metav1.UpdateOptions{})
-	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
+	fn := func() {
+		_, err = f.KubeClientSet.
+			CoreV1().
+			ConfigMaps(f.Namespace).
+			Update(context.TODO(), config, metav1.UpdateOptions{})
+		assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
+	}
 
-	time.Sleep(5 * time.Second)
+	Sleep(1)
+	f.waitForReload(fn)
+}
+
+func (f *Framework) waitForReload(fn func()) {
+	reloadCount := f.getReloadCount()
+
+	fn()
+
+	var count int
+	err := wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
+		// most of the cases reload the ingress controller
+		// in cases where the value is not modified we could wait forever
+		if count > 4 {
+			return true, nil
+		}
+
+		count++
+
+		return (f.getReloadCount() > reloadCount), nil
+	})
+	assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress controller reload")
+}
+
+func (f *Framework) getReloadCount() int {
+	ip := f.GetNginxPodIP()
+	mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
+	assert.Nil(ginkgo.GinkgoT(), err)
+	assert.NotNil(ginkgo.GinkgoT(), mf)
+
+	rc0, err := extractReloadCount(mf)
+	assert.Nil(ginkgo.GinkgoT(), err)
+
+	return int(rc0)
+}
+
+func extractReloadCount(mf *dto.MetricFamily) (float64, error) {
+	vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
+		Timestamp: model.Now(),
+	}, mf)
+
+	if err != nil {
+		return 0, err
+	}
+
+	return float64(vec[0].Value), nil
 }
 
 // DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up.
```
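The new `waitForReload` helper replaces fixed sleeps with a snapshot-mutate-poll pattern: it reads the controller's reload counter (the `nginx_ingress_controller_success` metric), applies the mutation, then polls until the counter moves, giving up after a few polls so a change that never triggers a reload cannot block forever. A self-contained sketch of the same pattern, with an in-memory counter standing in for the metric (names here are illustrative, not from the repository):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// reloads stands in for the controller's reload counter; in the real
// framework it is read from the nginx_ingress_controller_success metric.
var reloads int64

func getReloadCount() int64 { return atomic.LoadInt64(&reloads) }

// waitForReload applies fn, then polls until the counter moves past the
// snapshot taken beforehand (or a small retry budget is exhausted).
func waitForReload(fn func()) {
	before := getReloadCount()
	fn()
	for i := 0; i < 5; i++ {
		if getReloadCount() > before {
			fmt.Println("reload observed")
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("no reload observed; giving up")
}

func main() {
	waitForReload(func() {
		atomic.AddInt64(&reloads, 1) // a change that triggers a "reload"
	})
}
```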
```diff
@@ -451,7 +491,7 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
 		}
 	}
 
-	err = WaitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
+	err = waitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(deployment.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	if err != nil {
@@ -485,7 +525,7 @@ func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name st
 		return err
 	}
 
-	time.Sleep(5 * time.Second)
+	Sleep()
 	return nil
 }
 
```
```diff
@@ -621,3 +661,18 @@ func newSingleIngress(name, ns string, annotations map[string]string, spec netwo
 
 	return ing
 }
+
+// defaultWaitDuration default sleep time for operations related
+// to the API server and NGINX reloads.
+var defaultWaitDuration = 5 * time.Second
+
+// Sleep pauses the current goroutine for at least the duration d.
+// If no duration is defined, it uses a default
+func Sleep(duration ...time.Duration) {
+	sleepFor := defaultWaitDuration
+	if len(duration) != 0 {
+		sleepFor = duration[0]
+	}
+
+	time.Sleep(sleepFor)
+}
```
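`Sleep` is variadic: called with no argument it falls back to the 5-second default, and any explicit argument is a `time.Duration`. One subtlety worth noting when reading calls like `Sleep(1)` earlier in this diff: an untyped integer constant converts to `time.Duration` in nanoseconds, so `Sleep(1)` pauses for one nanosecond, not one second. A small runnable sketch mirroring the helper above:

```go
package main

import (
	"fmt"
	"time"
)

var defaultWaitDuration = 5 * time.Second

// Sleep mirrors the framework helper: it pauses for the given duration,
// falling back to the default when none is supplied.
func Sleep(duration ...time.Duration) {
	sleepFor := defaultWaitDuration
	if len(duration) != 0 {
		sleepFor = duration[0]
	}
	time.Sleep(sleepFor)
}

func main() {
	start := time.Now()
	Sleep(10 * time.Millisecond) // explicit duration
	fmt.Println("explicit:", time.Since(start))

	// Caution: an untyped constant converts to time.Duration directly,
	// so Sleep(1) sleeps one *nanosecond*, not one second.
	start = time.Now()
	Sleep(1)
	fmt.Println("Sleep(1):", time.Since(start))
}
```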
```diff
@@ -75,7 +75,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32
 
 	d := f.EnsureDeployment(deployment)
 
-	err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")
```
```diff
@@ -136,7 +136,7 @@ func (f *Framework) NewInfluxDBDeployment() {
 
 	d := f.EnsureDeployment(deployment)
 
-	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for influxdb pod to become ready")
```
```diff
@@ -72,18 +72,18 @@ func (f *Framework) GetIngress(namespace string, name string) *networking.Ingres
 
 // EnsureIngress creates an Ingress object and retunrs it, throws error if it already exists.
 func (f *Framework) EnsureIngress(ingress *networking.Ingress) *networking.Ingress {
-	err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
-	assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")
+	fn := func() {
+		err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
+		assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")
+	}
+
+	f.waitForReload(fn)
 
 	ing := f.GetIngress(f.Namespace, ingress.Name)
-
 	if ing.Annotations == nil {
 		ing.Annotations = make(map[string]string)
 	}
 
-	// creating an ingress requires a reload.
-	time.Sleep(5 * time.Second)
-
 	return ing
 }
 
@@ -93,13 +93,12 @@ func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingre
 	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress")
 
 	ing := f.GetIngress(f.Namespace, ingress.Name)
-
 	if ing.Annotations == nil {
 		ing.Annotations = make(map[string]string)
 	}
 
 	// updating an ingress requires a reload.
-	time.Sleep(5 * time.Second)
+	Sleep()
 
 	return ing
 }
@@ -128,8 +127,8 @@ func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Depl
 	return d
 }
 
-// WaitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
-func WaitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
+// waitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
+func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
 	return wait.Poll(Poll, timeout, func() (bool, error) {
 		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
 		if err != nil {
@@ -215,6 +214,13 @@ func podRunningReady(p *core.Pod) (bool, error) {
 
 // GetIngressNGINXPod returns the ingress controller running pod
 func GetIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
+	err := waitForPodsReady(kubeClientSet, DefaultTimeout, 1, ns, metav1.ListOptions{
+		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	l, err := kubeClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
 		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
 	})
@@ -222,26 +228,15 @@ func GetIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Po
 		return nil, nil
 	}
 
-	if len(l.Items) == 0 {
-		return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
-	}
-
-	var pod *core.Pod
-
 	for _, p := range l.Items {
 		if strings.HasPrefix(p.GetName(), "nginx-ingress-controller") {
 			if isRunning, err := podRunningReady(&p); err == nil && isRunning {
-				pod = &p
-				break
+				return &p, nil
 			}
 		}
 	}
 
-	if pod == nil {
-		return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
-	}
-
-	return pod, nil
+	return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
 }
 
 func createDeploymentWithRetries(c kubernetes.Interface, namespace string, obj *appsv1.Deployment) error {
```
```diff
@@ -32,21 +32,10 @@ type TestContextType struct {
 // TestContext is the global client context for tests.
 var TestContext TestContextType
 
-// RegisterCommonFlags registers flags common to all e2e test suites.
-func RegisterCommonFlags() {
-	// Turn on verbose by default to get spec names
-	config.DefaultReporterConfig.Verbose = true
-
-	// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
+// registerCommonFlags registers flags common to all e2e test suites.
+func registerCommonFlags() {
 	config.GinkgoConfig.EmitSpecProgress = true
 
-	// Randomize specs as well as suites
-	config.GinkgoConfig.RandomizeAllSpecs = true
-
-	// Default SlowSpecThreshold is 5 seconds.
-	// Too low for the kind of operations we need to tests
-	config.DefaultReporterConfig.SlowSpecThreshold = 20
-
 	flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "http://127.0.0.1:8080", "The kubernetes host, or apiserver, to connect to")
 	//flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar)
 	flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kubernetes. If unset, will use value from 'current-context'")
@@ -54,6 +43,6 @@ func RegisterCommonFlags() {
 
 // RegisterParseFlags registers and parses flags for the test binary.
 func RegisterParseFlags() {
-	RegisterCommonFlags()
+	registerCommonFlags()
 	flag.Parse()
 }
```
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package framework
|
||||
|
||||
import (
|
||||
context2 "context"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
@ -93,8 +93,8 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error
|
|||
var got *corev1.Namespace
|
||||
var err error
|
||||
|
||||
err = wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) {
|
||||
got, err = c.CoreV1().Namespaces().Create(context2.TODO(), ns, metav1.CreateOptions{})
|
||||
err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
|
||||
got, err = c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
Logf("Unexpected error while creating namespace: %v", err)
|
||||
return false, nil
|
||||
|
@ -107,11 +107,11 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error
|
|||
return got.Name, nil
|
||||
}
|
||||
|
||||
// DeleteKubeNamespace deletes a namespace and all the objects inside
|
||||
func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
||||
// deleteKubeNamespace deletes a namespace and all the objects inside
|
||||
func deleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
||||
grace := int64(0)
|
||||
pb := metav1.DeletePropagationBackground
|
||||
return c.CoreV1().Namespaces().Delete(context2.TODO(), namespace, metav1.DeleteOptions{
|
||||
return c.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &grace,
|
||||
PropagationPolicy: &pb,
|
||||
})
|
||||
|
@ -119,12 +119,12 @@ func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
|||
|
||||
// WaitForKubeNamespaceNotExist waits until a namespaces is not present in the cluster
|
||||
func WaitForKubeNamespaceNotExist(c kubernetes.Interface, namespace string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, namespaceNotExist(c, namespace))
|
||||
return wait.Poll(Poll, DefaultTimeout, namespaceNotExist(c, namespace))
|
||||
}
|
||||
|
||||
func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
_, err := c.CoreV1().Namespaces().Get(context2.TODO(), namespace, metav1.GetOptions{})
|
||||
_, err := c.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
@ -137,12 +137,12 @@ func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionF
|
|||
|
||||
// WaitForNoPodsInNamespace waits until there are no pods running in a namespace
|
||||
func WaitForNoPodsInNamespace(c kubernetes.Interface, namespace string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, noPodsInNamespace(c, namespace))
|
||||
return wait.Poll(Poll, DefaultTimeout, noPodsInNamespace(c, namespace))
|
||||
}
|
||||
|
||||
func noPodsInNamespace(c kubernetes.Interface, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
items, err := c.CoreV1().Pods(namespace).List(context2.TODO(), metav1.ListOptions{})
|
||||
items, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
@@ -167,17 +167,17 @@ func WaitForPodRunningInNamespace(c kubernetes.Interface, pod *corev1.Pod) error

}

func waitTimeoutForPodRunningInNamespace(c kubernetes.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(Poll, DefaultTimeout, podRunning(c, podName, namespace))
	return wait.Poll(Poll, DefaultTimeout, podRunning(c, podName, namespace))
}

// WaitForSecretInNamespace waits a default amount of time until the specified secret is present in a particular namespace
func WaitForSecretInNamespace(c kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediate(Poll, DefaultTimeout, secretInNamespace(c, namespace, name))
	return wait.Poll(Poll, DefaultTimeout, secretInNamespace(c, namespace, name))
}

func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		s, err := c.CoreV1().Secrets(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
		s, err := c.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil
		}
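Note: the diff keeps DefaultTimeout in waitTimeoutForPodRunningInNamespace even though the function accepts a timeout parameter, so the argument is silently ignored. If that is unintentional, the fix relative to the code above would be a one-liner (a sketch, not part of this commit):

    func waitTimeoutForPodRunningInNamespace(c kubernetes.Interface, podName, namespace string, timeout time.Duration) error {
        // honor the caller-supplied timeout instead of the package default
        return wait.Poll(Poll, timeout, podRunning(c, podName, namespace))
    }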
@@ -194,7 +194,7 @@ func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc

// WaitForFileInFS waits a default amount of time until the specified file is present in the filesystem
func WaitForFileInFS(file string) error {
	return wait.PollImmediate(Poll, DefaultTimeout, fileInFS(file))
	return wait.Poll(Poll, DefaultTimeout, fileInFS(file))
}

func fileInFS(file string) wait.ConditionFunc {
@@ -218,12 +218,12 @@ func fileInFS(file string) wait.ConditionFunc {

// WaitForNoIngressInNamespace waits until there is no ingress object in a particular namespace
func WaitForNoIngressInNamespace(c kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediate(Poll, DefaultTimeout, noIngressInNamespace(c, namespace, name))
	return wait.Poll(Poll, DefaultTimeout, noIngressInNamespace(c, namespace, name))
}

func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
		ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
@@ -240,12 +240,12 @@ func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc

// WaitForIngressInNamespace waits until a particular ingress object exists in a namespace
func WaitForIngressInNamespace(c kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediate(Poll, DefaultTimeout, ingressInNamespace(c, namespace, name))
	return wait.Poll(Poll, DefaultTimeout, ingressInNamespace(c, namespace, name))
}

func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
		ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil
		}
@@ -262,7 +262,7 @@ func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc

func podRunning(c kubernetes.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context2.TODO(), podName, metav1.GetOptions{})
		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
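Note: the context2 identifier that disappears throughout this file suggests the old code imported the context package twice, once under an auto-generated alias, which Go permits. A compilable sketch of that assumed situation:

    package main

    import (
        "context"
        context2 "context" // duplicate import under an alias; dropped by this commit (assumption)
        "fmt"
    )

    func main() {
        // Both names refer to the same package.
        fmt.Println(context.TODO(), context2.TODO())
    }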
@@ -37,13 +37,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {

	ginkgo.BeforeEach(func() {
		f.UpdateNginxConfigMapData("worker-shutdown-timeout", "600s")

		f.NewSlowEchoDeployment()
	})

	ginkgo.It("should shutdown in less than 60 seconds without pending connections", func() {
		defer ginkgo.GinkgoRecover()

		f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, framework.SlowEchoService, 80, nil))

		f.WaitForNginxServer(host,
@@ -64,11 +61,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {

		assert.LessOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
	})

	type asyncResult struct {
		status int
	}

	ginkgo.It("should shutdown after waiting 60 seconds for pending connections to be closed", func() {
	ginkgo.It("should shutdown after waiting 60 seconds for pending connections to be closed", func(done ginkgo.Done) {
		defer ginkgo.GinkgoRecover()

		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
@@ -92,11 +85,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {

			return strings.Contains(server, "server_name shutdown")
		})

		result := make(chan *asyncResult)

		startTime := time.Now()

		go func(host string, c chan *asyncResult) {
		result := make(chan int)
		go func(host string, c chan int) {
			defer ginkgo.GinkgoRecover()

			resp := f.HTTPTestClient().
@@ -105,34 +97,19 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {

				Expect().
				Raw()

			code := 0
			if resp != nil {
				code = resp.StatusCode
			}

			c <- &asyncResult{code}
			c <- resp.StatusCode
		}(host, result)

		time.Sleep(5 * time.Second)
		framework.Sleep()

		f.ScaleDeploymentToZero("nginx-ingress-controller")

		ticker := time.NewTicker(time.Second * 10)
		assert.Equal(ginkgo.GinkgoT(), <-result, http.StatusOK, "expecting a valid response from HTTP request")
		assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
		close(done)
	}, 100)

		for {
			select {
			case res := <-result:
				assert.Equal(ginkgo.GinkgoT(), res.status, http.StatusOK, "expecting a valid response from HTTP request")
				assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
				ticker.Stop()
				return
			case <-ticker.C:
				framework.Logf("waiting for request completion after shutdown")
			}
		}
	})
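Note: the change above replaces a hand-rolled for/select polling loop with ginkgo v1's async-spec form: the spec body receives a Done channel, the trailing number is the timeout in seconds, and closing done marks the spec as finished. A self-contained sketch of the pattern (names are illustrative):

    package shutdown_test

    import "github.com/onsi/ginkgo"

    var _ = ginkgo.Describe("async spec", func() {
        ginkgo.It("collects a result before the timeout", func(done ginkgo.Done) {
            defer ginkgo.GinkgoRecover()

            result := make(chan int)
            go func() {
                defer ginkgo.GinkgoRecover()
                result <- 200 // stand-in for the HTTP status the real test sends
            }()

            <-result    // blocks until the goroutine reports back
            close(done) // signal completion; ginkgo fails the spec if this takes too long
        }, 100) // timeout in seconds
    })

Note also that the new goroutine sends resp.StatusCode directly; unlike the old nil-guarded version, this assumes the test client never returns a nil response.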
ginkgo.It("should shutdown after waiting 150 seconds for pending connections to be closed", func() {
|
||||
ginkgo.It("should shutdown after waiting 150 seconds for pending connections to be closed", func(done ginkgo.Done) {
|
||||
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
|
||||
func(deployment *appsv1.Deployment) error {
|
||||
grace := int64(3600)
|
||||
|
@ -153,11 +130,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
return strings.Contains(server, "server_name shutdown")
|
||||
})
|
||||
|
||||
result := make(chan *asyncResult)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
go func(host string, c chan *asyncResult) {
|
||||
result := make(chan int)
|
||||
go func(host string, c chan int) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
|
||||
resp := f.HTTPTestClient().
|
||||
|
@@ -166,30 +142,15 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {

				Expect().
				Raw()

			code := 0
			if resp != nil {
				code = resp.StatusCode
			}

			c <- &asyncResult{code}
			c <- resp.StatusCode
		}(host, result)

		time.Sleep(5 * time.Second)
		framework.Sleep()

		f.ScaleDeploymentToZero("nginx-ingress-controller")

		ticker := time.NewTicker(time.Second * 10)

		for {
			select {
			case res := <-result:
				assert.Equal(ginkgo.GinkgoT(), res.status, http.StatusOK, "expecting a valid response from HTTP request")
				assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 150, "waiting shutdown")
				ticker.Stop()
				return
			case <-ticker.C:
				framework.Logf("waiting for request completion after shutdown")
			}
		}
	})
		assert.Equal(ginkgo.GinkgoT(), <-result, http.StatusOK, "expecting a valid response from HTTP request")
		assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 150, "waiting shutdown")
		close(done)
	}, 200)
})
@@ -19,7 +19,6 @@ package gracefulshutdown

import (
	"net/http"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
@@ -55,7 +54,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] Graceful shutdown with pending

			Status(http.StatusOK)
	}()

	time.Sleep(1 * time.Second)
	framework.Sleep()
	f.DeleteNGINXPod(60)
	<-done
})
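Note: most files in this commit replace ad-hoc time.Sleep(...) calls with framework.Sleep(). The helper itself is not shown in the diff; a plausible sketch, assuming a package-level default that call sites can override (the real signature and default may differ):

    package framework

    import "time"

    // DefaultSleepDuration is an assumed default pause between test steps.
    const DefaultSleepDuration = 1 * time.Second

    // Sleep pauses the current goroutine for the default interval, or for the
    // first duration passed by the caller.
    func Sleep(duration ...time.Duration) {
        sleepFor := DefaultSleepDuration
        if len(duration) > 0 {
            sleepFor = duration[0]
        }
        time.Sleep(sleepFor)
    }

Centralizing the pause makes the wait tunable in one place, which matters because several of the replaced sleeps (1s, 2s, 5s) all collapse onto the same default.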
@@ -74,7 +74,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {

		time.Sleep(waitForLuaSync)

		ip := f.GetNginxPodIP()
		mf, err := f.GetMetric("nginx_ingress_controller_success", ip[0])
		mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
		assert.Nil(ginkgo.GinkgoT(), err)
		assert.NotNil(ginkgo.GinkgoT(), mf)
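Note: the call sites in this file change from ip[0] to ip, which implies GetNginxPodIP now returns a single string rather than a slice. A sketch of the assumed new shape (getIngressNGINXPod is a hypothetical helper standing in for whatever lookup the framework does):

    // GetNginxPodIP returns the IP address of the ingress controller pod.
    func (f *Framework) GetNginxPodIP() string {
        pod, err := getIngressNGINXPod(f.Namespace, f.KubeClientSet) // hypothetical lookup
        if err != nil {
            return ""
        }
        return pod.Status.PodIP
    }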
@@ -99,7 +99,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {

		assert.NotEmpty(ginkgo.GinkgoT(), log)

		ginkgo.By("skipping Nginx reload")
		mf, err = f.GetMetric("nginx_ingress_controller_success", ip[0])
		mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
		assert.Nil(ginkgo.GinkgoT(), err)
		assert.NotNil(ginkgo.GinkgoT(), mf)
|
@ -189,7 +189,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
ensureHTTPSRequest(f, fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host)
|
||||
|
||||
ip := f.GetNginxPodIP()
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
|
@ -204,7 +204,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
ginkgo.By("serving the default certificate on HTTPS endpoint")
|
||||
ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, "ingress.local")
|
||||
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
@@ -210,7 +210,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {

		err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil)
		assert.Nil(ginkgo.GinkgoT(), err)

		time.Sleep(1 * time.Second)
		framework.Sleep()

		output, err = f.ExecIngressPod(curlCmd)
		assert.Nil(ginkgo.GinkgoT(), err)
@@ -83,7 +83,7 @@ func smugglingRequest(host, addr string, port int) (string, error) {

	}

	// wait for /_hidden/index.html response
	time.Sleep(1 * time.Second)
	framework.Sleep()

	var buf = make([]byte, 1024)
	r := bufio.NewReader(conn)
@@ -21,7 +21,6 @@ import (

	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/stretchr/testify/assert"
@@ -271,7 +270,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() {

		_, err = f.KubeClientSet.CoreV1().Services(f.Namespace).Update(context.Background(), svc, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating httpbin service")

		time.Sleep(5 * time.Second)
		framework.Sleep()

		body = f.HTTPTestClient().
			GET("/get").
@@ -21,7 +21,7 @@ import (

	"fmt"
	"net/http"
	"strings"
	"time"
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/stretchr/testify/assert"
@@ -38,28 +38,32 @@ import (

var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
	f := framework.NewDefaultFramework("ingress-class")

	f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "ingress-nginx-class"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{"networking.k8s.io"},
			Resources: []string{"ingressclasses"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}, metav1.CreateOptions{})

	f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ingress-nginx-class",
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "ingress-nginx-class",
		},
	}, metav1.CreateOptions{})
	var doOnce sync.Once

	ginkgo.BeforeEach(func() {
		f.NewEchoDeploymentWithReplicas(1)

		doOnce.Do(func() {
			f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
				ObjectMeta: metav1.ObjectMeta{Name: "ingress-nginx-class"},
				Rules: []rbacv1.PolicyRule{{
					APIGroups: []string{"networking.k8s.io"},
					Resources: []string{"ingressclasses"},
					Verbs:     []string{"get", "list", "watch"},
				}},
			}, metav1.CreateOptions{})

			f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: "ingress-nginx-class",
				},
				RoleRef: rbacv1.RoleRef{
					APIGroup: "rbac.authorization.k8s.io",
					Kind:     "ClusterRole",
					Name:     "ingress-nginx-class",
				},
			}, metav1.CreateOptions{})
		})
	})

	ginkgo.Context("Without a specific ingress-class", func() {
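Note: the restructuring above moves the one-time RBAC setup out of the Describe body (where it ran at spec-tree construction time) into BeforeEach, guarded by sync.Once so it still runs only once across all specs. A minimal sketch of the pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var doOnce sync.Once
        for i := 0; i < 3; i++ { // stands in for repeated BeforeEach invocations
            doOnce.Do(func() {
                fmt.Println("creating cluster-wide RBAC objects") // runs exactly once
            })
        }
    }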
@@ -175,6 +179,8 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {

		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err)

		framework.Sleep()

		f.WaitForNginxConfiguration(func(cfg string) bool {
			return !strings.Contains(cfg, "server_name foo")
		})
@@ -277,7 +283,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {

			return strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
		})

		time.Sleep(2 * time.Second)
		framework.Sleep()

		f.HTTPTestClient().
			GET("/").
@@ -297,7 +303,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {

		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err)

		time.Sleep(2 * time.Second)
		framework.Sleep()

		f.WaitForNginxConfiguration(func(cfg string) bool {
			return !strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
@@ -319,7 +325,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {

		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err)

		time.Sleep(2 * time.Second)
		framework.Sleep()

		f.WaitForNginxConfiguration(func(cfg string) bool {
			return !strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
@@ -27,7 +27,6 @@ import (

	"os/exec"
	"strings"
	"syscall"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/stretchr/testify/assert"
@@ -126,7 +125,7 @@ var _ = framework.DescribeSetting("OCSP", func() {

		// give the lua request to the OCSP
		// URL time to finish and update the cache
		time.Sleep(5 * time.Second)
		framework.Sleep()

		// TODO: is it possible to avoid the second request?
		resp := f.HTTPTestClientWithTLSConfig(tlsConfig).
@@ -235,7 +234,7 @@ func prepareCertificates(namespace string) error {

		return err
	}

	time.Sleep(1 * time.Second)
	framework.Sleep()

	command = "cfssl gencert -remote=localhost -profile=server leaf_csr.json | cfssljson -bare leaf"
	ginkgo.By(fmt.Sprintf("running %v", command))
@@ -22,7 +22,6 @@ import (

	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/stretchr/testify/assert"
@@ -58,7 +57,7 @@ var _ = framework.IngressNginxDescribe("[SSL] secret update", func() {

			ing.Namespace)
		assert.Nil(ginkgo.GinkgoT(), err)

		time.Sleep(5 * time.Second)
		framework.Sleep()

		f.WaitForNginxServer(host,
			func(server string) bool {
@@ -70,7 +69,7 @@ var _ = framework.IngressNginxDescribe("[SSL] secret update", func() {

		assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
		assert.NotContains(ginkgo.GinkgoT(), log, fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace))

		time.Sleep(5 * time.Second)
		framework.Sleep()

		dummySecret.Data["some-key"] = []byte("some value")

@@ -78,7 +78,7 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {

	assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")

	// wait for the update, the nginx reload, and the new endpoint to become available
	time.Sleep(5 * time.Second)
	framework.Sleep()

	f.WaitForNginxConfiguration(
		func(cfg string) bool {