diff --git a/.github/actions/mkdocs/Dockerfile b/.github/actions/mkdocs/Dockerfile index 843eaf5ca..960e8b992 100644 --- a/.github/actions/mkdocs/Dockerfile +++ b/.github/actions/mkdocs/Dockerfile @@ -1,4 +1,4 @@ -FROM squidfunk/mkdocs-material:5.1.0 +FROM squidfunk/mkdocs-material:5.2.3 COPY action.sh /action.sh diff --git a/.luacheckrc b/.luacheckrc index 5d16ac1e3..a24399e26 100644 --- a/.luacheckrc +++ b/.luacheckrc @@ -1,7 +1,5 @@ std = 'ngx_lua' -globals = { - '_TEST' -} +max_line_length = 100 exclude_files = {'./rootfs/etc/nginx/lua/test/**/*.lua', './rootfs/etc/nginx/lua/plugins/**/test/**/*.lua'} files["rootfs/etc/nginx/lua/lua_ingress.lua"] = { ignore = { "122" }, diff --git a/Changelog.md b/Changelog.md index c324a46d6..e4a62ebcf 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,87 @@ # Changelog +### 0.33.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0` + +_New Features:_ + +- NGINX 1.19.0 +- TLSv1.3 is enabled by default +- Experimental support for s390x +- Allow combination of NGINX variables in annotation [upstream-hash-by](https://github.com/kubernetes/ingress-nginx/pull/5571) +- New setting to configure different access logs for http and stream sections: [http-access-log-path](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#http-access-log-path) and [stream-access-log-path](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#stream-access-log-path) options in configMap + +_Deprecations:_ + +- Setting [access-log-path](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#access-log-path) is deprecated and will be removed in 0.35.0. 
Please use [http-access-log-path](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#http-access-log-path) and [stream-access-log-path](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#stream-access-log-path) + +_Changes:_ + +- [X] [#5463](https://github.com/kubernetes/ingress-nginx/pull/5463) Wait before any request to the ingress controller pod +- [X] [#5488](https://github.com/kubernetes/ingress-nginx/pull/5488) Update kind +- [X] [#5491](https://github.com/kubernetes/ingress-nginx/pull/5491) Actually enable TLSv1.3 by default +- [X] [#5494](https://github.com/kubernetes/ingress-nginx/pull/5494) Add configuration option for the runAsUser parameter of the webhook patch job +- [X] [#5503](https://github.com/kubernetes/ingress-nginx/pull/5503) Update job-patchWebhook.yaml +- [X] [#5504](https://github.com/kubernetes/ingress-nginx/pull/5504) Add configuration option for the imagePullSecrets in the webhook jobs +- [X] [#5505](https://github.com/kubernetes/ingress-nginx/pull/5505) Update helm chart +- [X] [#5516](https://github.com/kubernetes/ingress-nginx/pull/5516) build: remove unnecessary tag line in e2e +- [X] [#5522](https://github.com/kubernetes/ingress-nginx/pull/5522) Remove duplicate annotation parsing for annotationAffinityCookieChangeOnFailure +- [X] [#5534](https://github.com/kubernetes/ingress-nginx/pull/5534) Add annotation ssl-prefer-server-ciphers. +- [X] [#5536](https://github.com/kubernetes/ingress-nginx/pull/5536) Fix error setting $service_name NGINX variable +- [X] [#5553](https://github.com/kubernetes/ingress-nginx/pull/5553) Check service If publish-service flag is defined +- [X] [#5571](https://github.com/kubernetes/ingress-nginx/pull/5571) feat: support the combination of Nginx variables for annotation upstream-hash-by. 
+- [X] [#5572](https://github.com/kubernetes/ingress-nginx/pull/5572) [chart] Add toleration support for admission webhooks +- [X] [#5578](https://github.com/kubernetes/ingress-nginx/pull/5578) Use image promoter to push images to gcr +- [X] [#5582](https://github.com/kubernetes/ingress-nginx/pull/5582) Allow pulling images by digest +- [X] [#5584](https://github.com/kubernetes/ingress-nginx/pull/5584) Add note about initial delay during first start +- [X] [#5586](https://github.com/kubernetes/ingress-nginx/pull/5586) Add MaxMind GeoIP2 Anonymous IP support +- [X] [#5589](https://github.com/kubernetes/ingress-nginx/pull/5589) Do not reload NGINX if master process dies +- [X] [#5596](https://github.com/kubernetes/ingress-nginx/pull/5596) Update go dependencies +- [X] [#5603](https://github.com/kubernetes/ingress-nginx/pull/5603) Update nginx to 1.19.0 +- [X] [#5604](https://github.com/kubernetes/ingress-nginx/pull/5604) Update debian-base image +- [X] [#5606](https://github.com/kubernetes/ingress-nginx/pull/5606) Update nginx image and go to 1.14.3 +- [X] [#5613](https://github.com/kubernetes/ingress-nginx/pull/5613) fix oauth2-proxy image repository +- [X] [#5614](https://github.com/kubernetes/ingress-nginx/pull/5614) Add support for s390x +- [X] [#5619](https://github.com/kubernetes/ingress-nginx/pull/5619) Use new multi-arch nginx image +- [X] [#5621](https://github.com/kubernetes/ingress-nginx/pull/5621) Update terraform build images +- [X] [#5624](https://github.com/kubernetes/ingress-nginx/pull/5624) feat: add lj-releng tool to check Lua code for finding the potential problems +- [X] [#5625](https://github.com/kubernetes/ingress-nginx/pull/5625) Update nginx image to use alpine 3.12 +- [X] [#5626](https://github.com/kubernetes/ingress-nginx/pull/5626) Update nginx image +- [X] [#5629](https://github.com/kubernetes/ingress-nginx/pull/5629) Build multi-arch images by default +- [X] [#5630](https://github.com/kubernetes/ingress-nginx/pull/5630) Fix makefile task 
names +- [X] [#5631](https://github.com/kubernetes/ingress-nginx/pull/5631) Update e2e image +- [X] [#5632](https://github.com/kubernetes/ingress-nginx/pull/5632) Update buildx progress configuration +- [X] [#5636](https://github.com/kubernetes/ingress-nginx/pull/5636) Enable coredumps for e2e tests +- [X] [#5637](https://github.com/kubernetes/ingress-nginx/pull/5637) Refactor build of docker images +- [X] [#5641](https://github.com/kubernetes/ingress-nginx/pull/5641) Add missing ARCH variable +- [X] [#5642](https://github.com/kubernetes/ingress-nginx/pull/5642) Fix dev-env makefile task +- [X] [#5643](https://github.com/kubernetes/ingress-nginx/pull/5643) Fix build of image on osx +- [X] [#5644](https://github.com/kubernetes/ingress-nginx/pull/5644) Remove copy of binaries and deprecated e2e task +- [X] [#5656](https://github.com/kubernetes/ingress-nginx/pull/5656) feat: add http-access-log-path and stream-access-log-path options in configMap +- [X] [#5659](https://github.com/kubernetes/ingress-nginx/pull/5659) Update cloud-build configuration +- [X] [#5660](https://github.com/kubernetes/ingress-nginx/pull/5660) Set missing USER in cloud-build +- [X] [#5661](https://github.com/kubernetes/ingress-nginx/pull/5661) Add missing REPO_INFO en variable to cloud-build +- [X] [#5662](https://github.com/kubernetes/ingress-nginx/pull/5662) Increase cloud-build timeout +- [X] [#5663](https://github.com/kubernetes/ingress-nginx/pull/5663) Fix cloud-timeout setting +- [X] [#5664](https://github.com/kubernetes/ingress-nginx/pull/5664) fix undefined variable $auth_cookie error due to when location is denied +- [X] [#5665](https://github.com/kubernetes/ingress-nginx/pull/5665) Fix: improve performance +- [X] [#5669](https://github.com/kubernetes/ingress-nginx/pull/5669) Serve correct TLS certificate for requests with uppercase host +- [X] [#5672](https://github.com/kubernetes/ingress-nginx/pull/5672) feat: enable lj-releng tool to lint lua code. 
+- [X] [#5684](https://github.com/kubernetes/ingress-nginx/pull/5684) Fix proxy_protocol duplication in listen definition + +_Documentation:_ + +- [X] [#5487](https://github.com/kubernetes/ingress-nginx/pull/5487) Add note about firewall ports for admission webhook +- [X] [#5512](https://github.com/kubernetes/ingress-nginx/pull/5512) Wrong filename in documantation example +- [X] [#5563](https://github.com/kubernetes/ingress-nginx/pull/5563) Use ingress-nginx-* naming in docs to match the default deployments +- [X] [#5566](https://github.com/kubernetes/ingress-nginx/pull/5566) Update configmap name in custom-headers/README.md +- [X] [#5639](https://github.com/kubernetes/ingress-nginx/pull/5639) Update timeout to align values +- [X] [#5646](https://github.com/kubernetes/ingress-nginx/pull/5646) Add minor doc fixes to user guide and chart readme +- [X] [#5652](https://github.com/kubernetes/ingress-nginx/pull/5652) Add documentation for loading e2e tests without using minikube +- [X] [#5677](https://github.com/kubernetes/ingress-nginx/pull/5677) Add URL to official grafana dashboards +- [X] [#5682](https://github.com/kubernetes/ingress-nginx/pull/5682) Fix typo + ### 0.32.0 **Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0` diff --git a/Makefile b/Makefile index 31821aa1a..8480b708b 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ endif SHELL=/bin/bash -o pipefail -o errexit # Use the 0.0 tag for testing, it shouldn't clobber any release builds -TAG ?= 0.32.0 +TAG ?= 0.33.0 # Use docker to run makefile tasks USE_DOCKER ?= true @@ -42,7 +42,7 @@ endif # Allow limiting the scope of the e2e tests. By default run everything FOCUS ?= .* # number of parallel test -E2E_NODES ?= 14 +E2E_NODES ?= 10 # slow test only if takes > 50s SLOW_E2E_THRESHOLD ?= 50 # run e2e test suite with tests that check for memory leaks? 
(default is false) @@ -61,76 +61,30 @@ endif REGISTRY ?= quay.io/kubernetes-ingress-controller -BASE_IMAGE ?= quay.io/kubernetes-ingress-controller/nginx -BASE_TAG ?= 5d67794f4fbf38ec6575476de46201b068eabf87 +BASE_IMAGE ?= quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f GOARCH=$(ARCH) -GOBUILD_FLAGS := -v # use vendor directory instead of go modules https://github.com/golang/go/wiki/Modules GO111MODULE=off -TEMP_DIR := $(shell mktemp -d) -DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile - help: ## Display this help @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -# internal task -.PHONY: sub-container-% -sub-container-%: - $(MAKE) ARCH=$* build container - -# internal task -.PHONY: sub-push-% -sub-push-%: ## Publish image for a particular arch. - $(MAKE) ARCH=$* push - -.PHONY: container -container: clean-container .container-$(ARCH) ## Build image for a particular arch. - -# internal task to build image for a particular arch. -.PHONY: .container-$(ARCH) -.container-$(ARCH): init-docker-buildx - mkdir -p $(TEMP_DIR)/rootfs - cp bin/$(ARCH)/nginx-ingress-controller $(TEMP_DIR)/rootfs/nginx-ingress-controller - cp bin/$(ARCH)/dbg $(TEMP_DIR)/rootfs/dbg - cp bin/$(ARCH)/wait-shutdown $(TEMP_DIR)/rootfs/wait-shutdown - - cp -RP rootfs/* $(TEMP_DIR)/rootfs - +.PHONY: image +image: clean-image ## Build image for a particular arch. echo "Building docker image ($(ARCH))..." 
- # buildx assumes images are multi-arch - docker buildx build \ - --pull \ - --load \ + @docker build \ --no-cache \ - --progress plain \ - --platform linux/$(ARCH) \ - --build-arg BASE_IMAGE="$(BASE_IMAGE)-$(ARCH):$(BASE_TAG)" \ + --build-arg BASE_IMAGE="$(BASE_IMAGE)" \ --build-arg VERSION="$(TAG)" \ - -t $(REGISTRY)/nginx-ingress-controller-${ARCH}:$(TAG) $(TEMP_DIR)/rootfs + --build-arg TARGETARCH="$(ARCH)" \ + -t $(REGISTRY)/nginx-ingress-controller:$(TAG) rootfs -.PHONY: clean-container -clean-container: ## Removes local image - echo "removing old image $(BASE_IMAGE)-$(ARCH):$(TAG)" - @docker rmi -f $(BASE_IMAGE)-$(ARCH):$(TAG) || true - -.PHONY: push -push: .push-$(ARCH) ## Publish image for a particular arch. - -# internal task -.PHONY: .push-$(ARCH) -.push-$(ARCH): - docker push $(REGISTRY)/nginx-ingress-controller-${ARCH}:$(TAG) - -.PHONY: push-manifest -push-manifest: - docker manifest create $(REGISTRY)/nginx-ingress-controller:$(TAG) \ - $(REGISTRY)/nginx-ingress-controller-amd64:$(TAG) \ - $(REGISTRY)/nginx-ingress-controller-arm:$(TAG) \ - $(REGISTRY)/nginx-ingress-controller-arm64:$(TAG) - docker manifest push --purge $(REGISTRY)/nginx-ingress-controller:$(TAG) +.PHONY: clean-image +clean-image: ## Removes local image + echo "removing old image $(REGISTRY)/nginx-ingress-controller:$(TAG)" + @docker rmi -f $(REGISTRY)/nginx-ingress-controller:$(TAG) || true .PHONY: build build: check-go-version ## Build ingress controller, debug tool and pre-stop hook. @@ -204,10 +158,6 @@ endif e2e-test: check-go-version ## Run e2e tests (expects access to a working Kubernetes cluster). @build/run-e2e-suite.sh -.PHONY: e2e-test-image -e2e-test-image: ## Build image for e2e tests. - @make -C test/e2e-image - .PHONY: e2e-test-binary e2e-test-binary: check-go-version ## Build ginkgo binary for e2e tests. ifeq ($(USE_DOCKER), true) @@ -255,7 +205,10 @@ dev-env-stop: ## Deletes local Kubernetes cluster created by kind. 
.PHONY: live-docs live-docs: ## Build and launch a local copy of the documentation website in http://localhost:3000 - @docker run --rm -it -p 8000:8000 -v ${PWD}:/docs squidfunk/mkdocs-material:5.1.0 + @docker run --rm -it \ + -p 8000:8000 \ + -v ${PWD}:/docs \ + squidfunk/mkdocs-material:5.2.3 .PHONY: misspell misspell: check-go-version ## Check for spelling errors. @@ -292,7 +245,7 @@ ifeq ($(DIND_TASKS),) ifneq ($(shell docker buildx 2>&1 >/dev/null; echo $?),) $(error "buildx not available. Docker 19.03 or higher is required with experimental features enabled") endif - docker run --rm --privileged docker/binfmt:66f9012c56a8316f9244ffd7622d7c21c1f6f28d + docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64 docker buildx create --name ingress-nginx --use || true docker buildx inspect --bootstrap endif @@ -300,3 +253,24 @@ endif .PHONY: show-version show-version: echo -n $(TAG) + +PLATFORMS ?= amd64 arm arm64 s390x + +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +COMMA := , + +.PHONY: release # Build a multi-arch docker image +release: init-docker-buildx clean + echo "Building binaries..." + $(foreach PLATFORM,$(PLATFORMS), echo -n "$(PLATFORM)..."; ARCH=$(PLATFORM) make build;) + + echo "Building and pushing ingress-nginx image..." + @docker buildx build \ + --no-cache \ + --push \ + --progress plain \ + --platform $(subst $(SPACE),$(COMMA),$(PLATFORMS)) \ + --build-arg BASE_IMAGE="$(BASE_IMAGE)" \ + --build-arg VERSION="$(TAG)" \ + -t $(REGISTRY)/nginx-ingress-controller:$(TAG) rootfs diff --git a/build/build-ingress-controller.sh b/build/build-ingress-controller.sh index edd6af30c..033c66b26 100755 --- a/build/build-ingress-controller.sh +++ b/build/build-ingress-controller.sh @@ -38,9 +38,7 @@ if [ ! 
-f "${ENV_FILE}" ]; then fi # build local terraform image to build nginx -docker buildx build \ - --load \ - --platform linux/amd64 \ +docker build \ --tag build-ingress-controller-terraform $DIR/images/ingress-controller # build nginx and publish docker images to quay.io. diff --git a/build/build-nginx-image.sh b/build/build-nginx-image.sh index 5de66beff..7f3a767b9 100755 --- a/build/build-nginx-image.sh +++ b/build/build-nginx-image.sh @@ -37,12 +37,7 @@ if [ ! -f "${ENV_FILE}" ]; then exit 1 fi -# build local terraform image to build nginx -export DOCKER_CLI_EXPERIMENTAL=enabled -docker buildx build \ - --load \ - --no-cache \ - --platform linux/amd64 \ +docker build \ --tag build-nginx-terraform $DIR/images/nginx # build nginx and publish docker images to quay.io. diff --git a/build/build.sh b/build/build.sh index e8138c6a6..3c235d598 100755 --- a/build/build.sh +++ b/build/build.sh @@ -47,26 +47,22 @@ export CGO_ENABLED=0 export GOARCH=${ARCH} go build \ - "${GOBUILD_FLAGS}" \ -ldflags "-s -w \ -X ${PKG}/version.RELEASE=${TAG} \ -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ -X ${PKG}/version.REPO=${REPO_INFO}" \ - -o "bin/${ARCH}/nginx-ingress-controller" "${PKG}/cmd/nginx" + -o "rootfs/bin/${ARCH}/nginx-ingress-controller" "${PKG}/cmd/nginx" go build \ - "${GOBUILD_FLAGS}" \ -ldflags "-s -w \ -X ${PKG}/version.RELEASE=${TAG} \ -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ -X ${PKG}/version.REPO=${REPO_INFO}" \ - -o "bin/${ARCH}/dbg" "${PKG}/cmd/dbg" - + -o "rootfs/bin/${ARCH}/dbg" "${PKG}/cmd/dbg" go build \ - "${GOBUILD_FLAGS}" \ -ldflags "-s -w \ -X ${PKG}/version.RELEASE=${TAG} \ -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ -X ${PKG}/version.REPO=${REPO_INFO}" \ - -o "bin/${ARCH}/wait-shutdown" "${PKG}/cmd/waitshutdown" + -o "rootfs/bin/${ARCH}/wait-shutdown" "${PKG}/cmd/waitshutdown" diff --git a/build/dev-env.sh b/build/dev-env.sh index 14efe0825..59682aa89 100755 --- a/build/dev-env.sh +++ b/build/dev-env.sh @@ -25,7 +25,6 @@ set -o pipefail DIR=$(cd $(dirname 
"${BASH_SOURCE}") && pwd -P) export TAG=1.0.0-dev -export ARCH=amd64 export REGISTRY=${REGISTRY:-ingress-controller} DEV_IMAGE=${REGISTRY}/nginx-ingress-controller:${TAG} @@ -57,9 +56,9 @@ if [[ ${KUBE_CLIENT_VERSION} -lt 14 ]]; then exit 1 fi -echo "[dev-env] building container" -make build container -docker tag "${REGISTRY}/nginx-ingress-controller-${ARCH}:${TAG}" "${DEV_IMAGE}" +echo "[dev-env] building image" +make build image +docker tag "${REGISTRY}/nginx-ingress-controller:${TAG}" "${DEV_IMAGE}" export K8S_VERSION=${K8S_VERSION:-v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694} diff --git a/build/images/ingress-controller/.dockerignore b/build/images/ingress-controller/.dockerignore index c45cf4169..226021878 100644 --- a/build/images/ingress-controller/.dockerignore +++ b/build/images/ingress-controller/.dockerignore @@ -1 +1,9 @@ *.tfvars +*.tfvars +.terraform* +terraform* +*.tfstate +*.tfstate.backup +id_rsa* +aws.tfvars +env.tfvars diff --git a/build/images/ingress-controller/Dockerfile b/build/images/ingress-controller/Dockerfile index d1d3b15a0..abc053668 100644 --- a/build/images/ingress-controller/Dockerfile +++ b/build/images/ingress-controller/Dockerfile @@ -1,6 +1,6 @@ -FROM k8s.gcr.io/debian-base:v2.0.0 +FROM us.gcr.io/k8s-artifacts-prod/build-image/debian-base-amd64:v2.1.0 -ENV TERRAFORM_VERSION 0.12.19 +ENV TERRAFORM_VERSION 0.12.26 RUN clean-install \ bash \ @@ -8,6 +8,7 @@ RUN clean-install \ ca-certificates \ unzip \ git \ + python3 \ openssh-client RUN curl -sSL -o /terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" \ diff --git a/build/images/ingress-controller/build-ingress-controller.sh b/build/images/ingress-controller/build.sh similarity index 85% rename from build/images/ingress-controller/build-ingress-controller.sh rename to build/images/ingress-controller/build.sh index 8bb5f0d85..879cb5486 100644 --- 
a/build/images/ingress-controller/build-ingress-controller.sh +++ b/build/images/ingress-controller/build.sh @@ -34,16 +34,10 @@ source_tfvars /tmp/env export DEBIAN_FRONTEND=noninteractive -apt -q=3 update +apt update +apt dist-upgrade --yes -apt -q=3 dist-upgrade --yes - -add-apt-repository universe --yes -add-apt-repository multiverse --yes - -apt -q=3 update - -apt -q=3 install \ +apt install \ apt-transport-https \ ca-certificates \ curl \ @@ -58,16 +52,15 @@ add-apt-repository \ $(lsb_release -cs) \ stable" --yes -apt -q=3 update - -apt -q=3 install docker-ce --yes +apt update +apt install docker-ce --yes echo ${docker_password} | docker login -u ${docker_username} --password-stdin quay.io curl -sL -o /usr/local/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme chmod +x /usr/local/bin/gimme -eval "$(gimme 1.14.2)" +eval "$(gimme 1.14.3)" export GOPATH="/tmp/go" @@ -89,9 +82,9 @@ docker buildx use ingress-nginx --default --global export DIND_TASKS=0 echo "Building NGINX image..." -ARCH=amd64 make build container push -ARCH=arm make build container push -ARCH=arm64 make build container push +ARCH=amd64 make build image push +ARCH=arm make build image push +ARCH=arm64 make build image push echo "Creating multi-arch images..." make push-manifest diff --git a/build/images/ingress-controller/entrypoint.sh b/build/images/ingress-controller/entrypoint.sh index 9170134d0..7aa5b13fa 100755 --- a/build/images/ingress-controller/entrypoint.sh +++ b/build/images/ingress-controller/entrypoint.sh @@ -35,8 +35,14 @@ trap 'catch $? 
$LINENO' ERR terraform init +GET_UNTIL_VALID=" +from datetime import datetime, timedelta +two_hours_from_now = datetime.utcnow() + timedelta(hours=2) +print(two_hours_from_now.strftime('%Y-%m-%dT%H:%M:%SZ')) +" + # destroy spot instance after two hours -EC2_VALID_UNTIL=$(date -d "+2 hours" +%Y-%m-%dT%H:%M:%SZ) +EC2_VALID_UNTIL=$(python3 -c "$GET_UNTIL_VALID") terraform plan \ -var-file /root/aws.tfvars \ diff --git a/build/images/ingress-controller/main.tf b/build/images/ingress-controller/main.tf index ab585f844..1a26c6878 100644 --- a/build/images/ingress-controller/main.tf +++ b/build/images/ingress-controller/main.tf @@ -165,8 +165,8 @@ resource "aws_spot_instance_request" "build_worker" { } provisioner "file" { - source = "build-ingress-controller.sh" - destination = "/tmp/build-ingress-controller.sh" + source = "build.sh" + destination = "/tmp/build.sh" } provisioner "file" { @@ -177,8 +177,8 @@ resource "aws_spot_instance_request" "build_worker" { provisioner "remote-exec" { inline = [ "echo Building ingress controller images...", - "chmod +x /tmp/build-ingress-controller.sh", - "sudo /tmp/build-ingress-controller.sh", + "chmod +x /tmp/build.sh", + "sudo /tmp/build.sh", ] } } diff --git a/build/images/ingress-controller/variables.tf b/build/images/ingress-controller/variables.tf index 48b0935d0..e1cbcb7af 100644 --- a/build/images/ingress-controller/variables.tf +++ b/build/images/ingress-controller/variables.tf @@ -44,7 +44,7 @@ variable "ssh_public_key_path" { variable "instance_type" { description = "EC2 instance" - default = "c5.18xlarge" + default = "c5.xlarge" } variable "project_tag" { diff --git a/build/images/nginx/.dockerignore b/build/images/nginx/.dockerignore index c45cf4169..8496cc2be 100644 --- a/build/images/nginx/.dockerignore +++ b/build/images/nginx/.dockerignore @@ -1 +1,8 @@ *.tfvars +.terraform* +terraform* +*.tfstate +*.tfstate.backup +id_rsa* +aws.tfvars +env.tfvars diff --git a/build/images/nginx/Dockerfile 
b/build/images/nginx/Dockerfile index d1d3b15a0..abc053668 100644 --- a/build/images/nginx/Dockerfile +++ b/build/images/nginx/Dockerfile @@ -1,6 +1,6 @@ -FROM k8s.gcr.io/debian-base:v2.0.0 +FROM us.gcr.io/k8s-artifacts-prod/build-image/debian-base-amd64:v2.1.0 -ENV TERRAFORM_VERSION 0.12.19 +ENV TERRAFORM_VERSION 0.12.26 RUN clean-install \ bash \ @@ -8,6 +8,7 @@ RUN clean-install \ ca-certificates \ unzip \ git \ + python3 \ openssh-client RUN curl -sSL -o /terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" \ diff --git a/build/images/nginx/build-nginx.sh b/build/images/nginx/build.sh similarity index 81% rename from build/images/nginx/build-nginx.sh rename to build/images/nginx/build.sh index d3100e2f3..50a034bd4 100644 --- a/build/images/nginx/build-nginx.sh +++ b/build/images/nginx/build.sh @@ -35,16 +35,11 @@ source_tfvars /tmp/env export DEBIAN_FRONTEND=noninteractive export AR_FLAGS=cr -apt -q=3 update +apt update +apt dist-upgrade --yes +apt update -apt -q=3 dist-upgrade --yes - -add-apt-repository universe --yes -add-apt-repository multiverse --yes - -apt -q=3 update - -apt -q=3 install \ +apt install \ apt-transport-https \ ca-certificates \ curl \ @@ -59,21 +54,15 @@ add-apt-repository \ $(lsb_release -cs) \ stable" --yes -apt -q=3 update +apt update -apt -q=3 install docker-ce --yes +apt install docker-ce --yes export DOCKER_CLI_EXPERIMENTAL=enabled echo ${docker_password} | docker login -u ${docker_username} --password-stdin quay.io -curl -sL -o /usr/local/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme -chmod +x /usr/local/bin/gimme - -eval "$(gimme 1.14.2)" - git clone https://github.com/kubernetes/ingress-nginx - cd ingress-nginx/images/nginx export TAG=$(git rev-parse HEAD) @@ -82,4 +71,4 @@ make init-docker-buildx docker buildx use ingress-nginx --default --global echo "Building NGINX images..." 
-make release +make image diff --git a/build/images/nginx/entrypoint.sh b/build/images/nginx/entrypoint.sh index 9170134d0..7aa5b13fa 100755 --- a/build/images/nginx/entrypoint.sh +++ b/build/images/nginx/entrypoint.sh @@ -35,8 +35,14 @@ trap 'catch $? $LINENO' ERR terraform init +GET_UNTIL_VALID=" +from datetime import datetime, timedelta +two_hours_from_now = datetime.utcnow() + timedelta(hours=2) +print(two_hours_from_now.strftime('%Y-%m-%dT%H:%M:%SZ')) +" + # destroy spot instance after two hours -EC2_VALID_UNTIL=$(date -d "+2 hours" +%Y-%m-%dT%H:%M:%SZ) +EC2_VALID_UNTIL=$(python3 -c "$GET_UNTIL_VALID") terraform plan \ -var-file /root/aws.tfvars \ diff --git a/build/images/nginx/main.tf b/build/images/nginx/main.tf index e991c815f..bd7e871bc 100644 --- a/build/images/nginx/main.tf +++ b/build/images/nginx/main.tf @@ -165,8 +165,8 @@ resource "aws_spot_instance_request" "build_worker" { } provisioner "file" { - source = "build-nginx.sh" - destination = "/tmp/build-nginx.sh" + source = "build.sh" + destination = "/tmp/build.sh" } provisioner "file" { @@ -177,8 +177,8 @@ resource "aws_spot_instance_request" "build_worker" { provisioner "remote-exec" { inline = [ "echo Building nginx images...", - "chmod +x /tmp/build-nginx.sh", - "sudo /tmp/build-nginx.sh", + "chmod +x /tmp/build.sh", + "sudo /tmp/build.sh", ] } } diff --git a/build/images/nginx/variables.tf b/build/images/nginx/variables.tf index 48b0935d0..7471f09e1 100644 --- a/build/images/nginx/variables.tf +++ b/build/images/nginx/variables.tf @@ -44,7 +44,7 @@ variable "ssh_public_key_path" { variable "instance_type" { description = "EC2 instance" - default = "c5.18xlarge" + default = "c5.24xlarge" } variable "project_tag" { diff --git a/build/run-e2e-suite.sh b/build/run-e2e-suite.sh index 3f14d34cd..b93be4c05 100755 --- a/build/run-e2e-suite.sh +++ b/build/run-e2e-suite.sh @@ -74,7 +74,6 @@ echo -e "Starting the e2e test pod" kubectl run --rm \ --attach \ --restart=Never \ - --generator=run-pod/v1 \ 
--env="E2E_NODES=${E2E_NODES}" \ --env="FOCUS=${FOCUS}" \ --env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \ diff --git a/build/run-in-docker.sh b/build/run-in-docker.sh index ba829f72b..76e930f8c 100755 --- a/build/run-in-docker.sh +++ b/build/run-in-docker.sh @@ -34,7 +34,7 @@ function cleanup { } trap cleanup EXIT -E2E_IMAGE=quay.io/kubernetes-ingress-controller/e2e:v04212020-5d67794f4 +E2E_IMAGE=${E2E_IMAGE:-quay.io/kubernetes-ingress-controller/e2e:v05312020-d250b97b4} DOCKER_OPTS=${DOCKER_OPTS:-} diff --git a/build/run-ingress-controller.sh b/build/run-ingress-controller.sh index 57292cd71..29338241f 100755 --- a/build/run-ingress-controller.sh +++ b/build/run-ingress-controller.sh @@ -76,7 +76,7 @@ if [[ "${USE_EXISTING_IMAGE}" == "true" ]]; then docker pull "${IMAGE}-${ARCH}:${TAG}" else echo -e "${BGREEN}Building ingress controller image${NC}" - make -C "${KUBE_ROOT}" build "sub-container-${ARCH}" + make -C "${KUBE_ROOT}" build "sub-image-${ARCH}" fi CONTEXT=$(kubectl config current-context) diff --git a/charts/ingress-nginx/Chart.yaml b/charts/ingress-nginx/Chart.yaml index 04ed89832..955e3d0b2 100644 --- a/charts/ingress-nginx/Chart.yaml +++ b/charts/ingress-nginx/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: ingress-nginx -version: 2.1.0 -appVersion: 0.32.0 +version: 2.7.0 +appVersion: 0.33.0 home: https://github.com/kubernetes/ingress-nginx description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png diff --git a/charts/ingress-nginx/README.md b/charts/ingress-nginx/README.md index af0de1345..c10208181 100644 --- a/charts/ingress-nginx/README.md +++ b/charts/ingress-nginx/README.md @@ -49,6 +49,7 @@ Parameter | Description | Default --- | --- | --- `controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` `controller.image.tag` | controller 
container image tag | `0.30.0` +`controller.image.digest` | controller container image digest | `""` `controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` `controller.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. | `101` `controller.containerPort.http` | The port that the controller container listens on for http connections. | `80` @@ -78,6 +79,7 @@ Parameter | Description | Default `controller.autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `11` `controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization percentage to scale | `"50"` `controller.autoscaling.targetMemoryUtilizationPercentage` | Target memory utilization percentage to scale | `"50"` +`controller.autoscaling.autoscalingTemplate` | If autoscaling template provided, creates custom autoscaling metric | false `controller.hostPort.enabled` | This enable `hostPort` for ports defined in TCP/80 and TCP/443 | false `controller.hostPort.ports.http` | If `controller.hostPort.enabled` is `true` and this is non-empty, it sets the hostPort | `"80"` `controller.hostPort.ports.https` | If `controller.hostPort.enabled` is `true` and this is non-empty, it sets the hostPort | `"443"` @@ -89,15 +91,16 @@ Parameter | Description | Default `controller.podAnnotations` | annotations to be added to pods | `{}` `controller.podLabels` | labels to add to the pod container metadata | `{}` `controller.podSecurityContext` | Security context policies to add to the controller pod | `{}` +`controller.sysctls` | Map of optional sysctls to enable in the controller and in the PodSecurityPolicy | `{}` `controller.replicaCount` | desired number of controller pods | `1` `controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` `controller.resources` | controller pod resource requests & limits | `{}` 
`controller.priorityClassName` | controller priorityClassName | `nil` `controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `true` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` `controller.service.annotations` | annotations for controller service | `{}` `controller.service.labels` | labels for controller service | `{}` -`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` -`controller.publishService.pathOverride` | override of the default publish-service name | `""` `controller.service.enabled` | if disabled no service will be created. This is especially useful when `controller.kind` is set to `DaemonSet` and `controller.hostPorts.enabled` is `true` | true `controller.service.clusterIP` | internal controller cluster service IP (set to `"-"` to pass an empty value) | `nil` `controller.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the controller service | `false` @@ -118,6 +121,8 @@ Parameter | Description | Default `controller.service.nodePorts.https` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` `controller.service.nodePorts.tcp` | Sets the nodePort for an entry referenced by its key from `tcp` | `{}` `controller.service.nodePorts.udp` | Sets the nodePort for an entry referenced by its key from `udp` | `{}` +`controller.service.internal.enabled` | Enables an (additional) internal load balancer | false +`controller.service.internal.annotations` | Annotations for configuring the additional internal load balancer | `{}` `controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 
`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 `controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 @@ -164,10 +169,12 @@ Parameter | Description | Default `controller.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for the prometheus operator tls proxy, and patch the created webhooks with the CA. | `true` `controller.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` `controller.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.2.0` +`controller.admissionWebhooks.patch.image.digest` | Digest to use for the webhook integration jobs | `""` `controller.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` `controller.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `""` `controller.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `{}` `controller.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `{}` +`controller.admissionWebhooks.patch.tolerations` | Node taints/tolerations for running admission hook patch jobs | `[]` `controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` `controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` `controller.addHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers) added before sending response to the client | `{}` @@ -182,6 +189,7 @@ Parameter | Description | Default `defaultBackend.enabled` | Use default backend component | `false` `defaultBackend.image.repository` | default backend container image repository | 
`k8s.gcr.io/defaultbackend-amd64` `defaultBackend.image.tag` | default backend container image tag | `1.5` +`defaultBackend.image.digest` | default backend container image digest | `""` `defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` `defaultBackend.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. By default uses nobody user. | `65534` `defaultBackend.extraArgs` | Additional default backend container arguments | `{}` @@ -310,6 +318,48 @@ controller: domainName: "kubernetes-example.com" ``` +## Additional internal load balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. + +Example for AWS +``` +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + # Any other annotation can be declared here. +``` + +Example for GCE +``` +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB + cloud.google.com/load-balancer-type: "Internal" + # Any other annotation can be declared here. 
+``` + +A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress Kubernetes object. + + ## Ingress Admission Webhooks With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. @@ -327,4 +377,4 @@ Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: In Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. -As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
\ No newline at end of file diff --git a/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 000000000..443e39d8b --- /dev/null +++ b/charts/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml b/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml new file mode 100644 index 000000000..892f6de3f --- /dev/null +++ b/charts/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,9 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml index 695cf44a6..3b43946b8 100644 --- a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -32,7 +32,9 @@ spec: {{- end }} containers: - name: create - image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{.repository}}{{- if (.digest) -}} @{{.digest}} {{- else -}} :{{ .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} args: - create @@ -43,6 +45,9 @@ spec: serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} {{- end }} securityContext: runAsNonRoot: true diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml index da0f25881..7afd03f2f 100644 --- a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +++ b/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -32,7 +32,9 @@ spec: {{- end }} containers: - name: patch - image: {{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{.repository}}{{- if (.digest) -}} @{{.digest}} {{- else -}} :{{ .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} args: - patch @@ -45,8 +47,11 @@ spec: serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} {{- end }} securityContext: runAsNonRoot: true runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/ingress-nginx/templates/controller-daemonset.yaml b/charts/ingress-nginx/templates/controller-daemonset.yaml index e0f4800a4..94195946b 100644 --- a/charts/ingress-nginx/templates/controller-daemonset.yaml +++ b/charts/ingress-nginx/templates/controller-daemonset.yaml @@ -42,12 +42,24 @@ spec: {{- if .Values.controller.priorityClassName }} priorityClassName: {{ .Values.controller.priorityClassName }} {{- end }} - {{- if .Values.controller.podSecurityContext }} - securityContext: {{ toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl }} + value: {{ $value }} + {{- end }} {{- end }} containers: - name: controller - image: {{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + {{- with .Values.controller.image }} + image: "{{.repository}}{{- if (.digest) -}} @{{.digest}} {{- else -}} :{{ .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.controller.image.pullPolicy }} {{- if .Values.controller.lifecycle }} lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} diff --git 
a/charts/ingress-nginx/templates/controller-deployment.yaml b/charts/ingress-nginx/templates/controller-deployment.yaml index 72d62cb00..271ef660f 100644 --- a/charts/ingress-nginx/templates/controller-deployment.yaml +++ b/charts/ingress-nginx/templates/controller-deployment.yaml @@ -46,12 +46,24 @@ spec: {{- if .Values.controller.priorityClassName }} priorityClassName: {{ .Values.controller.priorityClassName }} {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} {{- if .Values.controller.podSecurityContext }} - securityContext: {{ toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl }} + value: {{ $value }} + {{- end }} {{- end }} containers: - name: controller - image: {{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + {{- with .Values.controller.image }} + image: "{{.repository}}{{- if (.digest) -}} @{{.digest}} {{- else -}} :{{ .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.controller.image.pullPolicy }} {{- if .Values.controller.lifecycle }} lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} diff --git a/charts/ingress-nginx/templates/controller-hpa.yaml b/charts/ingress-nginx/templates/controller-hpa.yaml index dbcf008eb..4923cf8d2 100644 --- a/charts/ingress-nginx/templates/controller-hpa.yaml +++ b/charts/ingress-nginx/templates/controller-hpa.yaml @@ -1,5 +1,5 @@ {{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} -apiVersion: autoscaling/v2beta1 +apiVersion: autoscaling/v2beta2 kind: HorizontalPodAutoscaler metadata: labels: @@ -15,15 +15,22 @@ spec: maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} metrics: {{- with 
.Values.controller.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ . }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} {{- end }} {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ . }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} +{{- toYaml . | nindent 2 }} {{- end }} {{- end }} diff --git a/charts/ingress-nginx/templates/controller-psp.yaml b/charts/ingress-nginx/templates/controller-psp.yaml index 8388d7ec9..bcf588c3c 100644 --- a/charts/ingress-nginx/templates/controller-psp.yaml +++ b/charts/ingress-nginx/templates/controller-psp.yaml @@ -9,6 +9,12 @@ metadata: spec: allowedCapabilities: - NET_BIND_SERVICE +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} privileged: false allowPrivilegeEscalation: true # Allow core volume types. diff --git a/charts/ingress-nginx/templates/controller-service-internal.yaml b/charts/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 000000000..1edfba704 --- /dev/null +++ b/charts/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,41 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-internal +spec: + type: "{{ .Values.controller.service.type }}" + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/ingress-nginx/templates/default-backend-deployment.yaml b/charts/ingress-nginx/templates/default-backend-deployment.yaml index 63ffe4544..dac925b1b 100644 --- a/charts/ingress-nginx/templates/default-backend-deployment.yaml +++ b/charts/ingress-nginx/templates/default-backend-deployment.yaml @@ -36,7 +36,9 @@ spec: {{- end }} containers: - name: {{ template "ingress-nginx.name" . 
}}-default-backend - image: {{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }} + {{- with .Values.defaultBackend.image }} + image: "{{.repository}}{{- if (.digest) -}} @{{.digest}} {{- else -}} :{{ .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} {{- if .Values.defaultBackend.extraArgs }} args: diff --git a/charts/ingress-nginx/values.yaml b/charts/ingress-nginx/values.yaml index 54a97348c..766565627 100644 --- a/charts/ingress-nginx/values.yaml +++ b/charts/ingress-nginx/values.yaml @@ -4,7 +4,7 @@ controller: image: repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller - tag: "0.32.0" + tag: "0.33.0" pullPolicy: IfNotPresent # www-data -> uid 101 runAsUser: 101 @@ -67,11 +67,16 @@ controller: # key: value ## Security Context policies for controller pods - ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for - ## notes on enabling and using sysctls ## podSecurityContext: {} + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ### + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + ## Allows customization of the source of the IP address or FQDN to report ## in the ingress status field. By default, it reads the information provided ## by the service. If disable, the status field reports the IP address of the @@ -238,6 +243,17 @@ controller: targetCPUUtilizationPercentage: 50 targetMemoryUtilizationPercentage: 50 + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + ## Enable mimalloc as a drop-in replacement for malloc. 
## ref: https://github.com/microsoft/mimalloc ## @@ -302,6 +318,12 @@ controller: tcp: {} udp: {} + ## Enables an additional internal load balancer (besides the external one). + ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + internal: + enabled: false + annotations: {} + extraContainers: [] ## Additional containers to be added to the controller pod. ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. @@ -367,6 +389,7 @@ controller: priorityClassName: "" podAnnotations: {} nodeSelector: {} + tolerations: [] runAsUser: 2000 metrics: diff --git a/cloudbuild.yaml b/cloudbuild.yaml new file mode 100644 index 000000000..d4d1c3578 --- /dev/null +++ b/cloudbuild.yaml @@ -0,0 +1,30 @@ +# See https://cloud.google.com/cloud-build/docs/build-config + +timeout: 1800s +# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF, +# or any new substitutions added in the future. +options: + substitution_option: ALLOW_LOOSE +steps: + - name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20200422-b25d964' + entrypoint: bash + env: + - DOCKER_CLI_EXPERIMENTAL=enabled + - TAG=$_GIT_TAG + - BASE_REF=$_PULL_BASE_REF + - REGISTRY=gcr.io/k8s-staging-ingress-nginx + - REPO_INFO=https://github.com/kubernetes/ingress-nginx + - HOME=/root + - USER=root + args: + - -c + - | + gcloud auth configure-docker \ + && make release +substitutions: + # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and + # can be used as a substitution + _GIT_TAG: "12345" + # _PULL_BASE_REF will contain the ref that was pushed to to trigger this build - + # a branch like 'master' or 'release-0.2', or a tag like 'v0.2'. 
+ _PULL_BASE_REF: "master" diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 571bf2143..ee4d4dcfb 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -24,6 +24,8 @@ import ( "net/http/pprof" "os" "os/signal" + "path/filepath" + "runtime" "syscall" "time" @@ -199,10 +201,21 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) { // the in-cluster config is missing or fails, we fallback to the default config. func createApiserverClient(apiserverHost, rootCAFile, kubeConfig string) (*kubernetes.Clientset, error) { cfg, err := clientcmd.BuildConfigFromFlags(apiserverHost, kubeConfig) + if err != nil { return nil, err } + // Configure the User-Agent used for the HTTP requests made to the API server. + cfg.UserAgent = fmt.Sprintf( + "%s/%s (%s/%s) ingress-nginx/%s", + filepath.Base(os.Args[0]), + version.RELEASE, + runtime.GOOS, + runtime.GOARCH, + version.COMMIT, + ) + if apiserverHost != "" && rootCAFile != "" { tlsClientConfig := rest.TLSClientConfig{} diff --git a/deploy/static/provider/aws/deploy-tls-termination.yaml b/deploy/static/provider/aws/deploy-tls-termination.yaml index c202778ee..bbd962e1c 100644 --- a/deploy/static/provider/aws/deploy-tls-termination.yaml +++ b/deploy/static/provider/aws/deploy-tls-termination.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -49,10 +49,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -120,10 +120,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -141,10 +141,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -236,10 +236,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -258,10 +258,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + 
app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission @@ -289,10 +289,10 @@ metadata: service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https service.beta.kubernetes.io/aws-load-balancer-type: elb labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -319,10 +319,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -345,7 +345,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -431,10 +431,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -468,10 +468,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: 
ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -493,10 +493,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -518,10 +518,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -530,10 +530,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -561,10 +561,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - 
app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -573,10 +573,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -606,10 +606,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -631,10 +631,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -656,10 +656,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: 
Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/deploy/static/provider/aws/deploy.yaml b/deploy/static/provider/aws/deploy.yaml index b7fce308f..d142a5b04 100644 --- a/deploy/static/provider/aws/deploy.yaml +++ b/deploy/static/provider/aws/deploy.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -42,10 +42,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -113,10 +113,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -134,10 +134,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - 
helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -229,10 +229,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -251,10 +251,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission @@ -280,10 +280,10 @@ metadata: service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' service.beta.kubernetes.io/aws-load-balancer-type: nlb labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -310,10 +310,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: 
Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -336,7 +336,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -419,10 +419,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -456,10 +456,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -481,10 +481,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -506,10 +506,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: 
ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -518,10 +518,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -549,10 +549,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -561,10 +561,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -594,10 +594,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -619,10 +619,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -644,10 +644,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/deploy/static/provider/baremetal/deploy.yaml b/deploy/static/provider/baremetal/deploy.yaml index f12eabc88..f0303a587 100644 --- a/deploy/static/provider/baremetal/deploy.yaml +++ b/deploy/static/provider/baremetal/deploy.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 
0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -42,10 +42,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -113,10 +113,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -134,10 +134,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -229,10 +229,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -251,10 +251,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx 
app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission @@ -275,10 +275,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -304,10 +304,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -330,7 +330,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -412,10 +412,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -449,10 +449,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + 
helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -474,10 +474,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -499,10 +499,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -511,10 +511,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -542,10 +542,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 
0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -554,10 +554,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -587,10 +587,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -612,10 +612,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -637,10 +637,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/deploy/static/provider/cloud/deploy.yaml b/deploy/static/provider/cloud/deploy.yaml index 3e342cc80..04788b8c4 100644 --- a/deploy/static/provider/cloud/deploy.yaml +++ b/deploy/static/provider/cloud/deploy.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -42,10 +42,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -113,10 +113,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -134,10 +134,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: 
- helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -229,10 +229,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -251,10 +251,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission @@ -275,10 +275,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -305,10 +305,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -331,7 +331,7 @@ spec: dnsPolicy: 
ClusterFirst containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -414,10 +414,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -451,10 +451,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -476,10 +476,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -501,10 +501,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: 
ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -513,10 +513,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -544,10 +544,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -556,10 +556,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -589,10 +589,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -614,10 +614,10 @@ metadata: 
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -639,10 +639,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/deploy/static/provider/do/deploy.yaml b/deploy/static/provider/do/deploy.yaml index a0cf58649..a8a662273 100644 --- a/deploy/static/provider/do/deploy.yaml +++ b/deploy/static/provider/do/deploy.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -43,10 +43,10 @@ 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -114,10 +114,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -135,10 +135,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -230,10 +230,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -252,10 +252,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller 
name: ingress-nginx-controller-admission @@ -278,10 +278,10 @@ metadata: annotations: service.beta.kubernetes.io/do-loadbalancer-enable-proxy-protocol: 'true' labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -308,10 +308,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -334,7 +334,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -417,10 +417,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -454,10 +454,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - 
app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -479,10 +479,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -504,10 +504,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -516,10 +516,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -547,10 +547,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: 
admission-webhook namespace: ingress-nginx @@ -559,10 +559,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -592,10 +592,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -617,10 +617,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -642,10 +642,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/deploy/static/provider/kind/deploy.yaml 
b/deploy/static/provider/kind/deploy.yaml index 826eaabfe..c3ca2f5a2 100644 --- a/deploy/static/provider/kind/deploy.yaml +++ b/deploy/static/provider/kind/deploy.yaml @@ -13,10 +13,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -27,10 +27,10 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -42,10 +42,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -113,10 +113,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx @@ -134,10 +134,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx 
app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -229,10 +229,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx @@ -251,10 +251,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission @@ -275,10 +275,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -304,10 +304,10 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller @@ -334,7 +334,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: controller - image: 
quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -425,10 +425,10 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission @@ -462,10 +462,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -487,10 +487,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -512,10 +512,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + 
app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -524,10 +524,10 @@ spec: metadata: name: ingress-nginx-admission-create labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -555,10 +555,10 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -567,10 +567,10 @@ spec: metadata: name: ingress-nginx-admission-patch labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: @@ -600,10 +600,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -625,10 +625,10 @@ metadata: helm.sh/hook: 
pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx @@ -650,10 +650,10 @@ metadata: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: - helm.sh/chart: ingress-nginx-2.1.0 + helm.sh/chart: ingress-nginx-2.4.0 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/version: 0.32.0 + app.kubernetes.io/version: 0.33.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx diff --git a/docs/deploy/index.md b/docs/deploy/index.md index dfe48651d..0036889f5 100644 --- a/docs/deploy/index.md +++ b/docs/deploy/index.md @@ -13,6 +13,19 @@ In case [Network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) or additional firewalls, please allow access to port `8443`. +!!! attention + The first time the ingress controller starts, two [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) create the SSL Certificate used by the admission webhook. + For this reason, there is an initial delay of up to two minutes until it is possible to create and validate Ingress definitions. 
+ + You can wait until is ready to running the next command: + + ```yaml + kubectl wait --namespace ingress-nginx \ + --for=condition=ready pod \ + --selector=app.kubernetes.io/component=controller \ + --timeout=120s + ``` + ## Contents - [Provider Specific Steps](#provider-specific-steps) @@ -36,7 +49,7 @@ Kubernetes is available in Docker for Mac (from [version 18.06.0-ce](https://doc [enable]: https://docs.docker.com/docker-for-mac/#kubernetes ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud/deploy.yaml ``` #### minikube @@ -71,7 +84,7 @@ In AWS we use a Network load balancer (NLB) to expose the NGINX Ingress controll ##### Network Load Balancer (NLB) ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/aws/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy.yaml ``` ##### TLS termination in AWS Load Balancer (ELB) @@ -80,10 +93,10 @@ In some scenarios is required to terminate TLS in the Load Balancer and not in t For this purpose we provide a template: -- Download [deploy-tls-termination.yaml](https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/aws/deploy-tls-termination.yaml) +- Download [deploy-tls-termination.yaml](https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy-tls-termination.yaml) ```console -wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/aws/deploy-tls-termination.yaml +wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy-tls-termination.yaml ``` - Edit the file and change: @@ -133,7 +146,7 
@@ More information with regards to timeouts for can be found in the [official AWS ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud/deploy.yaml ``` !!! failure Important @@ -142,13 +155,13 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/cont #### Azure ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud/deploy.yaml ``` #### Digital Ocean ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/do/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/do/deploy.yaml ``` #### Bare-metal @@ -156,7 +169,7 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/cont Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport): ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/baremetal/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/deploy.yaml ``` !!! tip @@ -164,6 +177,9 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/cont ### Verify installation +!!! 
info + In minikube the ingress addon is installed in the namespace **kube-system** instead of ingress-nginx + To check if the ingress controller pods have started, run the following command: ```console diff --git a/docs/deploy/upgrade.md b/docs/deploy/upgrade.md index 10beb5dc0..c720079fc 100644 --- a/docs/deploy/upgrade.md +++ b/docs/deploy/upgrade.md @@ -33,7 +33,7 @@ The easiest way to do this is e.g. (do note you may need to change the name para ``` kubectl set image deployment/nginx-ingress-controller \ - nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 ``` For interactive editing, use `kubectl edit deployment nginx-ingress-controller`. diff --git a/docs/development.md b/docs/development.md index 367123f77..20bd35f70 100644 --- a/docs/development.md +++ b/docs/development.md @@ -29,7 +29,7 @@ $ make dev-env The nginx controller container image can be rebuilt using: ``` -$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build container +$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build image ``` The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: @@ -76,16 +76,15 @@ To find the registry simply run: `docker system info | grep Registry` The e2e test image can also be built through the Makefile. 
```console -$ make e2e-test-image +$ make -C test/e2e-image image ``` -You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: +Then you can load the docker image using kind: ```console -$ docker save nginx-ingress-controller:e2e | (eval $(minikube docker-env) && docker load) +$ kind load docker-image --name="ingress-nginx-dev" nginx-ingress-controller:e2e ``` - ### Nginx Controller Build a raw server binary @@ -98,19 +97,13 @@ $ make build Build a local container image ```console -$ TAG= REGISTRY=$USER/ingress-controller make container -``` - -Push the container image to a remote repository - -```console -$ TAG= REGISTRY=$USER/ingress-controller make push +$ TAG= REGISTRY=$USER/ingress-controller make image ``` ## Deploying There are several ways to deploy the ingress controller onto a cluster. -Please check the [deployment guide](../deploy/) +Please check the [deployment guide](./deploy/) ## Testing @@ -125,7 +118,12 @@ If you have access to a Kubernetes cluster, you can also run e2e tests using gin ```console $ cd $GOPATH/src/k8s.io/ingress-nginx -$ make e2e-test +$ KIND_CLUSTER_NAME="ingress-nginx-test" make kind-e2e-test +``` +To set focus to a particular set of tests, a FOCUS flag can be set. 
+ +```console +KIND_CLUSTER_NAME="ingress-nginx-test" FOCUS="no-auth-locations" make kind-e2e-test ``` NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the **Building the e2e test image** section diff --git a/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml index dc5b7a354..b383ab95e 100644 --- a/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml +++ b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml @@ -31,7 +31,7 @@ spec: # docker run -ti --rm python:3-alpine python -c 'import secrets,base64; print(base64.b64encode(base64.b64encode(secrets.token_bytes(16))));' - name: OAUTH2_PROXY_COOKIE_SECRET value: SECRET - image: quay.io/pusher/oauth2_proxy:latest + image: quay.io/oauth2-proxy/oauth2-proxy:latest imagePullPolicy: Always name: oauth2-proxy ports: diff --git a/docs/examples/customization/custom-headers/README.md b/docs/examples/customization/custom-headers/README.md index 6db64e3a1..cb7c251e6 100644 --- a/docs/examples/customization/custom-headers/README.md +++ b/docs/examples/customization/custom-headers/README.md @@ -10,13 +10,13 @@ server. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml ``` -[configmap.yaml](configmap.yaml) defines a ConfigMap in the `ingress-nginx` namespace named `nginx-configuration`. This controls the [global configuration](../../../user-guide/nginx-configuration/configmap.md) of the ingress controller, and already exists in a standard installation. The key `proxy-set-headers` is set to cite the previously-created `ingress-nginx/custom-headers` ConfigMap. +[configmap.yaml](configmap.yaml) defines a ConfigMap in the `ingress-nginx` namespace named `ingress-nginx-controller`. 
This controls the [global configuration](../../../user-guide/nginx-configuration/configmap.md) of the ingress controller, and already exists in a standard installation. The key `proxy-set-headers` is set to cite the previously-created `ingress-nginx/custom-headers` ConfigMap. ```console kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml ``` -The nginx ingress controller will read the `ingress-nginx/nginx-configuration` ConfigMap, find the `proxy-set-headers` key, read HTTP headers from the `ingress-nginx/custom-headers` ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends. +The nginx ingress controller will read the `ingress-nginx/ingress-nginx-controller` ConfigMap, find the `proxy-set-headers` key, read HTTP headers from the `ingress-nginx/custom-headers` ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends. ## Test diff --git a/docs/examples/static-ip/nginx-ingress-controller.yaml b/docs/examples/static-ip/nginx-ingress-controller.yaml index 83be0c57d..30885ec54 100644 --- a/docs/examples/static-ip/nginx-ingress-controller.yaml +++ b/docs/examples/static-ip/nginx-ingress-controller.yaml @@ -24,7 +24,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 + - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 915c38aad..6f9f31dc4 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -165,7 +165,7 @@ Kubernetes Workstation ### Service Account -If using a service account to connect to the API server, Dashboard expects the file +If using a service account to connect to the API server, the ingress-controller expects the file 
`/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. It provides a secret token that is required to authenticate with the API server. diff --git a/docs/user-guide/custom-errors.md b/docs/user-guide/custom-errors.md index 9f46b7395..05678f9e6 100644 --- a/docs/user-guide/custom-errors.md +++ b/docs/user-guide/custom-errors.md @@ -5,7 +5,7 @@ that it passes several HTTP headers down to its `default-backend` in case of err | Header | Value | | ---------------- | ------------------------------------------------------------------- | -| `X-Code` | HTTP status code retuned by the request | +| `X-Code` | HTTP status code returned by the request | | `X-Format` | Value of the `Accept` header sent by the client | | `X-Original-URI` | URI that caused the error | | `X-Namespace` | Namespace where the backend Service is located | diff --git a/docs/user-guide/monitoring.md b/docs/user-guide/monitoring.md index 17692a5d2..660fb5101 100644 --- a/docs/user-guide/monitoring.md +++ b/docs/user-guide/monitoring.md @@ -74,6 +74,6 @@ According to the above example, this URL will be http://10.192.0.3:31086 The username and password is `admin` -After the login you can import the Grafana dashboard from _https://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards_ +After the login you can import the Grafana dashboard from [official dashboards](https://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards) ![Dashboard](../images/grafana.png) diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index 78c77d5db..f3b32315a 100755 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -100,6 +100,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/proxy-buffer-size](#proxy-buffer-size)|string| 
|[nginx.ingress.kubernetes.io/proxy-max-temp-file-size](#proxy-max-temp-file-size)|string| |[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string| +|[nginx.ingress.kubernetes.io/ssl-prefer-server-ciphers](#ssl-ciphers)|"true" or "false"| |[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string| |[nginx.ingress.kubernetes.io/enable-access-log](#enable-access-log)|"true" or "false"| |[nginx.ingress.kubernetes.io/enable-opentracing](#enable-opentracing)|"true" or "false"| @@ -172,7 +173,7 @@ Use `nginx.ingress.kubernetes.io/session-cookie-samesite` to apply a `SameSite` ### Authentication -Is possible to add authentication adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords. +It is possible to add authentication by adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords. The annotations are: ``` @@ -212,7 +213,7 @@ There is a special mode of upstream hashing called subset. In this mode, upstrea To enable consistent hashing for a backend: -`nginx.ingress.kubernetes.io/upstream-hash-by`: the nginx variable, text value or any combination thereof to use for consistent hashing. For example `nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"` to consistently hash upstream requests by the current request URI. +`nginx.ingress.kubernetes.io/upstream-hash-by`: the nginx variable, text value or any combination thereof to use for consistent hashing. For example: `nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"` or `nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri$host"` or `nginx.ingress.kubernetes.io/upstream-hash-by: "${request_uri}-text-value"` to consistently hash upstream requests by the current request URI. "subset" hashing can be enabled setting `nginx.ingress.kubernetes.io/upstream-hash-by-subset`: "true". 
This maps requests to subset of nodes instead of a single one. `upstream-hash-by-subset-size` determines the size of each subset (default 3). @@ -646,6 +647,12 @@ Using this annotation will set the `ssl_ciphers` directive at the server level. nginx.ingress.kubernetes.io/ssl-ciphers: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP" ``` +The following annotation will set the `ssl_prefer_server_ciphers` directive at the server level. This configuration specifies that server ciphers should be preferred over client ciphers when using the SSLv3 and TLS protocols. + +```yaml +nginx.ingress.kubernetes.io/ssl-prefer-server-ciphers: "true" +``` + ### Connection proxy header Using this annotation will override the default connection header set by NGINX. diff --git a/docs/user-guide/nginx-configuration/configmap.md b/docs/user-guide/nginx-configuration/configmap.md index e43090fb3..688c8012c 100755 --- a/docs/user-guide/nginx-configuration/configmap.md +++ b/docs/user-guide/nginx-configuration/configmap.md @@ -32,6 +32,8 @@ The following table shows a configuration option's name, type, and the default v |[hide-headers](#hide-headers)|string array|empty| |[access-log-params](#access-log-params)|string|""| |[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"| +|[http-access-log-path](#http-access-log-path)|string|""| +|[stream-access-log-path](#stream-access-log-path)|string|""| |[enable-access-log-for-default-backend](#enable-access-log-for-default-backend)|bool|"false"| |[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"| |[enable-modsecurity](#enable-modsecurity)|bool|"false"| @@ -207,10 +209,24 @@ _References:_ ## access-log-path -Access log path. Goes to `/var/log/nginx/access.log` by default. +Access log path for both http and stream context. Goes to `/var/log/nginx/access.log` by default. 
__Note:__ the file `/var/log/nginx/access.log` is a symlink to `/dev/stdout` +## http-access-log-path + +Access log path for http context globally. +_**default:**_ "" + +__Note:__ If not specified, the `access-log-path` will be used. + +## stream-access-log-path + +Access log path for stream context globally. +_**default:**_ "" + +__Note:__ If not specified, the `access-log-path` will be used. + ## enable-access-log-for-default-backend Enables logging access to default backend. _**default:**_ is disabled. diff --git a/go.mod b/go.mod index aab0c3d9d..7ef16af0f 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/moul/pb v0.0.0-20180404114147-54bdd96e6a52 github.com/ncabatoff/process-exporter v0.6.0 github.com/onsi/ginkgo v1.12.0 - github.com/opencontainers/runc v1.0.0-rc9 + github.com/opencontainers/runc v1.0.0-rc10 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.4.0 github.com/prometheus/client_model v0.2.0 @@ -44,40 +44,40 @@ require ( gopkg.in/gavv/httpexpect.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/go-playground/pool.v3 v3.1.1 - k8s.io/api v0.18.2 - k8s.io/apiextensions-apiserver v0.18.2 - k8s.io/apimachinery v0.18.2 - k8s.io/apiserver v0.18.2 - k8s.io/cli-runtime v0.18.2 - k8s.io/client-go v0.18.2 - k8s.io/code-generator v0.18.2 - k8s.io/component-base v0.18.2 + k8s.io/api v0.18.3 + k8s.io/apiextensions-apiserver v0.18.3 + k8s.io/apimachinery v0.18.3 + k8s.io/apiserver v0.18.3 + k8s.io/cli-runtime v0.18.3 + k8s.io/client-go v0.18.3 + k8s.io/code-generator v0.18.3 + k8s.io/component-base v0.18.3 k8s.io/klog v1.0.0 - k8s.io/kubernetes v1.18.2 + k8s.io/kubernetes v1.18.3 pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732 - sigs.k8s.io/controller-runtime v0.5.1-0.20200327213554-2d4c4877f906 + sigs.k8s.io/controller-runtime v0.6.0 ) replace ( - k8s.io/api => k8s.io/api v0.18.2 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.2 - k8s.io/apimachinery => k8s.io/apimachinery 
v0.18.2 - k8s.io/apiserver => k8s.io/apiserver v0.18.2 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.2 - k8s.io/client-go => k8s.io/client-go v0.18.2 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.2 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.2 - k8s.io/code-generator => k8s.io/code-generator v0.18.2 - k8s.io/component-base => k8s.io/component-base v0.18.2 - k8s.io/cri-api => k8s.io/cri-api v0.18.2 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.2 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.2 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.2 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.2 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.2 - k8s.io/kubectl => k8s.io/kubectl v0.18.2 - k8s.io/kubelet => k8s.io/kubelet v0.18.2 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.2 - k8s.io/metrics => k8s.io/metrics v0.18.2 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.2 + k8s.io/api => k8s.io/api v0.18.3 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.3 + k8s.io/apimachinery => k8s.io/apimachinery v0.18.3 + k8s.io/apiserver => k8s.io/apiserver v0.18.3 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.3 + k8s.io/client-go => k8s.io/client-go v0.18.3 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.3 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.3 + k8s.io/code-generator => k8s.io/code-generator v0.18.3 + k8s.io/component-base => k8s.io/component-base v0.18.3 + k8s.io/cri-api => k8s.io/cri-api v0.18.3 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.3 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.3 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.3 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.3 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.3 + k8s.io/kubectl => k8s.io/kubectl v0.18.3 + k8s.io/kubelet => k8s.io/kubelet v0.18.3 + 
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.3 + k8s.io/metrics => k8s.io/metrics v0.18.3 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.3 ) diff --git a/go.sum b/go.sum index ceab2660a..85880f07d 100644 --- a/go.sum +++ b/go.sum @@ -132,7 +132,6 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -524,9 +523,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.0-rc10 h1:AbmCEuSZXVflng0/cboQkpdEOeBsPMjz6tmq4Pv8MZw= github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg= 
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= @@ -934,28 +932,28 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= -k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic= -k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/cli-runtime v0.18.2 h1:JiTN5RgkFNTiMxHBRyrl6n26yKWAuNRlei1ZJALUmC8= -k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ= -k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/cloud-provider v0.18.2 h1:bwVSHGbT6/FP2tf/yOmb+K4w6OR0BsKekwDltKfrVy0= -k8s.io/cloud-provider v0.18.2/go.mod h1:t1HjnQN2l5wK/fORo/yyu0Q+bZTYuReHYCIpi/qqfms= -k8s.io/cluster-bootstrap v0.18.2/go.mod h1:lHDOrHDzZi3eQE9bYMFpkwwUuLYiAiBuQuHaAnoGWTk= -k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso= -k8s.io/code-generator v0.18.2/go.mod 
h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y= -k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/cri-api v0.18.2 h1:bykYbClh5Bnjo2EMjlYbYQ3ksxHjjLcbriKPm831hVk= -k8s.io/cri-api v0.18.2/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= -k8s.io/csi-translation-lib v0.18.2/go.mod h1:2lyXP0OP6MuzAEde802d4L/Rhzj4teNdNBKGVxVKV78= +k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/apiextensions-apiserver v0.18.3 h1:h6oZO+iAgg0HjxmuNnguNdKNB9+wv3O1EBDdDWJViQ0= +k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= +k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= +k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apiserver v0.18.3 h1:BVjccwKP/kEqY+ResOyWs0EKs7f4XL0d0E5GkU3uiqI= +k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= +k8s.io/cli-runtime v0.18.3 h1:8IBtaTYmXiXipKdx2FAKotvnQMjcF0kSLvX4szY340c= +k8s.io/cli-runtime v0.18.3/go.mod h1:pqbbi4nqRIQhUWAVzen8uE8DD/zcZLwf+8sQYO4lwLk= +k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k= +k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= +k8s.io/cloud-provider v0.18.3 h1:h3zk/I1+Bkz4Wl5TAcJhK4wT+CYLws5mL1XTxIjkDwE= +k8s.io/cloud-provider v0.18.3/go.mod h1:sZelqNhA+TI+FqV6smLvZ84/DQCNdrEUmdQLneZpfC4= +k8s.io/cluster-bootstrap v0.18.3/go.mod h1:iM3iptIPGNWCvFBvm67JJWaFdYb+7Gzle2bj125ZBy8= +k8s.io/code-generator v0.18.3 h1:5H57pYEbkMMXCLKD16YQH3yDPAbVLweUsB1M3m70D1c= +k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/component-base v0.18.3 h1:QXq+P4lgi4LCIREya1RDr5gTcBaVFhxEcALir3QCSDA= +k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= 
+k8s.io/cri-api v0.18.3 h1:XDR/4XxbEgalHfKkfwNpk+iIYeBT/dZLnpnZYrm1dbY= +k8s.io/cri-api v0.18.3/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= +k8s.io/csi-translation-lib v0.18.3/go.mod h1:4UtVGtxPzhtFdadhRCYBL084NvJLNMouCat3UcTbbu0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -964,20 +962,20 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.18.2/go.mod h1:ijq6FnNUoKinA6kKbkN6svdTacSoQVNtKqmQ1+XJEYQ= -k8s.io/kube-controller-manager v0.18.2/go.mod h1:v45wCqexTrOltgwj92V4ve7hm5f70GQzi4a47/RQ0HQ= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-proxy v0.18.2/go.mod h1:VTgyDMdylYGgHVqLQo/Nt4yDWkh/LRsSnxRiG8GVgDo= -k8s.io/kube-scheduler v0.18.2/go.mod h1:dL+C0Hp/ahQOQK3BsgmV8btb3BtMZvz6ONUw/v1N8sk= -k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4= -k8s.io/kubelet v0.18.2/go.mod h1:7x/nzlIWJLg7vOfmbQ4lgsYazEB0gOhjiYiHK1Gii4M= -k8s.io/kubernetes v1.18.2 h1:37sJPq6p+gx5hEHQSwCWXIiXDc9AajzV1A5UrswnDq0= -k8s.io/kubernetes v1.18.2/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0= -k8s.io/legacy-cloud-providers v0.18.2/go.mod h1:zzFRqgDC6cP1SgPl7lMmo1fjILDZ+bsNtTjLnxAfgI0= -k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg= +k8s.io/kube-aggregator v0.18.3/go.mod 
h1:fux0WabUOggW2yAACL4jQGVd6kv7mSgBnJ3GgCXCris= +k8s.io/kube-controller-manager v0.18.3/go.mod h1:gKpzON0DWgbn5oNAXrsBJAQR0ztw9GQQ7mBBGVYM7Xk= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-proxy v0.18.3/go.mod h1:Uyqd3mVXhJeNzTmZYW/6N00Bu3kVJ6jzLQQ/T7f8jY0= +k8s.io/kube-scheduler v0.18.3/go.mod h1:55V1fgqzVXEDJB/zkBYjVceixZFQVOVWZwfLrnXt3yA= +k8s.io/kubectl v0.18.3/go.mod h1:k/EpvXBDgEsHBzWr0A44l9+ArvYi3txBBnzXBjQasUQ= +k8s.io/kubelet v0.18.3/go.mod h1:KXTAte7pUtoMyIlysam9g6HIY8C+D5Djd4fZvGXqLtg= +k8s.io/kubernetes v1.18.3 h1:6qtm8v3z+OwYm2SnsTxYUtGCsIbGBZ/Dh9yER+aNIoI= +k8s.io/kubernetes v1.18.3/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= +k8s.io/legacy-cloud-providers v0.18.3/go.mod h1:ZsvkD18BRzT2PUxvlX4ueqDA2+eM35d0N0GZC4Jynl8= +k8s.io/metrics v0.18.3/go.mod h1:TkuJE3ezDZ1ym8pYkZoEzJB7HDiFE7qxl+EmExEBoPA= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.18.2/go.mod h1:qYk6alcVIlWzmypsSmsWw5Kj4eUNr5jzJZZFJDUXwXE= +k8s.io/sample-apiserver v0.18.3/go.mod h1:Un04reJ2OCi73A/ZKrZkMtumJznUl98AM18pfu8bM0g= k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -993,8 +991,8 @@ pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732 h1:SAElp8THCfmBdM+4lmWX5geb pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732/go.mod h1:lpvCfhqEHNJSSpG5R5A2EgsVzG8RTt4RfPoQuRAcDmg= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime 
v0.5.1-0.20200327213554-2d4c4877f906 h1:GmjdjkxJjSpke49jWgDxBsd9uuHFdxEkBntoImFd2D8= -sigs.k8s.io/controller-runtime v0.5.1-0.20200327213554-2d4c4877f906/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= +sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM= +sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/hack/verify-lualint.sh b/hack/verify-lualint.sh index 617d35390..1f6048de0 100755 --- a/hack/verify-lualint.sh +++ b/hack/verify-lualint.sh @@ -19,3 +19,5 @@ set -o nounset set -o pipefail luacheck --codes -q rootfs/etc/nginx/lua/ + +find rootfs/etc/nginx/lua/ -name "*.lua" -not -path "*/test/*" -exec lj-releng -L -s {} + && echo "lj-releng validation is success!" diff --git a/images/cfssl/Makefile b/images/cfssl/Makefile index 39acad214..bbc8259ab 100644 --- a/images/cfssl/Makefile +++ b/images/cfssl/Makefile @@ -18,16 +18,12 @@ TAG ?= 0.0 REGISTRY ?= ingress-controller -DOCKER ?= docker -IMGNAME = cfssl -IMAGE = $(REGISTRY)/$(IMGNAME) +IMAGE = $(REGISTRY)/cfssl -container: - $(DOCKER) buildx build \ - --load \ - --platform linux/amd64 \ +image: + docker build \ -t $(IMAGE):$(TAG) rootfs clean: - $(DOCKER) rmi -f $(IMAGE):$(TAG) || true + docker rmi -f $(IMAGE):$(TAG) || true diff --git a/images/cfssl/rootfs/Dockerfile b/images/cfssl/rootfs/Dockerfile index 62db9ec37..5713a9073 100644 --- a/images/cfssl/rootfs/Dockerfile +++ b/images/cfssl/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM alpine:3.11 +FROM alpine:3.12 RUN echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories RUN apk add --no-cache \ diff --git a/images/e2e-prow/Makefile b/images/e2e-prow/Makefile index e16b9f356..d48dea486 100644 --- a/images/e2e-prow/Makefile +++ b/images/e2e-prow/Makefile @@ -1,22 +1,26 @@ +# Copyright 2018 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + TAG ?=v$(shell date +%m%d%Y)-$(shell git rev-parse --short HEAD) REGISTRY ?= quay.io/kubernetes-ingress-controller -DOCKER ?= docker IMAGE = $(REGISTRY)/e2e-prow -all: docker-build docker-push - -docker-build: - $(DOCKER) buildx build \ - --pull \ - --load \ +.PHONY: image +image: + docker build \ --build-arg K8S_RELEASE=v1.17.0 \ - --build-arg ETCD_VERSION=v3.3.18 \ --build-arg KIND_VERSION=v0.8.0 \ --build-arg GO_VERSION=1.14.2 \ - -t $(IMAGE):$(TAG) . 
- -docker-push: - $(DOCKER) push $(IMAGE):$(TAG) - $(DOCKER) tag $(IMAGE):$(TAG) $(IMAGE):latest - $(DOCKER) push $(IMAGE):latest + -t $(IMAGE):$(TAG) rootfs diff --git a/images/e2e-prow/Dockerfile b/images/e2e-prow/rootfs/Dockerfile similarity index 84% rename from images/e2e-prow/Dockerfile rename to images/e2e-prow/rootfs/Dockerfile index ae05fc7a4..bc3457511 100644 --- a/images/e2e-prow/Dockerfile +++ b/images/e2e-prow/rootfs/Dockerfile @@ -16,6 +16,9 @@ # unit and integration tests FROM gcr.io/k8s-testimages/bootstrap +FROM k8s.gcr.io/etcd:3.4.3-0 as etcd + +COPY --from=etcd /usr/local/bin/etcd /usr/local/bin/etcd # hint to kubetest that it is in CI ENV KUBETEST_IN_DOCKER="true" @@ -43,7 +46,6 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* ARG K8S_RELEASE -ARG ETCD_VERSION ARG KIND_VERSION ARG GO_VERSION @@ -53,12 +55,6 @@ RUN curl -sSL https://storage.googleapis.com/kubernetes-release/release/${K8S_RE RUN curl -sSL https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kube-apiserver -o /usr/local/bin/kube-apiserver \ && chmod +x /usr/local/bin/kube-apiserver -RUN curl -sSL https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz \ - && mkdir -p /tmp/etcd-download \ - && tar xzvf /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1 \ - && cp /tmp/etcd-download/etcd /usr/local/bin \ - && rm -rf /tmp/etcd-download - RUN curl -sSL https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64 -o /usr/local/bin/kind \ && chmod +x /usr/local/bin/kind diff --git a/images/e2e/Dockerfile b/images/e2e/Dockerfile deleted file mode 100644 index 6ad014e23..000000000 --- a/images/e2e/Dockerfile +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM quay.io/kubernetes-ingress-controller/nginx-amd64:5d67794f4fbf38ec6575476de46201b068eabf87 - -ARG GOLANG_VERSION -ARG GOLANG_SHA - -ARG RESTY_CLI_VERSION -ARG RESTY_CLI_SHA - -ARG K8S_RELEASE -ARG ETCD_VERSION -ARG CHART_TESTING_VERSION - -ENV GOPATH /go -ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH - -RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" - -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf - -RUN apk add --no-cache \ - bash \ - ca-certificates \ - wget \ - make \ - gcc \ - git \ - musl-dev \ - perl \ - python \ - py-crcmod \ - py-pip \ - openssl - -RUN set -eux; \ - apk add --no-cache --virtual .build-deps \ - g++ \ - pkgconfig \ - openssl \ - unzip \ - go \ - ; \ - export \ -# set GOROOT_BOOTSTRAP such that we can actually build Go - GOROOT_BOOTSTRAP="$(go env GOROOT)" \ -# ... 
and set "cross-building" related vars to the installed system's values so that we create a build targeting the proper arch -# (for example, if our build host is GOARCH=amd64, but our build env/image is GOARCH=386, our build needs GOARCH=386) - GOOS="$(go env GOOS)" \ - GOARCH="$(go env GOARCH)" \ - GOHOSTOS="$(go env GOHOSTOS)" \ - GOHOSTARCH="$(go env GOHOSTARCH)" \ - ; \ -# also explicitly set GO386 and GOARM if appropriate -# https://github.com/docker-library/golang/issues/184 - apkArch="$(apk --print-arch)"; \ - case "$apkArch" in \ - armhf) export GOARM='6' ;; \ - armv7) export GOARM='7' ;; \ - x86) export GO386='387' ;; \ - esac; \ - \ - wget -O go.tgz "https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz"; \ - echo "$GOLANG_SHA *go.tgz" | sha256sum -c -; \ - tar -C /usr/local -xzf go.tgz; \ - rm go.tgz; \ - \ - cd /usr/local/go/src; \ - ./make.bash; \ - \ - rm -rf \ -# https://github.com/golang/go/blob/0b30cf534a03618162d3015c8705dd2231e34703/src/cmd/dist/buildtool.go#L121-L125 - /usr/local/go/pkg/bootstrap \ -# https://golang.org/cl/82095 -# https://github.com/golang/build/blob/e3fe1605c30f6a3fd136b561569933312ede8782/cmd/release/releaselet.go#L56 - /usr/local/go/pkg/obj \ - ; \ - \ - export PATH="/usr/local/go/bin:$PATH"; \ - go version \ - ; \ - url="https://github.com/openresty/resty-cli/archive/v${RESTY_CLI_VERSION}.tar.gz"; \ - wget -O resty_cli.tgz "$url"; \ - echo "${RESTY_CLI_SHA} *resty_cli.tgz" | sha256sum -c -; \ - tar -C /tmp -xzf resty_cli.tgz; \ - rm resty_cli.tgz; \ - mv /tmp/resty-cli-${RESTY_CLI_VERSION}/bin/* /usr/local/bin/; \ - resty -V \ - ; \ - luarocks install luacheck; \ - luarocks install busted \ - ; \ - go get github.com/onsi/ginkgo/ginkgo; \ - go get golang.org/x/lint/golint \ - ; \ - apk del .build-deps; - -RUN wget https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kubectl -O /usr/local/bin/kubectl \ - && chmod +x /usr/local/bin/kubectl - -RUN wget 
https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/amd64/kube-apiserver -O /usr/local/bin/kube-apiserver \ - && chmod +x /usr/local/bin/kube-apiserver - -RUN wget https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -O /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz \ - && mkdir -p /tmp/etcd-download \ - && tar xzvf /tmp/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1 \ - && cp /tmp/etcd-download/etcd /usr/local/bin \ - && rm -rf /tmp/etcd-download - -# Install a YAML Linter -ARG YAML_LINT_VERSION -RUN pip install "yamllint==$YAML_LINT_VERSION" - -# Install Yamale YAML schema validator -ARG YAMALE_VERSION -RUN pip install "yamale==$YAMALE_VERSION" - -RUN wget https://github.com/helm/chart-testing/releases/download/v${CHART_TESTING_VERSION}/chart-testing_${CHART_TESTING_VERSION}_linux_amd64.tar.gz \ - -O /tmp/ct-${CHART_TESTING_VERSION}-linux-amd64.tar.gz \ - && mkdir -p /tmp/ct-download \ - && tar xzvf /tmp/ct-${CHART_TESTING_VERSION}-linux-amd64.tar.gz -C /tmp/ct-download \ - && cp /tmp/ct-download/ct /usr/local/bin \ - && mkdir -p /etc/ct \ - && cp -R /tmp/ct-download/etc/* /etc/ct \ - && rm -rf /tmp/ct-download - -RUN curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash - -RUN curl -sSL -o /usr/local/bin/cfssl https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64 \ - && curl -sSL -o /usr/local/bin/cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64 \ - && chmod +x /usr/local/bin/cfssl* - -WORKDIR $GOPATH diff --git a/images/e2e/Makefile b/images/e2e/Makefile index 65be68de3..78cca8162 100644 --- a/images/e2e/Makefile +++ b/images/e2e/Makefile @@ -17,25 +17,25 @@ REGISTRY ?= quay.io/kubernetes-ingress-controller IMAGE = $(REGISTRY)/e2e -all: docker-build docker-push +HOST_ARCH = $(shell which go >/dev/null 2>&1 && go env GOARCH) +ARCH ?= $(HOST_ARCH) 
+ifeq ($(ARCH),) + $(error mandatory variable ARCH is empty, either set it when calling the command or make sure 'go env GOARCH' works) +endif -docker-build: - docker buildx build \ - --pull \ - --load \ - --progress plain \ +.PHONY: image +image: + docker build \ + --build-arg TARGETARCH="$(ARCH)" \ --build-arg K8S_RELEASE=v1.15.7 \ - --build-arg ETCD_VERSION=v3.3.18 \ - --build-arg GOLANG_VERSION=1.14.2 \ - --build-arg GOLANG_SHA=98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c \ --build-arg RESTY_CLI_VERSION=0.25rc2 \ --build-arg RESTY_CLI_SHA=a38d850441384fa037a5922ca012dcce8708d0e4abe34ad2fe4164a01b28bdfb \ --build-arg CHART_TESTING_VERSION=3.0.0-beta.1 \ --build-arg YAML_LINT_VERSION=1.13.0 \ --build-arg YAMALE_VERSION=1.8.0 \ - -t $(IMAGE):$(TAG) . + --build-arg HELM_VERSION=v3.2.0 \ + -t $(IMAGE):$(TAG) rootfs -docker-push: - docker push $(IMAGE):$(TAG) - docker tag $(IMAGE):$(TAG) $(IMAGE):latest - docker push $(IMAGE):latest +.PHONY: show-image +show-image: + echo -n $(IMAGE):$(TAG) diff --git a/images/e2e/rootfs/Dockerfile b/images/e2e/rootfs/Dockerfile new file mode 100644 index 000000000..aa48b802e --- /dev/null +++ b/images/e2e/rootfs/Dockerfile @@ -0,0 +1,115 @@ +# Copyright 2018 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM golang:1.14.3-alpine3.11 as GO +FROM k8s.gcr.io/etcd:3.4.3-0 as etcd + +FROM quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f + +ARG RESTY_CLI_VERSION +ARG RESTY_CLI_SHA + +ARG K8S_RELEASE +ARG ETCD_VERSION +ARG CHART_TESTING_VERSION + +RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf + +COPY --from=GO /usr/local/go /usr/local/go +COPY --from=etcd /usr/local/bin/etcd /usr/local/bin/etcd + +RUN apk add --no-cache \ + bash \ + ca-certificates \ + wget \ + make \ + gcc \ + git \ + musl-dev \ + perl \ + python \ + py-crcmod \ + py-pip \ + unzip \ + openssl + +ENV GOPATH /go +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH + +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" + +RUN go get github.com/onsi/ginkgo/ginkgo golang.org/x/lint/golint + +RUN wget -O /tmp/resty_cli.tgz https://github.com/openresty/resty-cli/archive/v${RESTY_CLI_VERSION}.tar.gz \ + && echo "${RESTY_CLI_SHA} */tmp/resty_cli.tgz" | sha256sum -c - \ + && tar -C /tmp -xzf /tmp/resty_cli.tgz \ + && mv /tmp/resty-cli-${RESTY_CLI_VERSION}/bin/* /usr/local/bin/ \ + && resty -V \ + && rm -rf /tmp/* + +RUN wget -O /tmp/luarocks.tgz https://github.com/luarocks/luarocks/archive/v3.3.1.tar.gz \ + && tar -C /tmp -xzf /tmp/luarocks.tgz \ + && cd /tmp/luarocks* \ + && ./configure \ + && make install + +RUN luarocks install busted \ + && luarocks install luacheck + +ARG BUSTED_VERSION +ARG BUSTED_SHA + +ARG TARGETARCH + +RUN wget -O /usr/local/bin/kubectl \ + https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/${TARGETARCH}/kubectl \ + && chmod +x /usr/local/bin/kubectl + +RUN wget -O /usr/local/bin/kube-apiserver \ + https://storage.googleapis.com/kubernetes-release/release/${K8S_RELEASE}/bin/linux/${TARGETARCH}/kube-apiserver \ + && chmod +x /usr/local/bin/kube-apiserver + +RUN wget -O /tmp/ct-${CHART_TESTING_VERSION}-linux-amd64.tar.gz \ + 
https://github.com/helm/chart-testing/releases/download/v${CHART_TESTING_VERSION}/chart-testing_${CHART_TESTING_VERSION}_linux_amd64.tar.gz \ + && mkdir -p /tmp/ct-download \ + && tar xzvf /tmp/ct-${CHART_TESTING_VERSION}-linux-amd64.tar.gz -C /tmp/ct-download \ + && rm /tmp/ct-${CHART_TESTING_VERSION}-linux-amd64.tar.gz \ + && cp /tmp/ct-download/ct /usr/local/bin \ + && mkdir -p /etc/ct \ + && cp -R /tmp/ct-download/etc/* /etc/ct \ + && rm -rf /tmp/* + +RUN wget https://raw.githubusercontent.com/openresty/openresty-devel-utils/master/lj-releng -O /usr/local/bin/lj-releng \ + && chmod +x /usr/local/bin/lj-releng + +ARG HELM_VERSION + +RUN wget -O /tmp/helm.tgz https://get.helm.sh/helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz \ + && tar -C /tmp -xzf /tmp/helm.tgz \ + && cp /tmp/linux*/helm /usr/local/bin \ + && rm -rf /tmp/* + +RUN curl -sSL -o /usr/local/bin/cfssl https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_${TARGETARCH} \ + && curl -sSL -o /usr/local/bin/cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_${TARGETARCH} \ + && chmod +x /usr/local/bin/cfssl* + +# Install a YAML Linter +ARG YAML_LINT_VERSION +RUN pip install "yamllint==$YAML_LINT_VERSION" + +# Install Yamale YAML schema validator +ARG YAMALE_VERSION +RUN pip install "yamale==$YAMALE_VERSION" + +WORKDIR $GOPATH diff --git a/images/echo/Dockerfile b/images/echo/Dockerfile deleted file mode 100644 index 7c78e47ba..000000000 --- a/images/echo/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM openresty/openresty:1.15.8.2-alpine - -RUN apk add -U perl curl \ - && opm get bungle/lua-resty-template - -COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf diff --git a/images/echo/Makefile b/images/echo/Makefile index 3eee7459b..23be85f98 100644 --- a/images/echo/Makefile +++ b/images/echo/Makefile @@ -18,16 +18,12 @@ TAG ?= 0.0 REGISTRY ?= ingress-controller -DOCKER ?= docker -IMGNAME = echo -IMAGE = $(REGISTRY)/$(IMGNAME) +IMAGE 
= $(REGISTRY)/echo -container: - $(DOCKER) buildx build \ - --load \ - --platform linux/amd64 \ - -t $(IMAGE):$(TAG) . +image: + docker build \ + -t $(IMAGE):$(TAG) rootfs clean: - $(DOCKER) rmi -f $(IMAGE):$(TAG) || true + docker rmi -f $(IMAGE):$(TAG) || true diff --git a/images/echo/rootfs/Dockerfile b/images/echo/rootfs/Dockerfile new file mode 100644 index 000000000..392da697c --- /dev/null +++ b/images/echo/rootfs/Dockerfile @@ -0,0 +1,14 @@ +FROM quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f + +RUN apk add -U perl curl make unzip + +RUN wget -O /tmp/luarocks.tgz https://github.com/luarocks/luarocks/archive/v3.3.1.tar.gz \ + && tar -C /tmp -xzf /tmp/luarocks.tgz \ + && cd /tmp/luarocks* \ + && ./configure \ + && make install \ + && rm -rf /tmp/* + +RUN luarocks install lua-resty-template + +COPY nginx.conf /etc/nginx/nginx.conf diff --git a/images/echo/nginx.conf b/images/echo/rootfs/nginx.conf similarity index 99% rename from images/echo/nginx.conf rename to images/echo/rootfs/nginx.conf index 74210120c..8ffdf3f28 100644 --- a/images/echo/nginx.conf +++ b/images/echo/rootfs/nginx.conf @@ -4,8 +4,6 @@ env POD_NAME; env POD_NAMESPACE; env POD_IP; -daemon off; - events { worker_connections 1024; } diff --git a/images/fastcgi-helloserver/Makefile b/images/fastcgi-helloserver/Makefile index 4e1de3aa7..af0eed9fc 100644 --- a/images/fastcgi-helloserver/Makefile +++ b/images/fastcgi-helloserver/Makefile @@ -17,24 +17,31 @@ # Use the 0.0 tag for testing, it shouldn't clobber any release builds TAG ?= 0.0 -REGISTRY ?= ingress-controller -DOCKER ?= docker +HOSTARCH := $(shell uname -m | sed -e s/x86_64/amd64/ \ + -e s/s390x/s390x/ \ + -e s/armv7l/arm/ \ + -e s/aarch64.*/arm64/) -IMGNAME = fastcgi-helloserver -IMAGE = $(REGISTRY)/$(IMGNAME) +ifndef ARCH +ARCH := $(HOSTARCH) +endif +ifeq ($(ARCH),) + $(error mandatory variable ARCH is empty) +endif + +REGISTRY ?= ingress-controller + +IMAGE = $(REGISTRY)/fastcgi-helloserver 
PKG=k8s.io/ingress-nginx/images/fastcgi-helloserver -container: clean build - $(DOCKER) buildx build \ - --load \ - --platform linux/amd64 \ +.PHONY: image +image: build + docker build \ -t $(IMAGE):$(TAG) rootfs -build: clean - CGO_ENABLED=0 go build -a -installsuffix cgo \ +.PHONY: build +build: + GOARCH=$(ARCH) CGO_ENABLED=0 go build -a -installsuffix cgo \ -ldflags "-s -w" \ -o rootfs/fastcgi-helloserver ${PKG}/... - -clean: - $(DOCKER) rmi -f $(IMAGE):$(TAG) || true diff --git a/images/fastcgi-helloserver/rootfs/Dockerfile b/images/fastcgi-helloserver/rootfs/Dockerfile index 7f66a55bd..b68ef45c3 100755 --- a/images/fastcgi-helloserver/rootfs/Dockerfile +++ b/images/fastcgi-helloserver/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM k8s.gcr.io/debian-base:v2.0.0 +FROM scratch COPY . / diff --git a/images/httpbin/Makefile b/images/httpbin/Makefile index 2bb40eeb4..6e30e1ada 100644 --- a/images/httpbin/Makefile +++ b/images/httpbin/Makefile @@ -18,16 +18,10 @@ TAG ?= 0.0 REGISTRY ?= ingress-controller -DOCKER ?= docker -IMGNAME = httpbin -IMAGE = $(REGISTRY)/$(IMGNAME) +IMAGE = $(REGISTRY)/httpbin -container: - $(DOCKER) buildx build \ - --load \ - --platform linux/amd64 \ +.PHONY: image +image: + docker build \ -t $(IMAGE):$(TAG) rootfs - -clean: - $(DOCKER) rmi -f $(IMAGE):$(TAG) || true diff --git a/images/httpbin/rootfs/Dockerfile b/images/httpbin/rootfs/Dockerfile index ff0daf964..3405bf68b 100644 --- a/images/httpbin/rootfs/Dockerfile +++ b/images/httpbin/rootfs/Dockerfile @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM alpine:3.11 +FROM alpine:3.12 ENV LC_ALL=C.UTF-8 ENV LANG=C.UTF-8 -RUN echo "@edge http://nl.alpinelinux.org/alpine/edge/main" >> /etc/apk/repositories \ - && apk update \ - && apk add --no-cache \ - python3 python3-dev \ - musl-dev gcc g++ make \ - libffi libffi-dev libstdc++ \ - py3-gevent py3-gunicorn py3-wheel@edge \ +RUN apk update \ + && apk add --no-cache \ + python3 python3-dev \ + musl-dev gcc g++ make \ + libffi libffi-dev libstdc++ \ + py3-gevent py3-gunicorn py3-wheel \ + py3-pip \ && pip3 install httpbin \ && apk del python3-dev musl-dev gcc g++ make libffi-dev diff --git a/images/nginx/Makefile b/images/nginx/Makefile index 0adb3b6d1..1df883e98 100644 --- a/images/nginx/Makefile +++ b/images/nginx/Makefile @@ -12,53 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -.DEFAULT_GOAL:=container +.DEFAULT_GOAL:=image # set default shell SHELL=/bin/bash -o pipefail # 0.0.0 shouldn't clobber any released builds -TAG ?= 0.101 +TAG ?= 0.103 REGISTRY ?= quay.io/kubernetes-ingress-controller -IMGNAME = nginx -IMAGE = $(REGISTRY)/$(IMGNAME) +IMAGE = $(REGISTRY)/nginx -PLATFORMS = amd64 arm arm64 - -EMPTY := -SPACE := $(EMPTY) $(EMPTY) -COMMA := , - -.PHONY: container -container: - DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build \ +.PHONY: image +image: + docker buildx build \ + --pull \ + --push \ --progress plain \ - --platform $(subst $(SPACE),$(COMMA),$(PLATFORMS)) \ + --platform amd64,arm,arm64,s390x \ --tag $(IMAGE):$(TAG) rootfs - # https://github.com/docker/buildx/issues/59 - $(foreach PLATFORM,$(PLATFORMS), \ - DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build \ - --load \ - --progress plain \ - --platform $(PLATFORM) \ - --tag $(IMAGE)-$(PLATFORM):$(TAG) rootfs;) - -.PHONY: push -push: container - $(foreach PLATFORM,$(PLATFORMS), \ - docker push $(IMAGE)-$(PLATFORM):$(TAG);) - -.PHONY: release -release: push - echo "done" - .PHONY: init-docker-buildx init-docker-buildx: ifneq 
($(shell docker buildx 2>&1 >/dev/null; echo $?),) $(error "buildx not vailable. Docker 19.03 or higher is required") endif - docker run --rm --privileged docker/binfmt:66f9012c56a8316f9244ffd7622d7c21c1f6f28d + docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64 docker buildx create --name ingress-nginx --use || true docker buildx inspect --bootstrap diff --git a/images/nginx/rc.yaml b/images/nginx/rc.yaml index d9fa7ef6e..53b16806e 100644 --- a/images/nginx/rc.yaml +++ b/images/nginx/rc.yaml @@ -38,7 +38,7 @@ spec: spec: containers: - name: nginx - image: quay.io/kubernetes-ingress-controller/nginx:0.97 + image: quay.io/kubernetes-ingress-controller/nginx:0.103 ports: - containerPort: 80 - containerPort: 443 diff --git a/images/nginx/rootfs/build.sh b/images/nginx/rootfs/build.sh index bd198ec53..e11b0db80 100755 --- a/images/nginx/rootfs/build.sh +++ b/images/nginx/rootfs/build.sh @@ -14,14 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- set -o errexit set -o nounset set -o pipefail -export DEBIAN_FRONTEND=noninteractive - -export NGINX_VERSION=1.17.10 +export NGINX_VERSION=1.19.0 export NDK_VERSION=0.3.1rc1 export SETMISC_VERSION=0.32 export MORE_HEADERS_VERSION=0.33 @@ -31,24 +28,29 @@ export NGINX_OPENTRACING_VERSION=0.9.0 export OPENTRACING_CPP_VERSION=1.5.1 export ZIPKIN_CPP_VERSION=0.5.2 export JAEGER_VERSION=0.4.2 -export MSGPACK_VERSION=3.2.0 -export DATADOG_CPP_VERSION=1.1.3 +export MSGPACK_VERSION=3.2.1 +export DATADOG_CPP_VERSION=1.1.5 export MODSECURITY_VERSION=1.0.1 -export MODSECURITY_LIB_VERSION=6624a18a4e7fd9881a7a9b435db3e481e8e986a5 -export OWASP_MODSECURITY_CRS_VERSION=3.2.0 +export MODSECURITY_LIB_VERSION=v3.0.4 +export OWASP_MODSECURITY_CRS_VERSION=v3.2.0 export LUA_NGX_VERSION=0.10.15 export LUA_STREAM_NGX_VERSION=0.0.7 export LUA_UPSTREAM_VERSION=0.07 export LUA_BRIDGE_TRACER_VERSION=0.1.1 +export LUA_CJSON_VERSION=2.1.0.7 export NGINX_INFLUXDB_VERSION=5b09391cb7b9a889687c0aa67964c06a2d933e8b export GEOIP2_VERSION=3.3 export NGINX_AJP_VERSION=bf6cd93f2098b59260de8d494f0f4b1f11a84627 -export RESTY_LUAROCKS_VERSION=3.1.3 -export LUAJIT_VERSION=33b5f86c1b9ab53ad09c33f9097df42403587bea + +export LUAJIT_VERSION=31116c4d25c4283a52b2d87fed50101cf20f5b77 + export LUA_RESTY_BALANCER=0.03 +export LUA_RESTY_CACHE=0.10rc1 export LUA_RESTY_CORE=0.1.17 -export LUA_CJSON_VERSION=2.1.0.7 export LUA_RESTY_COOKIE_VERSION=766ad8c15e498850ac77f5e0265f1d3f30dc4027 +export LUA_RESTY_DNS=0.21 +export LUA_RESTY_HTTP=0.15 +export LUA_RESTY_LOCK=0.08 export BUILD_PATH=/tmp/build @@ -92,7 +94,6 @@ apk add \ alpine-sdk \ findutils \ curl ca-certificates \ - geoip-dev \ patch \ libaio-dev \ openssl \ @@ -102,37 +103,21 @@ apk add \ wget \ curl-dev \ libprotobuf \ - git g++ pkgconf flex bison doxygen yajl-dev lmdb-dev libtool autoconf libxml2 pcre-dev libxml2-dev \ - python \ + git g++ pkgconf flex bison doxygen yajl-dev lmdb-dev libtool autoconf libxml2 libxml2-dev \ + python3 \ libmaxminddb-dev \ bc \ 
unzip \ - dos2unix mercurial \ + dos2unix \ yaml-cpp mkdir -p /etc/nginx -# Get the GeoIP data -GEOIP_FOLDER=/etc/nginx/geoip -mkdir -p $GEOIP_FOLDER - -function geoip2_get { - wget -O $GEOIP_FOLDER/$1.tar.gz $2 || { echo "Could not download $1, exiting." ; exit 1; } - mkdir $GEOIP_FOLDER/$1 \ - && tar xf $GEOIP_FOLDER/$1.tar.gz -C $GEOIP_FOLDER/$1 --strip-components 1 \ - && mv $GEOIP_FOLDER/$1/$1.mmdb $GEOIP_FOLDER/$1.mmdb \ - && rm -rf $GEOIP_FOLDER/$1 \ - && rm -rf $GEOIP_FOLDER/$1.tar.gz -} - -#geoip2_get "GeoLite2-City" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz" -#geoip2_get "GeoLite2-ASN" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz" - mkdir --verbose -p "$BUILD_PATH" cd "$BUILD_PATH" # download, verify and extract the source files -get_src a9aa73f19c352a6b166d78e2a664bb3ef1295bbe6d3cc5aa7404bd4664ab4b83 \ +get_src 44a616171fcd7d7ad7c6af3e6f3ad0879b54db5a5d21be874cd458b5691e36c8 \ "https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" get_src 49f50d4cd62b166bc1aaf712febec5e028d9f187cedbc27a610dfd01bdde2d36 \ @@ -165,7 +150,7 @@ get_src c969a78659bb47c84929de0b9adc1f8c512a51ec9dd3b162cb568ae228d3d59e \ get_src 21257af93a64fee42c04ca6262d292b2e4e0b7b0660c511db357b32fd42ef5d3 \ "https://github.com/jaegertracing/jaeger-client-cpp/archive/v$JAEGER_VERSION.tar.gz" -get_src ff865a36bad5c72b8e7ebc4b7cf5f27a820fce4faff9c571c1791e3728355a39 \ +get_src 464f46744a6be778626d11452c4db3c2d09461080c6db42e358e21af19d542f6 \ "https://github.com/msgpack/msgpack-c/archive/cpp-$MSGPACK_VERSION.tar.gz" get_src 7d5f3439c8df56046d0564b5857fd8a30296ab1bd6df0f048aed7afb56a0a4c2 \ @@ -177,10 +162,10 @@ get_src 99c47c75c159795c9faf76bbb9fa58e5a50b75286c86565ffcec8514b1c74bf9 \ get_src 2a69815e4ae01aa8b170941a8e1a10b6f6a9aab699dee485d58f021dd933829a \ "https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz" -get_src 3b43917a155b81b7d20fdbb3c1be4419626286616195ad426bff1f2f59aa3659 \ +get_src 
82bf1af1ee89887648b53c9df566f8b52ec10400f1641c051970a7540b7bf06a \ "https://github.com/openresty/luajit2/archive/$LUAJIT_VERSION.tar.gz" -get_src 6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf \ +get_src b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924 \ "https://github.com/DataDog/dd-opentracing-cpp/archive/v$DATADOG_CPP_VERSION.tar.gz" get_src 6faab57557bd9cc9fc38208f6bc304c1c13cf048640779f98812cf1f9567e202 \ @@ -195,9 +180,6 @@ get_src 41378438c833e313a18869d0c4a72704b4835c30acaf7fd68013ab6732ff78a7 \ get_src 5f629a50ba22347c441421091da70fdc2ac14586619934534e5a0f8a1390a950 \ "https://github.com/yaoweibin/nginx_ajp_module/archive/$NGINX_AJP_VERSION.tar.gz" -get_src c573435f495aac159e34eaa0a3847172a2298eb6295fcdc35d565f9f9b990513 \ - "https://luarocks.github.io/luarocks/releases/luarocks-${RESTY_LUAROCKS_VERSION}.tar.gz" - get_src 5d16e623d17d4f42cc64ea9cfb69ca960d313e12f5d828f785dd227cc483fcbd \ "https://github.com/openresty/lua-resty-upload/archive/v0.10.tar.gz" @@ -216,6 +198,19 @@ get_src 59d2f18ecadba48be61061004c8664eaed1111a3372cd2567cb24c5a47eb41fe \ get_src f818b5cef0881e5987606f2acda0e491531a0cb0c126d8dca02e2343edf641ef \ "https://github.com/cloudflare/lua-resty-cookie/archive/$LUA_RESTY_COOKIE_VERSION.tar.gz" +get_src f6b57d83a937899f97a98372c1e2631dd1ab8f580fc0ffeac0b27b4d42225a99 \ + "https://github.com/openresty/lua-resty-lrucache/archive/v$LUA_RESTY_CACHE.tar.gz" + +get_src 2b4683f9abe73e18ca00345c65010c9056777970907a311d6e1699f753141de2 \ + "https://github.com/openresty/lua-resty-lock/archive/v$LUA_RESTY_LOCK.tar.gz" + +get_src 4aca34f324d543754968359672dcf5f856234574ee4da360ce02c778d244572a \ + "https://github.com/openresty/lua-resty-dns/archive/v$LUA_RESTY_DNS.tar.gz" + +get_src 987d5754a366d3ccbf745d2765f82595dcff5b94ba6c755eeb6d310447996f32 \ + "https://github.com/ledgetech/lua-resty-http/archive/v$LUA_RESTY_HTTP.tar.gz" + + # improve compilation times CORES=$(($(grep -c ^processor /proc/cpuinfo) - 0)) @@ 
-233,8 +228,13 @@ cd "$BUILD_PATH/luajit2-$LUAJIT_VERSION" make CCDEBUG=-g make install +ln -s /usr/local/bin/luajit /usr/local/bin/lua + cd "$BUILD_PATH" +# Git tuning +git config --global --add core.compression -1 + # install openresty-gdb-utils cd / git clone --depth=1 https://github.com/openresty/openresty-gdb-utils.git @@ -353,7 +353,7 @@ git submodule init git submodule update cd "$BUILD_PATH" -git clone https://github.com/ssdeep-project/ssdeep +git clone --depth=1 https://github.com/ssdeep-project/ssdeep cd ssdeep/ ./bootstrap @@ -364,9 +364,8 @@ make install # build modsecurity library cd "$BUILD_PATH" -git clone https://github.com/SpiderLabs/ModSecurity +git clone --depth=1 -b $MODSECURITY_LIB_VERSION https://github.com/SpiderLabs/ModSecurity cd ModSecurity/ -git checkout $MODSECURITY_LIB_VERSION git submodule init git submodule update @@ -393,7 +392,7 @@ echo "SecAuditLogStorageDir /var/log/audit/" >> /etc/nginx/modsecurity/modsecuri # Download owasp modsecurity crs cd /etc/nginx/ -git clone -b v$OWASP_MODSECURITY_CRS_VERSION https://github.com/SpiderLabs/owasp-modsecurity-crs +git clone -b $OWASP_MODSECURITY_CRS_VERSION https://github.com/SpiderLabs/owasp-modsecurity-crs cd owasp-modsecurity-crs mv crs-setup.conf.example crs-setup.conf @@ -533,22 +532,6 @@ WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ make make install -cd "$BUILD_PATH/luarocks-${RESTY_LUAROCKS_VERSION}" -./configure \ - --lua-suffix=jit-2.1.0-beta3 \ - --with-lua-include=/usr/local/include/luajit-2.1 - -make -make install - -export LUA_INCLUDE_DIR=/usr/local/include/luajit-2.1 - -ln -s $LUA_INCLUDE_DIR /usr/include/lua5.1 - -if [[ ${ARCH} != "armv7l" ]]; then - luarocks install lrexlib-pcre 2.7.2-1 -fi - cd "$BUILD_PATH/lua-resty-core-$LUA_RESTY_CORE" make install @@ -556,6 +539,9 @@ cd "$BUILD_PATH/lua-resty-balancer-$LUA_RESTY_BALANCER" make all make install +export LUA_INCLUDE_DIR=/usr/local/include/luajit-2.1 +ln -s $LUA_INCLUDE_DIR /usr/include/lua5.1 + cd 
"$BUILD_PATH/lua-cjson-$LUA_CJSON_VERSION" make all make install @@ -564,13 +550,18 @@ cd "$BUILD_PATH/lua-resty-cookie-$LUA_RESTY_COOKIE_VERSION" make all make install -luarocks install lua-resty-iputils 0.3.0-1 -luarocks install lua-resty-lrucache 0.09-2 -luarocks install lua-resty-lock 0.08-0 -luarocks install lua-resty-dns 0.21-1 +cd "$BUILD_PATH/lua-resty-lrucache-$LUA_RESTY_CACHE" +make install + +cd "$BUILD_PATH/lua-resty-dns-$LUA_RESTY_DNS" +make install + +cd "$BUILD_PATH/lua-resty-lock-$LUA_RESTY_LOCK" +make install # required for OCSP verification -luarocks install lua-resty-http +cd "$BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP" +make install cd "$BUILD_PATH/lua-resty-upload-0.10" make install @@ -588,9 +579,8 @@ make install # mimalloc cd "$BUILD_PATH" -git clone https://github.com/microsoft/mimalloc +git clone --depth=1 -b v1.6.3 https://github.com/microsoft/mimalloc cd mimalloc -git checkout v1.6.2 mkdir -p out/release cd out/release diff --git a/internal/ingress/annotations/annotations.go b/internal/ingress/annotations/annotations.go index d46a09ffa..7fe64efd8 100644 --- a/internal/ingress/annotations/annotations.go +++ b/internal/ingress/annotations/annotations.go @@ -108,7 +108,7 @@ type Ingress struct { UpstreamVhost string Whitelist ipwhitelist.SourceRange XForwardedPrefix string - SSLCiphers string + SSLCipher sslcipher.Config Logs log.Config InfluxDB influxdb.Config ModSecurity modsecurity.Config @@ -156,7 +156,7 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor { "UpstreamVhost": upstreamvhost.NewParser(cfg), "Whitelist": ipwhitelist.NewParser(cfg), "XForwardedPrefix": xforwardedprefix.NewParser(cfg), - "SSLCiphers": sslcipher.NewParser(cfg), + "SSLCipher": sslcipher.NewParser(cfg), "Logs": log.NewParser(cfg), "InfluxDB": influxdb.NewParser(cfg), "BackendProtocol": backendprotocol.NewParser(cfg), diff --git a/internal/ingress/annotations/sslcipher/main.go b/internal/ingress/annotations/sslcipher/main.go index 267694fef..d100a0da4 
100644 --- a/internal/ingress/annotations/sslcipher/main.go +++ b/internal/ingress/annotations/sslcipher/main.go @@ -27,13 +27,36 @@ type sslCipher struct { r resolver.Resolver } +// Config contains the ssl-ciphers & ssl-prefer-server-ciphers configuration +type Config struct { + SSLCiphers string + SSLPreferServerCiphers string +} + // NewParser creates a new sslCipher annotation parser func NewParser(r resolver.Resolver) parser.IngressAnnotation { return sslCipher{r} } // Parse parses the annotations contained in the ingress rule -// used to add ssl-ciphers to the server name +// used to add ssl-ciphers & ssl-prefer-server-ciphers to the server name func (sc sslCipher) Parse(ing *networking.Ingress) (interface{}, error) { - return parser.GetStringAnnotation("ssl-ciphers", ing) + config := &Config{} + var err error + var sslPreferServerCiphers bool + + sslPreferServerCiphers, err = parser.GetBoolAnnotation("ssl-prefer-server-ciphers", ing) + if err != nil { + config.SSLPreferServerCiphers = "" + } else { + if sslPreferServerCiphers { + config.SSLPreferServerCiphers = "on" + } else { + config.SSLPreferServerCiphers = "off" + } + } + + config.SSLCiphers, _ = parser.GetStringAnnotation("ssl-ciphers", ing) + + return config, nil } diff --git a/internal/ingress/annotations/sslcipher/main_test.go b/internal/ingress/annotations/sslcipher/main_test.go index dbb0f500f..8110697dc 100644 --- a/internal/ingress/annotations/sslcipher/main_test.go +++ b/internal/ingress/annotations/sslcipher/main_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package sslcipher import ( + "reflect" "testing" api "k8s.io/api/core/v1" @@ -27,22 +28,27 @@ import ( ) func TestParse(t *testing.T) { - annotation := parser.GetAnnotationWithPrefix("ssl-ciphers") ap := NewParser(&resolver.Mock{}) if ap == nil { t.Fatalf("expected a parser.IngressAnnotation but returned nil") } + annotationSSLCiphers := parser.GetAnnotationWithPrefix("ssl-ciphers") + annotationSSLPreferServerCiphers := parser.GetAnnotationWithPrefix("ssl-prefer-server-ciphers") + testCases := []struct { annotations map[string]string - expected string + expected Config }{ - {map[string]string{annotation: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP"}, "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP"}, - {map[string]string{annotation: "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"}, - "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"}, - {map[string]string{annotation: ""}, ""}, - {map[string]string{}, ""}, - {nil, ""}, + {map[string]string{annotationSSLCiphers: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP"}, Config{"ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", ""}}, + {map[string]string{annotationSSLCiphers: "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"}, + 
Config{"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256", ""}}, + {map[string]string{annotationSSLCiphers: ""}, Config{"", ""}}, + {map[string]string{annotationSSLPreferServerCiphers: "true"}, Config{"", "on"}}, + {map[string]string{annotationSSLPreferServerCiphers: "false"}, Config{"", "off"}}, + {map[string]string{annotationSSLCiphers: "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", annotationSSLPreferServerCiphers: "true"}, Config{"ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", "on"}}, + {map[string]string{}, Config{"", ""}}, + {nil, Config{"", ""}}, } ing := &networking.Ingress{ @@ -56,7 +62,7 @@ func TestParse(t *testing.T) { for _, testCase := range testCases { ing.SetAnnotations(testCase.annotations) result, _ := ap.Parse(ing) - if result != testCase.expected { + if !reflect.DeepEqual(result, &testCase.expected) { t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) } } diff --git a/internal/ingress/annotations/upstreamhashby/main_test.go b/internal/ingress/annotations/upstreamhashby/main_test.go index e67803e51..5a71be56f 100644 --- a/internal/ingress/annotations/upstreamhashby/main_test.go +++ b/internal/ingress/annotations/upstreamhashby/main_test.go @@ -39,6 +39,7 @@ func TestParse(t *testing.T) { expected string }{ {map[string]string{annotation: "$request_uri"}, "$request_uri"}, + {map[string]string{annotation: "$request_uri$scheme"}, "$request_uri$scheme"}, {map[string]string{annotation: "false"}, "false"}, {map[string]string{}, ""}, {nil, ""}, diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index 65e20af7e..10305c2ee 100644 --- a/internal/ingress/controller/config/config.go +++ 
b/internal/ingress/controller/config/config.go @@ -111,11 +111,20 @@ type Configuration struct { // By default this is disabled EnableAccessLogForDefaultBackend bool `json:"enable-access-log-for-default-backend"` - // AccessLogPath sets the path of the access logs if enabled + // AccessLogPath sets the path of the access logs for both http and stream contexts if enabled // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + // http://nginx.org/en/docs/stream/ngx_stream_log_module.html#access_log // By default access logs go to /var/log/nginx/access.log AccessLogPath string `json:"access-log-path,omitempty"` + // HttpAccessLogPath sets the path of the access logs for http context globally if enabled + // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + HttpAccessLogPath string `json:"http-access-log-path,omitempty"` + + // StreamAccessLogPath sets the path of the access logs for stream context globally if enabled + // http://nginx.org/en/docs/stream/ngx_stream_log_module.html#access_log + StreamAccessLogPath string `json:"stream-access-log-path,omitempty"` + // WorkerCPUAffinity bind nginx worker processes to CPUs this will improve response latency // http://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity // By default this is disabled diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 8ce417a55..59e9b25d1 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -1053,8 +1053,9 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, Locations: []*ingress.Location{ loc, }, - SSLPassthrough: anns.SSLPassthrough, - SSLCiphers: anns.SSLCiphers, + SSLPassthrough: anns.SSLPassthrough, + SSLCiphers: anns.SSLCipher.SSLCiphers, + SSLPreferServerCiphers: anns.SSLCipher.SSLPreferServerCiphers, } } } @@ -1094,8 +1095,13 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, } // only add SSL ciphers if the 
server does not have them previously configured - if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" { - servers[host].SSLCiphers = anns.SSLCiphers + if servers[host].SSLCiphers == "" && anns.SSLCipher.SSLCiphers != "" { + servers[host].SSLCiphers = anns.SSLCipher.SSLCiphers + } + + // only add SSLPreferServerCiphers if the server does not have them previously configured + if servers[host].SSLPreferServerCiphers == "" && anns.SSLCipher.SSLPreferServerCiphers != "" { + servers[host].SSLPreferServerCiphers = anns.SSLCipher.SSLPreferServerCiphers } // only add a certificate if the server does not have one previously configured diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index d697e0c51..d270a6c30 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -343,17 +343,10 @@ func (n *NGINXController) Start() { // issues because of this behavior. // To avoid this issue we restart nginx in case of errors. if process.IsRespawnIfRequired(err) { - process.WaitUntilPortIsAvailable(n.cfg.ListenPorts.HTTP) // release command resources - cmd.Process.Release() - // start a new nginx master process if the controller is not being stopped - cmd = n.command.ExecCommand() - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - Pgid: 0, - } - n.start(cmd) + return } + case event := <-n.updateCh.Out(): if n.isShuttingDown { break diff --git a/internal/ingress/controller/process/nginx.go b/internal/ingress/controller/process/nginx.go index a0aa44d90..69314441f 100644 --- a/internal/ingress/controller/process/nginx.go +++ b/internal/ingress/controller/process/nginx.go @@ -17,14 +17,9 @@ limitations under the License. 
package process import ( - "fmt" - "net" - "os" "os/exec" "syscall" - "time" - "github.com/ncabatoff/process-exporter/proc" "k8s.io/klog" ) @@ -43,41 +38,3 @@ NGINX master process died (%v): %v `, waitStatus.ExitStatus(), err) return true } - -// WaitUntilPortIsAvailable waits until there is no NGINX master or worker -// process/es listening in a particular port. -func WaitUntilPortIsAvailable(port int) { - // we wait until the workers are killed - for { - conn, err := net.DialTimeout("tcp", fmt.Sprintf("0.0.0.0:%v", port), 1*time.Second) - if err != nil { - break - } - conn.Close() - // kill nginx worker processes - fs, err := proc.NewFS("/proc", false) - if err != nil { - klog.Errorf("unexpected error reading /proc information: %v", err) - continue - } - - procs, _ := fs.FS.AllProcs() - for _, p := range procs { - pn, err := p.Comm() - if err != nil { - klog.Errorf("unexpected error obtaining process information: %v", err) - continue - } - - if pn == "nginx" { - osp, err := os.FindProcess(p.PID) - if err != nil { - klog.Errorf("unexpected error obtaining process information: %v", err) - continue - } - osp.Signal(syscall.SIGQUIT) - } - } - time.Sleep(100 * time.Millisecond) - } -} diff --git a/internal/ingress/controller/store/store.go b/internal/ingress/controller/store/store.go index b1b4cf131..bacf507f8 100644 --- a/internal/ingress/controller/store/store.go +++ b/internal/ingress/controller/store/store.go @@ -261,10 +261,24 @@ func New( store.listers.IngressWithAnnotation.Store = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + // As we currently do not filter out kubernetes objects we list, we can + // retrieve a huge amount of data from the API server. + // In a cluster using HELM < v3 configmaps are used to store binary data. + // If you happen to have a lot of HELM releases in the cluster it will make + // the memory consumption of nginx-ingress-controller explode. + // In order to avoid that we filter out labels OWNER=TILLER. 
+ tweakListOptionsFunc := func(options *metav1.ListOptions) { + if len(options.LabelSelector) > 0 { + options.LabelSelector += ",OWNER!=TILLER" + } else { + options.LabelSelector = "OWNER!=TILLER" + } + } + // create informers factory, enable and assign required informers infFactory := informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod, informers.WithNamespace(namespace), - informers.WithTweakListOptions(func(*metav1.ListOptions) {})) + informers.WithTweakListOptions(tweakListOptionsFunc)) if k8s.IsNetworkingIngressAvailable { store.informers.Ingress = infFactory.Networking().V1beta1().Ingresses().Informer() diff --git a/internal/ingress/controller/template/template.go b/internal/ingress/controller/template/template.go index 9861377f2..3666defbf 100644 --- a/internal/ingress/controller/template/template.go +++ b/internal/ingress/controller/template/template.go @@ -1229,18 +1229,17 @@ func commonListenOptions(template config.TemplateConfig, hostname string) string func httpListener(addresses []string, co string, tc config.TemplateConfig) []string { out := make([]string, 0) for _, address := range addresses { - l := make([]string, 0) - l = append(l, "listen") + lo := []string{"listen"} if address == "" { - l = append(l, fmt.Sprintf("%v", tc.ListenPorts.HTTP)) + lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.HTTP)) } else { - l = append(l, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTP)) + lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTP)) } - l = append(l, co) - l = append(l, ";") - out = append(out, strings.Join(l, " ")) + lo = append(lo, co) + lo = append(lo, ";") + out = append(out, strings.Join(lo, " ")) } return out @@ -1249,38 +1248,35 @@ func httpListener(addresses []string, co string, tc config.TemplateConfig) []str func httpsListener(addresses []string, co string, tc config.TemplateConfig) []string { out := make([]string, 0) for _, address := range addresses { - l := make([]string, 0) - l = append(l, "listen") + lo := 
[]string{"listen"} if tc.IsSSLPassthroughEnabled { if address == "" { - l = append(l, fmt.Sprintf("%v", tc.ListenPorts.SSLProxy)) + lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.SSLProxy)) } else { - l = append(l, fmt.Sprintf("%v:%v", address, tc.ListenPorts.SSLProxy)) + lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.SSLProxy)) } - l = append(l, "proxy_protocol") + if !strings.Contains(co, "proxy_protocol") { + lo = append(lo, "proxy_protocol") + } } else { if address == "" { - l = append(l, fmt.Sprintf("%v", tc.ListenPorts.HTTPS)) + lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.HTTPS)) } else { - l = append(l, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTPS)) - } - - if tc.Cfg.UseProxyProtocol { - l = append(l, "proxy_protocol") + lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTPS)) } } - l = append(l, co) - l = append(l, "ssl") + lo = append(lo, co) + lo = append(lo, "ssl") if tc.Cfg.UseHTTP2 { - l = append(l, "http2") + lo = append(lo, "http2") } - l = append(l, ";") - out = append(out, strings.Join(l, " ")) + lo = append(lo, ";") + out = append(out, strings.Join(lo, " ")) } return out diff --git a/internal/ingress/types.go b/internal/ingress/types.go index dcf56a015..e082f0d62 100644 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -200,6 +200,9 @@ type Server struct { ServerSnippet string `json:"serverSnippet"` // SSLCiphers returns list of ciphers to be enabled SSLCiphers string `json:"sslCiphers,omitempty"` + // SSLPreferServerCiphers indicates that server ciphers should be preferred + // over client ciphers when using the SSLv3 and TLS protocols. 
+ SSLPreferServerCiphers string `json:"sslPreferServerCiphers,omitempty"` // AuthTLSError contains the reason why the access to a server should be denied AuthTLSError string `json:"authTLSError,omitempty"` } diff --git a/internal/ingress/types_equals.go b/internal/ingress/types_equals.go index 8ea8fba0f..358bdd248 100644 --- a/internal/ingress/types_equals.go +++ b/internal/ingress/types_equals.go @@ -308,6 +308,9 @@ func (s1 *Server) Equal(s2 *Server) bool { if s1.SSLCiphers != s2.SSLCiphers { return false } + if s1.SSLPreferServerCiphers != s2.SSLPreferServerCiphers { + return false + } if s1.AuthTLSError != s2.AuthTLSError { return false } diff --git a/internal/runtime/cpu.go b/internal/runtime/cpu_linux.go similarity index 99% rename from internal/runtime/cpu.go rename to internal/runtime/cpu_linux.go index f2f9377c6..41b969df1 100644 --- a/internal/runtime/cpu.go +++ b/internal/runtime/cpu_linux.go @@ -1,3 +1,5 @@ +// +build linux + /* Copyright 2018 The Kubernetes Authors. diff --git a/internal/runtime/cpu_notlinux.go b/internal/runtime/cpu_notlinux.go new file mode 100644 index 000000000..86a649e62 --- /dev/null +++ b/internal/runtime/cpu_notlinux.go @@ -0,0 +1,28 @@ +// +build !linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "runtime" +) + +// NumCPU ... 
+func NumCPU() int { + return runtime.NumCPU() +} diff --git a/mkdocs.yml b/mkdocs.yml index 1dc7ffab3..dc35a6838 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,4 @@ site_name: NGINX Ingress Controller -strict: true repo_name: "kubernetes/ingress-nginx" repo_url: https://github.com/kubernetes/ingress-nginx site_url: https://kubernetes.github.io/ingress-nginx diff --git a/rootfs/.dockerignore b/rootfs/.dockerignore new file mode 100644 index 000000000..4a246ec6c --- /dev/null +++ b/rootfs/.dockerignore @@ -0,0 +1,2 @@ +Dockerfile +.dockerignore diff --git a/rootfs/.gitignore b/rootfs/.gitignore new file mode 100644 index 000000000..582ec459a --- /dev/null +++ b/rootfs/.gitignore @@ -0,0 +1,2 @@ +bin/* + diff --git a/rootfs/Dockerfile b/rootfs/Dockerfile index 5b6116e91..4141ff4e9 100644 --- a/rootfs/Dockerfile +++ b/rootfs/Dockerfile @@ -16,6 +16,7 @@ ARG BASE_IMAGE FROM ${BASE_IMAGE} +ARG TARGETARCH ARG VERSION LABEL org.opencontainers.image.title="NGINX Ingress Controller for Kubernetes" @@ -29,12 +30,16 @@ WORKDIR /etc/nginx RUN apk update \ && apk upgrade \ - && apk add -U --no-cache \ + && apk add --no-cache \ diffutils \ - libcap \ && rm -rf /var/cache/apk/* -COPY --chown=www-data:www-data . 
/ +COPY --chown=www-data:www-data etc /etc +COPY --chown=www-data:www-data ingress-controller /ingress-controller + +COPY --chown=www-data:www-data bin/${TARGETARCH}/dbg / +COPY --chown=www-data:www-data bin/${TARGETARCH}/nginx-ingress-controller / +COPY --chown=www-data:www-data bin/${TARGETARCH}/wait-shutdown / # Fix permission during the build to avoid issues at runtime # with volumes (custom templates) @@ -51,11 +56,12 @@ RUN bash -xeu -c ' \ chown -R www-data.www-data ${dir}; \ done' -RUN setcap cap_net_bind_service=+ep /nginx-ingress-controller \ - && setcap -v cap_net_bind_service=+ep /nginx-ingress-controller - -RUN setcap cap_net_bind_service=+ep /usr/local/nginx/sbin/nginx \ - && setcap -v cap_net_bind_service=+ep /usr/local/nginx/sbin/nginx +RUN apk add --no-cache libcap \ + && setcap cap_net_bind_service=+ep /nginx-ingress-controller \ + && setcap -v cap_net_bind_service=+ep /nginx-ingress-controller \ + && setcap cap_net_bind_service=+ep /usr/local/nginx/sbin/nginx \ + && setcap -v cap_net_bind_service=+ep /usr/local/nginx/sbin/nginx \ + && apk del libcap USER www-data diff --git a/rootfs/etc/nginx/lua/balancer.lua b/rootfs/etc/nginx/lua/balancer.lua index 1661735b7..bd6f264f2 100644 --- a/rootfs/etc/nginx/lua/balancer.lua +++ b/rootfs/etc/nginx/lua/balancer.lua @@ -16,7 +16,7 @@ local getmetatable = getmetatable local tostring = tostring local pairs = pairs local math = math - +local ngx = ngx -- measured in seconds -- for an Nginx worker to pick up the new list of upstream peers @@ -305,11 +305,11 @@ function _M.log() balancer:after_balance() end -if _TEST then - _M.get_implementation = get_implementation - _M.sync_backend = sync_backend - _M.route_to_alternative_balancer = route_to_alternative_balancer - _M.get_balancer = get_balancer -end +setmetatable(_M, {__index = { + get_implementation = get_implementation, + sync_backend = sync_backend, + route_to_alternative_balancer = route_to_alternative_balancer, + get_balancer = get_balancer, +}}) return 
_M diff --git a/rootfs/etc/nginx/lua/balancer/chash.lua b/rootfs/etc/nginx/lua/balancer/chash.lua index 3eddab9e5..1ca6140f4 100644 --- a/rootfs/etc/nginx/lua/balancer/chash.lua +++ b/rootfs/etc/nginx/lua/balancer/chash.lua @@ -1,14 +1,23 @@ local balancer_resty = require("balancer.resty") local resty_chash = require("resty.chash") local util = require("util") +local ngx_log = ngx.log +local ngx_ERR = ngx.ERR +local setmetatable = setmetatable local _M = balancer_resty:new({ factory = resty_chash, name = "chash" }) function _M.new(self, backend) local nodes = util.get_nodes(backend.endpoints) + local complex_val, err = + util.parse_complex_value(backend["upstreamHashByConfig"]["upstream-hash-by"]) + if err ~= nil then + ngx_log(ngx_ERR, "could not parse the value of the upstream-hash-by: ", err) + end + local o = { instance = self.factory:new(nodes), - hash_by = backend["upstreamHashByConfig"]["upstream-hash-by"], + hash_by = complex_val, traffic_shaping_policy = backend.trafficShapingPolicy, alternative_backends = backend.alternativeBackends, } @@ -18,7 +27,7 @@ function _M.new(self, backend) end function _M.balance(self) - local key = util.lua_ngx_var(self.hash_by) + local key = util.generate_var_value(self.hash_by) return self.instance:find(key) end diff --git a/rootfs/etc/nginx/lua/balancer/chashsubset.lua b/rootfs/etc/nginx/lua/balancer/chashsubset.lua index 9599378d6..28c2354a1 100644 --- a/rootfs/etc/nginx/lua/balancer/chashsubset.lua +++ b/rootfs/etc/nginx/lua/balancer/chashsubset.lua @@ -3,6 +3,13 @@ local resty_chash = require("resty.chash") local util = require("util") +local ngx_log = ngx.log +local ngx_ERR = ngx.ERR +local setmetatable = setmetatable +local tostring = tostring +local math = math +local table = table +local pairs = pairs local _M = { name = "chashsubset" } @@ -44,10 +51,15 @@ end function _M.new(self, backend) local subset_map, subsets = build_subset_map(backend) + local complex_val, err = + 
util.parse_complex_value(backend["upstreamHashByConfig"]["upstream-hash-by"]) + if err ~= nil then + ngx_log(ngx_ERR, "could not parse the value of the upstream-hash-by: ", err) + end local o = { instance = resty_chash:new(subset_map), - hash_by = backend["upstreamHashByConfig"]["upstream-hash-by"], + hash_by = complex_val, subsets = subsets, current_endpoints = backend.endpoints } @@ -57,7 +69,7 @@ function _M.new(self, backend) end function _M.balance(self) - local key = util.lua_ngx_var(self.hash_by) + local key = util.generate_var_value(self.hash_by) local subset_id = self.instance:find(key) local endpoints = self.subsets[subset_id] local endpoint = endpoints[math.random(#endpoints)] diff --git a/rootfs/etc/nginx/lua/balancer/ewma.lua b/rootfs/etc/nginx/lua/balancer/ewma.lua index 5102cf3a2..c3b4568ab 100644 --- a/rootfs/etc/nginx/lua/balancer/ewma.lua +++ b/rootfs/etc/nginx/lua/balancer/ewma.lua @@ -9,6 +9,14 @@ local resty_lock = require("resty.lock") local util = require("util") local split = require("util.split") +local ngx = ngx +local math = math +local pairs = pairs +local ipairs = ipairs +local tostring = tostring +local string = string +local tonumber = tonumber +local setmetatable = setmetatable local string_format = string.format local ngx_log = ngx.log local INFO = ngx.INFO @@ -185,7 +193,8 @@ function _M.after_balance(_) end function _M.sync(self, backend) - local normalized_endpoints_added, normalized_endpoints_removed = util.diff_endpoints(self.peers, backend.endpoints) + local normalized_endpoints_added, normalized_endpoints_removed = + util.diff_endpoints(self.peers, backend.endpoints) if #normalized_endpoints_added == 0 and #normalized_endpoints_removed == 0 then ngx.log(ngx.INFO, "endpoints did not change for backend " .. 
tostring(backend.name)) diff --git a/rootfs/etc/nginx/lua/balancer/resty.lua b/rootfs/etc/nginx/lua/balancer/resty.lua index a4090c4c0..c1065ff19 100644 --- a/rootfs/etc/nginx/lua/balancer/resty.lua +++ b/rootfs/etc/nginx/lua/balancer/resty.lua @@ -3,6 +3,7 @@ local util = require("util") local string_format = string.format local ngx_log = ngx.log local INFO = ngx.INFO +local setmetatable = setmetatable local _M = {} diff --git a/rootfs/etc/nginx/lua/balancer/round_robin.lua b/rootfs/etc/nginx/lua/balancer/round_robin.lua index 46641363e..7993a1831 100644 --- a/rootfs/etc/nginx/lua/balancer/round_robin.lua +++ b/rootfs/etc/nginx/lua/balancer/round_robin.lua @@ -2,6 +2,8 @@ local balancer_resty = require("balancer.resty") local resty_roundrobin = require("resty.roundrobin") local util = require("util") +local setmetatable = setmetatable + local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" }) function _M.new(self, backend) diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index e97b0a8dc..45ea9beaf 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -4,6 +4,13 @@ local ngx_balancer = require("ngx.balancer") local split = require("util.split") local same_site = require("util.same_site") +local ngx = ngx +local pairs = pairs +local ipairs = ipairs +local string = string +local tonumber = tonumber +local setmetatable = setmetatable + local _M = balancer_resty:new() local DEFAULT_COOKIE_NAME = "route" @@ -64,7 +71,8 @@ function _M.set_cookie(self, value) } if self.cookie_session_affinity.expires and self.cookie_session_affinity.expires ~= "" then - cookie_data.expires = ngx.cookie_time(ngx.time() + tonumber(self.cookie_session_affinity.expires)) + cookie_data.expires = ngx.cookie_time(ngx.time() + + tonumber(self.cookie_session_affinity.expires)) end if self.cookie_session_affinity.maxage and self.cookie_session_affinity.maxage ~= "" then @@ 
-132,8 +140,8 @@ function _M.balance(self) end local last_failure = self.get_last_failure() - local should_pick_new_upstream = last_failure ~= nil and self.cookie_session_affinity.change_on_failure or - upstream_from_cookie == nil + local should_pick_new_upstream = last_failure ~= nil and + self.cookie_session_affinity.change_on_failure or upstream_from_cookie == nil if not should_pick_new_upstream then return upstream_from_cookie diff --git a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua index ce0018158..119e4a8e2 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua @@ -9,6 +9,10 @@ local math_random = require("math").random local resty_chash = require("resty.chash") local util_get_nodes = require("util").get_nodes +local ngx = ngx +local string = string +local setmetatable = setmetatable + local _M = balancer_sticky:new() -- Consider the situation of N upstreams one of which is failing. 
diff --git a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua index a4a6f0da2..ae116cece 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua @@ -6,6 +6,7 @@ local balancer_sticky = require("balancer.sticky") local util_get_nodes = require("util").get_nodes local util_nodemap = require("util.nodemap") +local setmetatable = setmetatable local _M = balancer_sticky:new() diff --git a/rootfs/etc/nginx/lua/certificate.lua b/rootfs/etc/nginx/lua/certificate.lua index ac13ee318..cb9e1ed82 100644 --- a/rootfs/etc/nginx/lua/certificate.lua +++ b/rootfs/etc/nginx/lua/certificate.lua @@ -1,7 +1,11 @@ local http = require("resty.http") local ssl = require("ngx.ssl") local ocsp = require("ngx.ocsp") +local ngx = ngx +local string = string +local tostring = tostring local re_sub = ngx.re.sub +local unpack = unpack local dns_lookup = require("util.dns").lookup @@ -42,7 +46,11 @@ local function set_der_cert_and_key(der_cert, der_priv_key) end local function get_pem_cert_uid(raw_hostname) - local hostname = re_sub(raw_hostname, "\\.$", "", "jo") + -- Convert hostname to ASCII lowercase (see RFC 6125 6.4.1) so that requests with uppercase + -- host would lead to the right certificate being chosen (controller serves certificates for + -- lowercase hostnames as specified in Ingress object's spec.rules.host) + local hostname = re_sub(raw_hostname, "\\.$", "", "jo"):gsub("[A-Z]", + function(c) return c:lower() end) local uid = certificate_servers:get(hostname) if uid then @@ -215,8 +223,8 @@ function _M.call() ngx.log(ngx.ERR, "error while obtaining hostname: " .. hostname_err) end if not hostname then - ngx.log(ngx.INFO, - "obtained hostname is nil (the client does not support SNI?), falling back to default certificate") + ngx.log(ngx.INFO, "obtained hostname is nil (the client does " + .. 
"not support SNI?), falling back to default certificate") hostname = DEFAULT_CERT_HOSTNAME end @@ -229,7 +237,8 @@ function _M.call() pem_cert = certificate_data:get(pem_cert_uid) end if not pem_cert then - ngx.log(ngx.ERR, "certificate not found, falling back to fake certificate for hostname: " .. tostring(hostname)) + ngx.log(ngx.ERR, "certificate not found, falling back to fake certificate for hostname: " + .. tostring(hostname)) return end diff --git a/rootfs/etc/nginx/lua/configuration.lua b/rootfs/etc/nginx/lua/configuration.lua index 49ea62dbc..fbf78f208 100644 --- a/rootfs/etc/nginx/lua/configuration.lua +++ b/rootfs/etc/nginx/lua/configuration.lua @@ -1,5 +1,12 @@ local cjson = require("cjson.safe") +local io = io +local ngx = ngx +local tostring = tostring +local string = string +local table = table +local pairs = pairs + -- this is the Lua representation of Configuration struct in internal/ingress/types.go local configuration_data = ngx.shared.configuration_data local certificate_data = ngx.shared.certificate_data @@ -72,12 +79,13 @@ local function handle_servers() else local success, set_err, forcible = certificate_servers:set(server, uid) if not success then - local err_msg = string.format("error setting certificate for %s: %s\n", server, tostring(set_err)) + local err_msg = string.format("error setting certificate for %s: %s\n", + server, tostring(set_err)) table.insert(err_buf, err_msg) end if forcible then - local msg = string.format("certificate_servers dictionary is full, LRU entry has been removed to store %s", - server) + local msg = string.format("certificate_servers dictionary is full, " + .. 
"LRU entry has been removed to store %s", server) ngx.log(ngx.WARN, msg) end end @@ -86,11 +94,13 @@ local function handle_servers() for uid, cert in pairs(configuration.certificates) do local success, set_err, forcible = certificate_data:set(uid, cert) if not success then - local err_msg = string.format("error setting certificate for %s: %s\n", uid, tostring(set_err)) + local err_msg = string.format("error setting certificate for %s: %s\n", + uid, tostring(set_err)) table.insert(err_buf, err_msg) end if forcible then - local msg = string.format("certificate_data dictionary is full, LRU entry has been removed to store %s", uid) + local msg = string.format("certificate_data dictionary is full, " + .. "LRU entry has been removed to store %s", uid) ngx.log(ngx.WARN, msg) end end @@ -211,8 +221,6 @@ function _M.call() ngx.print("Not found!") end -if _TEST then - _M.handle_servers = handle_servers -end +setmetatable(_M, {__index = { handle_servers = handle_servers }}) return _M diff --git a/rootfs/etc/nginx/lua/lua_ingress.lua b/rootfs/etc/nginx/lua/lua_ingress.lua index e292ade80..49355da7b 100644 --- a/rootfs/etc/nginx/lua/lua_ingress.lua +++ b/rootfs/etc/nginx/lua/lua_ingress.lua @@ -1,7 +1,12 @@ local ngx_re_split = require("ngx.re").split -local certificate_configured_for_current_request = require("certificate").configured_for_current_request +local certificate_configured_for_current_request = + require("certificate").configured_for_current_request +local ngx = ngx +local io = io +local math = math +local string = string local original_randomseed = math.randomseed local string_format = string.format local ngx_redirect = ngx.redirect @@ -38,8 +43,8 @@ end math.randomseed = function(seed) local pid = ngx.worker.pid() if seeds[pid] then - ngx.log(ngx.WARN, - string.format("ignoring math.randomseed(%d) since PRNG is already seeded for worker %d", seed, pid)) + ngx.log(ngx.WARN, string.format("ignoring math.randomseed(%d) since PRNG " + .. 
"is already seeded for worker %d", seed, pid)) return end @@ -143,7 +148,8 @@ function _M.rewrite(location_config) local uri = string_format("https://%s%s", redirect_host(), ngx.var.request_uri) if location_config.use_port_in_redirects then - uri = string_format("https://%s:%s%s", redirect_host(), config.listen_ports.https, ngx.var.request_uri) + uri = string_format("https://%s:%s%s", redirect_host(), + config.listen_ports.https, ngx.var.request_uri) end ngx_redirect(uri, config.http_redirect_code) diff --git a/rootfs/etc/nginx/lua/monitor.lua b/rootfs/etc/nginx/lua/monitor.lua index 7592c0c5b..1c0672e41 100644 --- a/rootfs/etc/nginx/lua/monitor.lua +++ b/rootfs/etc/nginx/lua/monitor.lua @@ -1,16 +1,22 @@ +local ngx = ngx +local tonumber = tonumber +local assert = assert +local string = string +local tostring = tostring local socket = ngx.socket.tcp local cjson = require("cjson.safe") -local assert = assert local new_tab = require "table.new" local clear_tab = require "table.clear" local clone_tab = require "table.clone" -local nkeys = require "table.nkeys" --- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS then it will start dropping metrics + +-- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS +-- then it will start dropping metrics local MAX_BATCH_SIZE = 10000 local FLUSH_INTERVAL = 1 -- second local metrics_batch = new_tab(MAX_BATCH_SIZE, 0) +local metrics_count = 0 local _M = {} @@ -47,12 +53,13 @@ local function flush(premature) return end - if #metrics_batch == 0 then + if metrics_count == 0 then return end local current_metrics_batch = clone_tab(metrics_batch) clear_tab(metrics_batch) + metrics_count = 0 local payload, err = cjson.encode(current_metrics_batch) if not payload then @@ -78,19 +85,19 @@ function _M.init_worker(max_batch_size) end function _M.call() - local metrics_size = nkeys(metrics_batch) - if metrics_size >= MAX_BATCH_SIZE then + if metrics_count >= MAX_BATCH_SIZE then ngx.log(ngx.WARN, 
"omitting metrics for the request, current batch is full") return end - metrics_batch[metrics_size + 1] = metrics() + metrics_count = metrics_count + 1 + metrics_batch[metrics_count] = metrics() end -if _TEST then - _M.flush = flush - _M.get_metrics_batch = function() return metrics_batch end - _M.set_metrics_max_batch_size = set_metrics_max_batch_size -end +setmetatable(_M, {__index = { + flush = flush, + set_metrics_max_batch_size = set_metrics_max_batch_size, + get_metrics_batch = function() return metrics_batch end, +}}) return _M diff --git a/rootfs/etc/nginx/lua/plugins.lua b/rootfs/etc/nginx/lua/plugins.lua index d634bc2f4..0c1fd899b 100644 --- a/rootfs/etc/nginx/lua/plugins.lua +++ b/rootfs/etc/nginx/lua/plugins.lua @@ -1,13 +1,16 @@ +local require = require +local ngx = ngx +local pairs = pairs +local ipairs = ipairs local string_format = string.format -local new_tab = require "table.new" local ngx_log = ngx.log local INFO = ngx.INFO local ERR = ngx.ERR +local pcall = pcall local _M = {} -local MAX_NUMBER_OF_PLUGINS = 10000 --- TODO: is this good for a dictionary? -local plugins = new_tab(MAX_NUMBER_OF_PLUGINS, 0) +local MAX_NUMBER_OF_PLUGINS = 20 +local plugins = {} local function load_plugin(name) local path = string_format("plugins.%s.main", name) @@ -22,8 +25,14 @@ local function load_plugin(name) end function _M.init(names) + local count = 0 for _, name in ipairs(names) do + if count >= MAX_NUMBER_OF_PLUGINS then + ngx_log(ERR, "the total number of plugins exceed the maximum number: ", MAX_NUMBER_OF_PLUGINS) + break + end load_plugin(name) + count = count + 1 -- ignore loading failure, just count the total end end @@ -36,10 +45,12 @@ function _M.run() -- TODO: consider sandboxing this, should we? 
-- probably yes, at least prohibit plugin from accessing env vars etc - -- but since the plugins are going to be installed by ingress-nginx operator they can be assumed to be safe also + -- but since the plugins are going to be installed by ingress-nginx + -- operator they can be assumed to be safe also local ok, err = pcall(plugin[phase]) if not ok then - ngx_log(ERR, string_format("error while running plugin \"%s\" in phase \"%s\": %s", name, phase, err)) + ngx_log(ERR, string_format("error while running plugin \"%s\" in phase \"%s\": %s", + name, phase, err)) end end end diff --git a/rootfs/etc/nginx/lua/plugins/hello_world/main.lua b/rootfs/etc/nginx/lua/plugins/hello_world/main.lua index af56b956a..03316c3ee 100644 --- a/rootfs/etc/nginx/lua/plugins/hello_world/main.lua +++ b/rootfs/etc/nginx/lua/plugins/hello_world/main.lua @@ -1,3 +1,5 @@ +local ngx = ngx + local _M = {} function _M.rewrite() diff --git a/rootfs/etc/nginx/lua/plugins/hello_world/test/main_test.lua b/rootfs/etc/nginx/lua/plugins/hello_world/test/main_test.lua index 20e914be1..5eda52259 100644 --- a/rootfs/etc/nginx/lua/plugins/hello_world/test/main_test.lua +++ b/rootfs/etc/nginx/lua/plugins/hello_world/test/main_test.lua @@ -1,4 +1,3 @@ -_G._TEST = true local main = require("plugins.hello_world.main") diff --git a/rootfs/etc/nginx/lua/tcp_udp_balancer.lua b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua index cf6626cf0..0d7da9cf8 100644 --- a/rootfs/etc/nginx/lua/tcp_udp_balancer.lua +++ b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua @@ -5,9 +5,18 @@ local dns_lookup = require("util.dns").lookup local configuration = require("tcp_udp_configuration") local round_robin = require("balancer.round_robin") +local ngx = ngx +local table = table +local ipairs = ipairs +local pairs = pairs +local tostring = tostring +local string = string +local getmetatable = getmetatable + -- measured in seconds -- for an Nginx worker to pick up the new list of upstream peers --- it will take + BACKENDS_SYNC_INTERVAL +-- 
it will take + BACKENDS_SYNC_INTERVAL local BACKENDS_SYNC_INTERVAL = 1 local DEFAULT_LB_ALG = "round_robin" @@ -23,7 +32,8 @@ local function get_implementation(backend) local implementation = IMPLEMENTATIONS[name] if not implementation then - ngx.log(ngx.WARN, string.format("%s is not supported, falling back to %s", backend["load-balance"], DEFAULT_LB_ALG)) + ngx.log(ngx.WARN, string.format("%s is not supported, falling back to %s", + backend["load-balance"], DEFAULT_LB_ALG)) implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG] end @@ -73,15 +83,14 @@ local function sync_backend(backend) -- here we check if `balancer` is the instance of `implementation` -- if it is not then we deduce LB algorithm has changed for the backend if getmetatable(balancer) ~= implementation then - ngx.log( - ngx.INFO, - string.format("LB algorithm changed from %s to %s, resetting the instance", balancer.name, implementation.name) - ) + ngx.log(ngx.INFO, string.format("LB algorithm changed from %s to %s, " + .. "resetting the instance", balancer.name, implementation.name)) balancers[backend.name] = implementation:new(backend) return end - local service_type = backend.service and backend.service.spec and backend.service.spec["type"] + local service_type = backend.service and backend.service.spec and + backend.service.spec["type"] if service_type == "ExternalName" then backend = resolve_external_names(backend) end @@ -131,7 +140,8 @@ function _M.init_worker() sync_backends() -- when worker starts, sync backends without delay local _, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends) if err then - ngx.log(ngx.ERR, string.format("error when setting up timer.every for sync_backends: %s", tostring(err))) + ngx.log(ngx.ERR, string.format("error when setting up timer.every " + .. 
"for sync_backends: %s", tostring(err))) end end @@ -168,9 +178,9 @@ function _M.log() balancer:after_balance() end -if _TEST then - _M.get_implementation = get_implementation - _M.sync_backend = sync_backend -end +setmetatable(_M, {__index = { + get_implementation = get_implementation, + sync_backend = sync_backend, +}}) return _M diff --git a/rootfs/etc/nginx/lua/tcp_udp_configuration.lua b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua index 902ac59b6..40d3443da 100644 --- a/rootfs/etc/nginx/lua/tcp_udp_configuration.lua +++ b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua @@ -1,3 +1,5 @@ +local ngx = ngx +local tostring = tostring -- this is the Lua representation of TCP/UDP Configuration local tcp_udp_configuration_data = ngx.shared.tcp_udp_configuration_data diff --git a/rootfs/etc/nginx/lua/test/balancer/chash_test.lua b/rootfs/etc/nginx/lua/test/balancer/chash_test.lua index d71dfc56a..94379e7dd 100644 --- a/rootfs/etc/nginx/lua/test/balancer/chash_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/chash_test.lua @@ -1,9 +1,15 @@ +function mock_ngx(mock) + local _ngx = mock + setmetatable(_ngx, {__index = _G.ngx}) + _G.ngx = _ngx +end + describe("Balancer chash", function() - local balancer_chash = require("balancer.chash") describe("balance()", function() it("uses correct key for given backend", function() - _G.ngx = { var = { request_uri = "/alma/armud" }} + mock_ngx({var = { request_uri = "/alma/armud"}}) + local balancer_chash = require("balancer.chash") local resty_chash = package.loaded["resty.chash"] resty_chash.new = function(self, nodes) diff --git a/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua b/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua index 6bbd582dd..bbf55838c 100644 --- a/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/chashsubset_test.lua @@ -1,3 +1,8 @@ +function mock_ngx(mock) + local _ngx = mock + setmetatable(_ngx, {__index = _G.ngx}) + _G.ngx = _ngx +end local 
function get_test_backend(n_endpoints) local backend = { @@ -18,11 +23,15 @@ local function get_test_backend(n_endpoints) end describe("Balancer chash subset", function() - local balancer_chashsubset = require("balancer.chashsubset") + local balancer_chashsubset + + before_each(function() + mock_ngx({ var = { request_uri = "/alma/armud" }}) + balancer_chashsubset = require("balancer.chashsubset") + end) describe("balance()", function() it("returns peers from the same subset", function() - _G.ngx = { var = { request_uri = "/alma/armud" }} local backend = get_test_backend(9) @@ -67,7 +76,6 @@ describe("Balancer chash subset", function() end) describe("new(backend)", function() it("fills last subset correctly", function() - _G.ngx = { var = { request_uri = "/alma/armud" }} local backend = get_test_backend(7) diff --git a/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua b/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua index f5a45d868..f9d9450cb 100644 --- a/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/ewma_test.lua @@ -34,6 +34,8 @@ describe("Balancer ewma", function() before_each(function() mock_ngx({ now = function() return ngx_now end, var = { balancer_ewma_score = -1 } }) + package.loaded["balancer.ewma"] = nil + balancer_ewma = require("balancer.ewma") backend = { name = "namespace-service-port", ["load-balance"] = "ewma", diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index c1bee4ac0..44e103c1c 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -1,5 +1,5 @@ -local sticky_balanced = require("balancer.sticky_balanced") -local sticky_persistent = require("balancer.sticky_persistent") +local sticky_balanced +local sticky_persistent local cookie = require("resty.cookie") local util = require("util") @@ -15,6 +15,14 @@ local function reset_ngx() _G.ngx = original_ngx end +local 
function reset_sticky_balancer() + package.loaded["balancer.sticky"] = nil + package.loaded["balancer.sticky_balanced"] = nil + package.loaded["balancer.sticky_persistent"] = nil + sticky_balanced = require("balancer.sticky_balanced") + sticky_persistent = require("balancer.sticky_persistent") +end + function get_mocked_cookie_new() local o = { value = nil } local mock = { @@ -47,6 +55,7 @@ end describe("Sticky", function() before_each(function() mock_ngx({ var = { location_path = "/", host = "test.com" } }) + reset_sticky_balancer() end) after_each(function() @@ -302,11 +311,8 @@ describe("Sticky", function() local mocked_cookie_new = cookie.new before_each(function() - package.loaded["balancer.sticky_balanced"] = nil - package.loaded["balancer.sticky_persistent"] = nil - sticky_balanced = require("balancer.sticky_balanced") - sticky_persistent = require("balancer.sticky_persistent") mock_ngx({ var = { location_path = "/", host = "test.com" } }) + reset_sticky_balancer() end) after_each(function() @@ -459,6 +465,7 @@ describe("Sticky", function() end) it("returns a cookie without SameSite=None when user specifies samesite None and conditional samesite none with unsupported user agent", function() mock_ngx({ var = { location_path = "/", host = "test.com" , http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"} }) + reset_sticky_balancer() test_set_cookie(sticky_balanced, "None", true, "/", nil) end) end) diff --git a/rootfs/etc/nginx/lua/test/balancer_test.lua b/rootfs/etc/nginx/lua/test/balancer_test.lua index 9887f401e..aa505f174 100644 --- a/rootfs/etc/nginx/lua/test/balancer_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer_test.lua @@ -1,4 +1,3 @@ -_G._TEST = true local balancer, expected_implementations, backends local original_ngx = ngx @@ -110,11 +109,12 @@ describe("Balancer", function() }, } + mock_ngx({ var = { proxy_upstream_name = backend.name } }) + reset_balancer() + 
balancer.sync_backend(backend) balancer.sync_backend(canary_backend) - mock_ngx({ var = { proxy_upstream_name = backend.name } }) - local expected = balancer.get_balancer() for i = 1,50,1 do @@ -134,6 +134,7 @@ describe("Balancer", function() } } mock_ngx({ var = { request_uri = "/" } }) + reset_balancer() end) it("returns false when no trafficShapingPolicy is set", function() @@ -171,8 +172,6 @@ describe("Balancer", function() context("canary by cookie", function() it("returns correct result for given cookies", function() - backend.trafficShapingPolicy.cookie = "canaryCookie" - balancer.sync_backend(backend) local test_patterns = { { case_title = "cookie_value is 'always'", @@ -204,6 +203,9 @@ describe("Balancer", function() ["cookie_" .. test_pattern.request_cookie_name] = test_pattern.request_cookie_value, request_uri = "/" }}) + reset_balancer() + backend.trafficShapingPolicy.cookie = "canaryCookie" + balancer.sync_backend(backend) assert.message("\nTest data pattern: " .. test_pattern.case_title) .equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_balancer)) reset_ngx() @@ -275,14 +277,14 @@ describe("Balancer", function() } for _, test_pattern in pairs(test_patterns) do - reset_balancer() - backend.trafficShapingPolicy.header = test_pattern.header_name - backend.trafficShapingPolicy.headerValue = test_pattern.header_value - balancer.sync_backend(backend) mock_ngx({ var = { ["http_" .. test_pattern.request_header_name] = test_pattern.request_header_value, request_uri = "/" }}) + reset_balancer() + backend.trafficShapingPolicy.header = test_pattern.header_name + backend.trafficShapingPolicy.headerValue = test_pattern.header_value + balancer.sync_backend(backend) assert.message("\nTest data pattern: " .. 
test_pattern.case_title) .equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_balancer)) reset_ngx() diff --git a/rootfs/etc/nginx/lua/test/certificate_test.lua b/rootfs/etc/nginx/lua/test/certificate_test.lua index e8c4a9da6..c3227f609 100644 --- a/rootfs/etc/nginx/lua/test/certificate_test.lua +++ b/rootfs/etc/nginx/lua/test/certificate_test.lua @@ -165,6 +165,9 @@ describe("Certificate", function() _G.ngx = _ngx ngx.ctx.cert_configured_for_current_request = nil + package.loaded["certificate"] = nil + certificate = require("certificate") + set_certificate("hostname", EXAMPLE_CERT, UUID) end) diff --git a/rootfs/etc/nginx/lua/test/configuration_test.lua b/rootfs/etc/nginx/lua/test/configuration_test.lua index 3bfe60db6..aa4ba38b3 100644 --- a/rootfs/etc/nginx/lua/test/configuration_test.lua +++ b/rootfs/etc/nginx/lua/test/configuration_test.lua @@ -1,4 +1,3 @@ -_G._TEST = true local cjson = require("cjson") local configuration = require("configuration") @@ -48,12 +47,12 @@ end describe("Configuration", function() before_each(function() _G.ngx = get_mocked_ngx_env() + package.loaded["configuration"] = nil + configuration = require("configuration") end) after_each(function() _G.ngx = unmocked_ngx - package.loaded["configuration"] = nil - configuration = require("configuration") end) describe("Backends", function() diff --git a/rootfs/etc/nginx/lua/test/monitor_test.lua b/rootfs/etc/nginx/lua/test/monitor_test.lua index 19a576c33..ca6dbd663 100644 --- a/rootfs/etc/nginx/lua/test/monitor_test.lua +++ b/rootfs/etc/nginx/lua/test/monitor_test.lua @@ -1,4 +1,3 @@ -_G._TEST = true local original_ngx = ngx local function reset_ngx() @@ -31,8 +30,8 @@ describe("Monitor", function() end) it("extended batch size", function() - local monitor = require("monitor") mock_ngx({ var = {} }) + local monitor = require("monitor") monitor.set_metrics_max_batch_size(20000) for i = 1,20000,1 do @@ -43,8 +42,8 @@ describe("Monitor", function() end) it("batches 
metrics", function() - local monitor = require("monitor") mock_ngx({ var = {} }) + local monitor = require("monitor") for i = 1,10,1 do monitor.call() @@ -56,8 +55,8 @@ describe("Monitor", function() describe("flush", function() it("short circuits when premmature is true (when worker is shutting down)", function() local tcp_mock = mock_ngx_socket_tcp() - local monitor = require("monitor") mock_ngx({ var = {} }) + local monitor = require("monitor") for i = 1,10,1 do monitor.call() @@ -76,7 +75,6 @@ describe("Monitor", function() it("JSON encodes and sends the batched metrics", function() local tcp_mock = mock_ngx_socket_tcp() - local monitor = require("monitor") local ngx_var_mock = { host = "example.com", @@ -98,6 +96,7 @@ describe("Monitor", function() upstream_status = "200", } mock_ngx({ var = ngx_var_mock }) + local monitor = require("monitor") monitor.call() local ngx_var_mock1 = ngx_var_mock diff --git a/rootfs/etc/nginx/lua/test/run.lua b/rootfs/etc/nginx/lua/test/run.lua index 0dff725e6..8e20ea3f5 100644 --- a/rootfs/etc/nginx/lua/test/run.lua +++ b/rootfs/etc/nginx/lua/test/run.lua @@ -10,7 +10,6 @@ do -- if there's more constants need to be whitelisted for test runs, add here. 
local GLOBALS_ALLOWED_IN_TEST = { - _TEST = true, helpers = true, } local newindex = function(table, key, value) @@ -35,7 +34,6 @@ do end _G.helpers = require("test.helpers") -_G._TEST = true local ffi = require("ffi") local lua_ingress = require("lua_ingress") diff --git a/rootfs/etc/nginx/lua/test/util_test.lua b/rootfs/etc/nginx/lua/test/util_test.lua index 6da681662..0b8d48ae4 100644 --- a/rootfs/etc/nginx/lua/test/util_test.lua +++ b/rootfs/etc/nginx/lua/test/util_test.lua @@ -1,4 +1,6 @@ local original_ngx = ngx +local util + local function reset_ngx() _G.ngx = original_ngx end @@ -9,28 +11,44 @@ local function mock_ngx(mock) _G.ngx = _ngx end -describe("lua_ngx_var", function() - local util = require("util") +describe("utility", function() after_each(function() reset_ngx() end) - describe("lua_ngx_var", function() + describe("ngx_complex_value", function() before_each(function() mock_ngx({ var = { remote_addr = "192.168.1.1", [1] = "nginx/regexp/1/group/capturing" } }) + util = require("util") end) + local ngx_complex_value = function(data) + local ret, err = util.parse_complex_value(data) + if err ~= nil then + return "" + end + return util.generate_var_value(ret) + end + it("returns value of nginx var by key", function() - assert.equal("192.168.1.1", util.lua_ngx_var("$remote_addr")) + assert.equal("192.168.1.1", ngx_complex_value("$remote_addr")) end) - + it("returns value of nginx var when key is number", function() - assert.equal("nginx/regexp/1/group/capturing", util.lua_ngx_var("$1")) + assert.equal("nginx/regexp/1/group/capturing", ngx_complex_value("$1")) end) - it("returns nil when variable is not defined", function() - assert.equal(nil, util.lua_ngx_var("$foo_bar")) + it("returns value of nginx var by multiple variables", function() + assert.equal("192.168.1.1nginx/regexp/1/group/capturing", ngx_complex_value("$remote_addr$1")) + end) + + it("returns value by the combination of variable and text value", function() + 
assert.equal("192.168.1.1-text-value", ngx_complex_value("${remote_addr}-text-value")) + end) + + it("returns empty when variable is not defined", function() + assert.equal("", ngx_complex_value("$foo_bar")) end) end) diff --git a/rootfs/etc/nginx/lua/util.lua b/rootfs/etc/nginx/lua/util.lua index 79e88bd2f..103858467 100644 --- a/rootfs/etc/nginx/lua/util.lua +++ b/rootfs/etc/nginx/lua/util.lua @@ -1,14 +1,15 @@ +local ngx = ngx local string = string local string_len = string.len -local string_sub = string.sub local string_format = string.format local pairs = pairs +local ipairs = ipairs local tonumber = tonumber local getmetatable = getmetatable local type = type local next = next local table = table - +local re_gmatch = ngx.re.gmatch local _M = {} @@ -24,15 +25,57 @@ function _M.get_nodes(endpoints) return nodes end --- given an Nginx variable i.e $request_uri --- it returns value of ngx.var[request_uri] -function _M.lua_ngx_var(ngx_var) - local var_name = string_sub(ngx_var, 2) - if var_name:match("^%d+$") then - var_name = tonumber(var_name) +-- parse the compound variables, then call generate_var_value function +-- to parse into a string value. +function _M.parse_complex_value(complex_value) + local reg = [[ (\\\$[0-9a-zA-Z_]+) | ]] -- \$var + .. [[ \$\{([0-9a-zA-Z_]+)\} | ]] -- ${var} + .. [[ \$([0-9a-zA-Z_]+) | ]] -- $var + .. 
[[ (\$|[^$\\]+) ]] -- $ or text value + local iterator, err = re_gmatch(complex_value, reg, "jiox") + if not iterator then + return nil, err + end + + local v + local t = {} + while true do + v, err = iterator() + if err then + return nil, err + end + + if not v then + break + end + + table.insert(t, v) + end + + return t +end + +-- Parse the return value of function parse_complex_value +-- into a string value +function _M.generate_var_value(data) + if data == nil then + return "" end - return ngx.var[var_name] + local t = {} + for _, value in ipairs(data) do + local var_name = value[2] or value[3] + if var_name then + if var_name:match("^%d+$") then + var_name = tonumber(var_name) + end + table.insert(t, ngx.var[var_name]) + else + table.insert(t, value[1] or value[4]) + end + end + + return table.concat(t, "") end -- normalize_endpoints takes endpoints as an array of endpoint objects @@ -75,7 +118,8 @@ function _M.diff_endpoints(old, new) end -- this implementation is taken from --- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3 +-- https://web.archive.org/web/20131225070434/http://snippets. 
+-- luacode.org/snippets/Deep_Comparison_of_Two_Values_3 -- and modified for use in this project local function deep_compare(t1, t2, ignore_mt) local ty1 = type(t1) diff --git a/rootfs/etc/nginx/lua/util/dns.lua b/rootfs/etc/nginx/lua/util/dns.lua index e68ed1f7d..e94060fd4 100644 --- a/rootfs/etc/nginx/lua/util/dns.lua +++ b/rootfs/etc/nginx/lua/util/dns.lua @@ -13,7 +13,8 @@ local tostring = tostring local _M = {} local CACHE_SIZE = 10000 -local MAXIMUM_TTL_VALUE = 2147483647 -- maximum value according to https://tools.ietf.org/html/rfc2181 +-- maximum value according to https://tools.ietf.org/html/rfc2181 +local MAXIMUM_TTL_VALUE = 2147483647 -- for every host we will try two queries for the following types with the order set here local QTYPES_TO_CHECK = { resolver.TYPE_A, resolver.TYPE_AAAA } @@ -59,7 +60,8 @@ local function resolve_host_for_qtype(r, host, qtype) end if answers.errcode then - return nil, -1, string_format("server returned error code: %s: %s", answers.errcode, answers.errstr) + return nil, -1, string_format("server returned error code: %s: %s", + answers.errcode, answers.errstr) end local addresses, ttl = a_records_and_min_ttl(answers) @@ -116,7 +118,8 @@ function _M.lookup(host) return addresses end - ngx_log(ngx_ERR, "failed to query the DNS server for ", host, ":\n", table_concat(dns_errors, "\n")) + ngx_log(ngx_ERR, "failed to query the DNS server for ", + host, ":\n", table_concat(dns_errors, "\n")) return { host } end @@ -135,7 +138,8 @@ function _M.lookup(host) end for i = search_start, search_end, 1 do - local new_host = resolv_conf.search[i] and string_format("%s.%s", host, resolv_conf.search[i]) or host + local new_host = resolv_conf.search[i] and + string_format("%s.%s", host, resolv_conf.search[i]) or host addresses, ttl, dns_errors = resolve_host(r, new_host) if addresses then @@ -145,14 +149,13 @@ function _M.lookup(host) end if #dns_errors > 0 then - ngx_log(ngx_ERR, "failed to query the DNS server for ", host, ":\n", 
table_concat(dns_errors, "\n")) + ngx_log(ngx_ERR, "failed to query the DNS server for ", + host, ":\n", table_concat(dns_errors, "\n")) end return { host } end -if _TEST then - _M._cache = cache -end +setmetatable(_M, {__index = { _cache = cache }}) return _M diff --git a/rootfs/etc/nginx/lua/util/nodemap.lua b/rootfs/etc/nginx/lua/util/nodemap.lua index 04d034023..4fd6212bf 100644 --- a/rootfs/etc/nginx/lua/util/nodemap.lua +++ b/rootfs/etc/nginx/lua/util/nodemap.lua @@ -1,5 +1,9 @@ local math_random = require("math").random local util_tablelength = require("util").tablelength +local ngx = ngx +local pairs = pairs +local string = string +local setmetatable = setmetatable local _M = {} @@ -41,7 +45,8 @@ local function get_random_node(map) count = count + 1 end - ngx.log(ngx.ERR, string.format("Failed to find node %d of %d! This is a bug, please report!", index, size)) + ngx.log(ngx.ERR, string.format("Failed to find node %d of %d! " + .. "This is a bug, please report!", index, size)) return nil, nil end @@ -55,7 +60,8 @@ end -- To make sure hash keys are reproducible on different ingress controller instances the salt -- needs to be shared and therefore is not simply generated randomly. -- --- @tparam {[string]=number} endpoints A table with the node endpoint as a key and its weight as a value. +-- @tparam {[string]=number} endpoints A table with the node endpoint +-- as a key and its weight as a value. -- @tparam[opt] string hash_salt A optional hash salt that will be used to obfuscate the hash key. 
function _M.new(self, endpoints, hash_salt) if hash_salt == nil then diff --git a/rootfs/etc/nginx/lua/util/resolv_conf.lua b/rootfs/etc/nginx/lua/util/resolv_conf.lua index 82cd1d83b..37627c148 100644 --- a/rootfs/etc/nginx/lua/util/resolv_conf.lua +++ b/rootfs/etc/nginx/lua/util/resolv_conf.lua @@ -1,5 +1,6 @@ local ngx_re_split = require("ngx.re").split local string_format = string.format +local tonumber = tonumber local ngx_log = ngx.log local ngx_ERR = ngx.ERR diff --git a/rootfs/etc/nginx/lua/util/same_site.lua b/rootfs/etc/nginx/lua/util/same_site.lua index d8ee5fd2d..ea466b0db 100644 --- a/rootfs/etc/nginx/lua/util/same_site.lua +++ b/rootfs/etc/nginx/lua/util/same_site.lua @@ -1,3 +1,5 @@ +local string = string + local _M = {} -- determines whether to apply a SameSite=None attribute diff --git a/rootfs/etc/nginx/lua/util/split.lua b/rootfs/etc/nginx/lua/util/split.lua index 620e083a5..090a7cf7f 100644 --- a/rootfs/etc/nginx/lua/util/split.lua +++ b/rootfs/etc/nginx/lua/util/split.lua @@ -1,3 +1,5 @@ +local ipairs = ipairs + local _M = {} -- splits strings into host and port @@ -34,7 +36,8 @@ function _M.split_upstream_var(var) return t end --- Splits an NGINX $upstream_addr and returns an array of tables with a `host` and `port` key-value pair. +-- Splits an NGINX $upstream_addr and returns an array of tables +-- with a `host` and `port` key-value pair. 
function _M.split_upstream_addr(addrs_str) if not addrs_str then return nil, nil diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl index 86a50c683..f684fffb5 100755 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ b/rootfs/etc/nginx/template/nginx.tmpl @@ -211,11 +211,21 @@ http { $geoip2_isp_org organization; } {{ end }} + {{ if eq $file "GeoIP2-Connection-Type.mmdb" }} geoip2 /etc/nginx/geoip/GeoIP2-Connection-Type.mmdb { $geoip2_connection_type connection_type; } {{ end }} + + {{ if eq $file "GeoIP2-Anonymous-IP.mmdb" }} + geoip2 /etc/nginx/geoip/GeoIP2-Anonymous-IP.mmdb { + $geoip2_is_anon source=$remote_addr is_anonymous; + $geoip2_is_hosting_provider source=$remote_addr is_hosting_provider; + $geoip2_is_public_proxy source=$remote_addr is_public_proxy; + } + {{ end }} + {{ end }} {{ end }} @@ -321,7 +331,7 @@ http { {{ if $cfg.EnableSyslog }} access_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} upstreaminfo if=$loggable; {{ else }} - access_log {{ $cfg.AccessLogPath }} upstreaminfo {{ $cfg.AccessLogParams }} if=$loggable; + access_log {{ or $cfg.HttpAccessLogPath $cfg.AccessLogPath }} upstreaminfo {{ $cfg.AccessLogParams }} if=$loggable; {{ end }} {{ end }} @@ -687,7 +697,7 @@ stream { {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log {{ $cfg.AccessLogPath }} log_stream {{ $cfg.AccessLogParams }}; + access_log {{ or $cfg.StreamAccessLogPath $cfg.AccessLogPath }} log_stream {{ $cfg.AccessLogParams }}; {{ end }} error_log {{ $cfg.ErrorLogPath }}; @@ -875,6 +885,10 @@ stream { ssl_ciphers {{ $server.SSLCiphers }}; {{ end }} + {{ if not (empty $server.SSLPreferServerCiphers) }} + ssl_prefer_server_ciphers {{ $server.SSLPreferServerCiphers }}; + {{ end }} + {{ if not (empty $server.ServerSnippet) }} {{ $server.ServerSnippet }} {{ end }} @@ -1004,7 +1018,7 @@ stream { } {{ end }} - + {{ if isLocationAllowed $location }} {{ if $externalAuth.SigninURL }} location {{ buildAuthSignURLLocation 
$location.Path $externalAuth.SigninURL }} { internal; @@ -1014,6 +1028,7 @@ stream { return 302 {{ buildAuthSignURL $externalAuth.SigninURL }}; } {{ end }} + {{ end }} location {{ $path }} { {{ $ing := (getIngressInformation $location.Ingress $server.Hostname $location.Path) }} diff --git a/test/e2e-image/Dockerfile b/test/e2e-image/Dockerfile index 6e165b2bd..67bc40a03 100644 --- a/test/e2e-image/Dockerfile +++ b/test/e2e-image/Dockerfile @@ -1,6 +1,6 @@ -FROM quay.io/kubernetes-ingress-controller/e2e:v04212020-5d67794f4 AS BASE +FROM quay.io/kubernetes-ingress-controller/e2e:v05312020-d250b97b4 AS BASE -FROM alpine:3.11 +FROM alpine:3.12 RUN apk add -U --no-cache \ ca-certificates \ @@ -10,13 +10,10 @@ RUN apk add -U --no-cache \ libc6-compat \ openssl -RUN curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \ - && helm repo add stable https://kubernetes-charts.storage.googleapis.com \ - && helm repo update - COPY --from=BASE /go/bin/ginkgo /usr/local/bin/ COPY --from=BASE /usr/local/bin/kubectl /usr/local/bin/ COPY --from=BASE /usr/local/bin/cfssl /usr/local/bin/ +COPY --from=BASE /usr/local/bin/helm /usr/local/bin/ COPY --from=BASE /usr/local/bin/cfssljson /usr/local/bin/ COPY . / diff --git a/test/e2e-image/Makefile b/test/e2e-image/Makefile index 07ac33150..dd96b4303 100644 --- a/test/e2e-image/Makefile +++ b/test/e2e-image/Makefile @@ -1,5 +1,5 @@ .PHONY: all -all: container +all: image DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) @@ -12,8 +12,8 @@ ifdef DIND_TASKS USE_DOCKER=false endif -.PHONY: container -container: +.PHONY: image +image: ifeq ($(USE_DOCKER), true) @$(DIR)/../../build/run-in-docker.sh make e2e-test-binary else @@ -27,9 +27,7 @@ endif # TODO: avoid manual copy cp -R $(DIR)/../../test/e2e/settings/ocsp/* . - docker buildx build \ - --load \ - --progress plain \ + docker build \ --tag nginx-ingress-controller:e2e . 
.PHONY: clean diff --git a/test/e2e/annotations/auth.go b/test/e2e/annotations/auth.go index b97ebcef8..11e4807b9 100644 --- a/test/e2e/annotations/auth.go +++ b/test/e2e/annotations/auth.go @@ -29,6 +29,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/test/e2e/framework" @@ -353,6 +354,8 @@ var _ = framework.DescribeAnnotation("auth-*", func() { ginkgo.Context("when external authentication is configured", func() { host := "auth" + var annotations map[string]string + var ing *networking.Ingress ginkgo.BeforeEach(func() { f.NewHttpbinDeployment() @@ -367,12 +370,12 @@ var _ = framework.DescribeAnnotation("auth-*", func() { httpbinIP = e.Subsets[0].Addresses[0].IP - annotations := map[string]string{ + annotations = map[string]string{ "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbinIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) + ing = framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { @@ -399,6 +402,30 @@ var _ = framework.DescribeAnnotation("auth-*", func() { Status(http.StatusFound). 
Header("Location").Equal(fmt.Sprintf("http://%s/auth/start?rd=http://%s%s", host, host, url.QueryEscape("/?a=b&c=d"))) }) + + ginkgo.It("keeps processing new ingresses even if one of the existing ingresses is misconfigured", func() { + annotations["nginx.ingress.kubernetes.io/auth-type"] = "basic" + annotations["nginx.ingress.kubernetes.io/auth-secret"] = "something" + annotations["nginx.ingress.kubernetes.io/auth-realm"] = "test auth" + f.UpdateIngress(ing) + + anotherHost := "different" + anotherAnnotations := map[string]string{} + + anotherIng := framework.NewSingleIngress(anotherHost, "/", anotherHost, f.Namespace, framework.EchoService, 80, anotherAnnotations) + f.EnsureIngress(anotherIng) + + f.WaitForNginxServer(anotherHost, + func(server string) bool { + return strings.Contains(server, "server_name "+anotherHost) + }) + + f.HTTPTestClient(). + GET("/"). + WithHeader("Host", anotherHost). + Expect(). + Status(http.StatusOK) + }) }) ginkgo.Context("when external authentication with caching is configured", func() { diff --git a/test/e2e/annotations/sslciphers.go b/test/e2e/annotations/sslciphers.go index ca464bf3f..0e2753b52 100644 --- a/test/e2e/annotations/sslciphers.go +++ b/test/e2e/annotations/sslciphers.go @@ -34,7 +34,8 @@ var _ = framework.DescribeAnnotation("ssl-ciphers", func() { ginkgo.It("should change ssl ciphers", func() { host := "ciphers.foo.com" annotations := map[string]string{ - "nginx.ingress.kubernetes.io/ssl-ciphers": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", + "nginx.ingress.kubernetes.io/ssl-ciphers": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", + "nginx.ingress.kubernetes.io/ssl-prefer-server-ciphers": "false", } ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, framework.EchoService, 80, annotations) @@ -42,7 +43,8 @@ var _ = framework.DescribeAnnotation("ssl-ciphers", func() { f.WaitForNginxServer(host, func(server string) bool { - return strings.Contains(server, 
"ssl_ciphers ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;") + return strings.Contains(server, "ssl_ciphers ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;") && + strings.Contains(server, "ssl_prefer_server_ciphers off;") }) }) }) diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go index b422e9865..225f74b7a 100644 --- a/test/e2e/framework/deployment.go +++ b/test/e2e/framework/deployment.go @@ -53,9 +53,7 @@ func (f *Framework) NewEchoDeploymentWithReplicas(replicas int) { // name is configurable func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas int) { deployment := newDeployment(name, f.Namespace, "ingress-controller/echo:1.0.0-dev", 80, int32(replicas), - []string{ - "openresty", - }, + nil, []corev1.VolumeMount{}, []corev1.Volume{}, ) @@ -91,21 +89,35 @@ func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas i // NewSlowEchoDeployment creates a new deployment of the slow echo server image in a particular namespace. func (f *Framework) NewSlowEchoDeployment() { data := map[string]string{} - data["default.conf"] = `# + data["nginx.conf"] = `# -server { - access_log on; - access_log /dev/stdout; +events { + worker_connections 1024; + multi_accept on; +} - listen 80; +http { + default_type 'text/plain'; + client_max_body_size 0; - location / { - echo ok; - } + server { + access_log on; + access_log /dev/stdout; - location ~ ^/sleep/(?[0-9]+)$ { - echo_sleep $sleepTime; - echo "ok after $sleepTime seconds"; + listen 80; + + location / { + content_by_lua_block { + ngx.print("ok") + } + } + + location ~ ^/sleep/(?[0-9]+)$ { + content_by_lua_block { + ngx.sleep(ngx.var.sleepTime) + ngx.print("ok after " .. ngx.var.sleepTime .. 
" seconds") + } + } } } @@ -120,12 +132,13 @@ server { }, metav1.CreateOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "creating configmap") - deployment := newDeployment(SlowEchoService, f.Namespace, "openresty/openresty:1.15.8.2-alpine", 80, 1, + deployment := newDeployment(SlowEchoService, f.Namespace, "quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f", 80, 1, nil, []corev1.VolumeMount{ { Name: SlowEchoService, - MountPath: "/etc/nginx/conf.d", + MountPath: "/etc/nginx/nginx.conf", + SubPath: "nginx.conf", ReadOnly: true, }, }, diff --git a/test/e2e/run.sh b/test/e2e/run.sh index befe7e689..25199c328 100755 --- a/test/e2e/run.sh +++ b/test/e2e/run.sh @@ -56,14 +56,14 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Use 1.0.0-dev to make sure we use the latest configuration in the helm template export TAG=1.0.0-dev -export ARCH=amd64 +export ARCH=${ARCH:-amd64} export REGISTRY=ingress-controller export K8S_VERSION=${K8S_VERSION:-v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694} export DOCKER_CLI_EXPERIMENTAL=enabled -KIND_CLUSTER_NAME="ingress-nginx-dev" +export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ingress-nginx-dev} echo "[dev-env] creating Kubernetes cluster with kind" @@ -78,15 +78,15 @@ kind create cluster \ echo "Kubernetes cluster:" kubectl get nodes -o wide -echo "[dev-env] building container" +echo "[dev-env] building image" export EXIT_CODE=-1 echo " -make -C ${DIR}/../../ build container -make -C ${DIR}/../../ e2e-test-image -make -C ${DIR}/../../images/fastcgi-helloserver/ GO111MODULE=\"on\" build container -make -C ${DIR}/../../images/echo/ container -make -C ${DIR}/../../images/httpbin/ container -make -C ${DIR}/../../images/cfssl/ container +make -C ${DIR}/../../ clean-image build image +make -C ${DIR}/../e2e-image image +make -C ${DIR}/../../images/fastcgi-helloserver/ GO111MODULE=\"on\" build image +make -C ${DIR}/../../images/httpbin/ image +make -C 
${DIR}/../../images/echo/ image +make -C ${DIR}/../../images/cfssl/ image " | parallel --joblog /tmp/log {} || EXIT_CODE=$? if [ ${EXIT_CODE} -eq 0 ] || [ ${EXIT_CODE} -eq -1 ]; then @@ -96,14 +96,12 @@ then else echo "Image builds were not ok! Log:" cat /tmp/log - exit + exit 1 fi -docker tag ${REGISTRY}/nginx-ingress-controller-${ARCH}:${TAG} ${REGISTRY}/nginx-ingress-controller:${TAG} - # Preload images used in e2e tests -docker pull openresty/openresty:1.15.8.2-alpine docker pull moul/grpcbin +docker pull quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f KIND_WORKERS=$(kind get nodes --name="${KIND_CLUSTER_NAME}" | grep worker | awk '{printf (NR>1?",":"") $1}') @@ -113,9 +111,9 @@ echo " kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} nginx-ingress-controller:e2e kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/nginx-ingress-controller:${TAG} kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/fastcgi-helloserver:${TAG} -kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} openresty/openresty:1.15.8.2-alpine kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/httpbin:${TAG} kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/echo:${TAG} +kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} quay.io/kubernetes-ingress-controller/nginx:e3c49c52f4b74fe47ad65d6f3266a02e8b6b622f kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} moul/grpcbin kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/cfssl:${TAG} " | parallel --joblog /tmp/log {} || EXIT_CODE=$? 
diff --git a/test/e2e/servicebackend/service_externalname.go b/test/e2e/servicebackend/service_externalname.go index bef790c15..d14c11dfc 100644 --- a/test/e2e/servicebackend/service_externalname.go +++ b/test/e2e/servicebackend/service_externalname.go @@ -86,7 +86,10 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, nil) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": "httpbin.org", + } + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -124,7 +127,10 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { } f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, nil) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": "httpbin.org", + } + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -193,7 +199,10 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { } f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, nil) + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": "httpbin.org", + } + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, annotations) ing.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort = intstr.FromString(host) f.EnsureIngress(ing) @@ -232,7 +241,10 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { } f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, nil) + annotations := 
map[string]string{ + "nginx.ingress.kubernetes.io/upstream-vhost": "httpbin.org", + } + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBinService, 80, annotations) ing.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort = intstr.FromString(host) f.EnsureIngress(ing) diff --git a/test/e2e/settings/access_log.go b/test/e2e/settings/access_log.go new file mode 100644 index 000000000..0e4c1d827 --- /dev/null +++ b/test/e2e/settings/access_log.go @@ -0,0 +1,87 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package settings + +import ( + "strings" + + "github.com/onsi/ginkgo" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.DescribeSetting("access-log", func() { + f := framework.NewDefaultFramework("access-log") + + ginkgo.Context("access-log-path", func() { + + ginkgo.It("use the default configuration", func() { + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "access_log /var/log/nginx/access.log upstreaminfo") && + strings.Contains(cfg, "access_log /var/log/nginx/access.log log_stream") + }) + }) + + ginkgo.It("use the specified configuration", func() { + f.UpdateNginxConfigMapData("access-log-path", "/tmp/access.log") + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "access_log /tmp/access.log upstreaminfo") && + strings.Contains(cfg, "access_log /tmp/access.log log_stream") + }) + }) + }) + + ginkgo.Context("http-access-log-path", func() { + + ginkgo.It("use the specified configuration", func() { + f.UpdateNginxConfigMapData("http-access-log-path", "/tmp/http-access.log") + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "access_log /tmp/http-access.log upstreaminfo") && + strings.Contains(cfg, "access_log /var/log/nginx/access.log log_stream") + }) + }) + }) + + ginkgo.Context("stream-access-log-path", func() { + + ginkgo.It("use the specified configuration", func() { + f.UpdateNginxConfigMapData("stream-access-log-path", "/tmp/stream-access.log") + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "access_log /tmp/stream-access.log log_stream") && + strings.Contains(cfg, "access_log /var/log/nginx/access.log upstreaminfo") + }) + }) + }) + + ginkgo.Context("http-access-log-path & stream-access-log-path", func() { + + ginkgo.It("use the specified configuration", func() { + f.SetNginxConfigMapData(map[string]string{ + "http-access-log-path": "/tmp/http-access.log", + 
"stream-access-log-path": "/tmp/stream-access.log", + }) + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, "access_log /tmp/http-access.log upstreaminfo") && + strings.Contains(cfg, "access_log /tmp/stream-access.log log_stream") + }) + }) + }) +}) diff --git a/test/e2e/settings/proxy_protocol.go b/test/e2e/settings/proxy_protocol.go index fb51f6553..22ab3bdd7 100644 --- a/test/e2e/settings/proxy_protocol.go +++ b/test/e2e/settings/proxy_protocol.go @@ -17,6 +17,7 @@ limitations under the License. package settings import ( + "crypto/tls" "fmt" "io/ioutil" "net" @@ -103,4 +104,47 @@ var _ = framework.DescribeSetting("use-proxy-protocol", func() { assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=https")) assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=192.168.0.1")) }) + + ginkgo.It("should enable PROXY Protocol for HTTPS", func() { + host := "proxy-protocol" + + f.UpdateNginxConfigMapData(setting, "true") + + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, framework.EchoService, 80, nil)) + tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + assert.Nil(ginkgo.GinkgoT(), err) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "443 proxy_protocol") + }) + + ip := f.GetNginxIP() + + conn, err := net.Dial("tcp", net.JoinHostPort(ip, "443")) + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error connecting to %v:443", ip) + defer conn.Close() + + _, err = fmt.Fprintf(conn, "PROXY TCP4 192.168.0.1 192.168.0.11 56324 1234\r\n") + assert.Nil(ginkgo.GinkgoT(), err, "writing proxy protocol") + + tlsConn := tls.Client(conn, tlsConfig) + defer tlsConn.Close() + + _, err = tlsConn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")) + assert.Nil(ginkgo.GinkgoT(), err, "writing HTTP request") + + data, err 
:= ioutil.ReadAll(tlsConn) + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error reading connection data") + + body := string(data) + assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%v", "proxy-protocol")) + assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) + assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=https")) + assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-scheme=https")) + assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=192.168.0.1")) + }) }) diff --git a/test/e2e/wait-for-nginx.sh b/test/e2e/wait-for-nginx.sh index c4dff637b..c47b5b4ea 100755 --- a/test/e2e/wait-for-nginx.sh +++ b/test/e2e/wait-for-nginx.sh @@ -81,8 +81,18 @@ controller: admissionWebhooks: enabled: false -defaultBackend: - enabled: false + # ulimit -c unlimited + # mkdir -p /tmp/coredump + # chmod a+rwx /tmp/coredump + # echo "/tmp/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern + extraVolumeMounts: + - name: coredump + mountPath: /tmp/coredump + + extraVolumes: + - name: coredump + hostPath: + path: /tmp/coredump rbac: create: true diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go index 25ff51589..c0a965923 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -37,8 +37,18 @@ type Manager interface { // restore the object later. GetPaths() map[string]string + // GetUnifiedPath returns the unified path when running in unified mode. + // The value corresponds to the all values of GetPaths() map. + // + // GetUnifiedPath returns error when running in hybrid mode as well as + // in legacy mode. + GetUnifiedPath() (string, error) + // Sets the cgroup as configured. Set(container *configs.Config) error + + // Gets the cgroup as configured. 
+ GetCgroups() (*configs.Cgroup, error) } type NotFoundError struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index 60790f83b..dbcc58f5b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -20,8 +20,9 @@ import ( ) const ( - CgroupNamePrefix = "name=" - CgroupProcesses = "cgroup.procs" + CgroupNamePrefix = "name=" + CgroupProcesses = "cgroup.procs" + unifiedMountpoint = "/sys/fs/cgroup" ) var ( @@ -40,7 +41,7 @@ var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"} func IsCgroup2UnifiedMode() bool { isUnifiedOnce.Do(func() { var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + if err := syscall.Statfs(unifiedMountpoint, &st); err != nil { panic("cannot statfs cgroup root") } isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC @@ -50,6 +51,9 @@ func IsCgroup2UnifiedMode() bool { // https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { + if IsCgroup2UnifiedMode() { + return unifiedMountpoint, nil + } mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) return mnt, err } @@ -235,8 +239,8 @@ func GetCgroupMounts(all bool) ([]Mount, error) { return nil, err } m := Mount{ - Mountpoint: "/sys/fs/cgroup", - Root: "/sys/fs/cgroup", + Mountpoint: unifiedMountpoint, + Root: unifiedMountpoint, Subsystems: availableControllers, } return []Mount{m}, nil @@ -262,6 +266,21 @@ func GetCgroupMounts(all bool) ([]Mount, error) { // GetAllSubsystems returns all the cgroup subsystems supported by the kernel func GetAllSubsystems() ([]string, error) { + // /proc/cgroups is meaningless for v2 + // https://github.com/torvalds/linux/blob/v5.3/Documentation/admin-guide/cgroup-v2.rst#deprecated-v1-core-features + if 
IsCgroup2UnifiedMode() { + // "pseudo" controllers do not appear in /sys/fs/cgroup/cgroup.controllers. + // - devices: implemented in kernel 4.15 + // - freezer: implemented in kernel 5.2 + // We assume these are always available, as it is hard to detect availability. + pseudo := []string{"devices", "freezer"} + data, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") + if err != nil { + return nil, err + } + subsystems := append(pseudo, strings.Fields(string(data))...) + return subsystems, nil + } f, err := os.Open("/proc/cgroups") if err != nil { return nil, err diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 0e2e30175..204834883 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error { // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertSliceNumbers(*v, 0) + case *interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertInterfaceNumbers(v, 0) + default: return json.Unmarshal(data, v) } } +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) + } + return err +} + // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited func convertMapNumbers(m map[string]interface{}, depth int) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 4cb0c122c..d759d912b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance } // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained. +// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() +// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in +// undetermined behavior. // The BackoffManager is supposed to be called in a single-threaded environment. type BackoffManager interface { Backoff() clock.Timer @@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du Steps: math.MaxInt32, Cap: maxBackoff, }, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, initialBackoff: initBackoff, lastBackoffStart: c.Now(), backoffResetDuration: resetDuration, @@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { return b.backoff.Step() } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff. +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. 
+// The returned timer must be drained before calling Backoff() the second time func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - b.backoffTimer.Reset(b.getNextBackoff()) + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } return b.backoffTimer } @@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C clock: c, duration: duration, jitter: jitter, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, } } @@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { return jitteredPeriod } +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - j.backoffTimer.Reset(j.getNextBackoff()) + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } return j.backoffTimer } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 5096f51d2..a9806384a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -35,7 +35,7 @@ import ( var ( // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields // DEPRECATED will be replaced - ClusterDefaults = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} + ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} // DefaultClientConfig represents the legacy behavior of this package for defaulting // DEPRECATED will be replace DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ @@ -43,6 +43,15 @@ var ( }, nil, 
NewDefaultClientConfigLoadingRules(), promptedCredentials{}} ) +// getDefaultServer returns a default setting for DefaultClientConfig +// DEPRECATED +func getDefaultServer() string { + if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { + return server + } + return "http://localhost:8080" +} + // ClientConfig is used to make it easy to get an api server client type ClientConfig interface { // RawConfig returns the merged result of all overrides diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 608f75249..6390b4ef5 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -88,6 +88,9 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) if err != nil { return err } + if cml.cm.Annotations == nil { + cml.cm.Annotations = make(map[string]string) + } cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{}) return err diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index 70225e734..4e323ba05 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -5,9 +5,9 @@ module k8s.io/cloud-provider go 1.13 require ( - k8s.io/api v0.18.2 - k8s.io/apimachinery v0.18.2 - k8s.io/client-go v0.18.2 + k8s.io/api v0.18.3 + k8s.io/apimachinery v0.18.3 + k8s.io/client-go v0.18.3 k8s.io/klog v1.0.0 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 ) @@ -15,7 +15,7 @@ require ( replace ( golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 - k8s.io/api => 
k8s.io/api v0.18.2 - k8s.io/apimachinery => k8s.io/apimachinery v0.18.2 - k8s.io/client-go => k8s.io/client-go v0.18.2 + k8s.io/api => k8s.io/api v0.18.3 + k8s.io/apimachinery => k8s.io/apimachinery v0.18.3 + k8s.io/client-go => k8s.io/client-go v0.18.3 ) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index d37f515ac..8c479b5d9 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -167,16 +167,16 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi 
v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod index 823e5cbc3..71e2d47b0 100644 --- a/vendor/k8s.io/code-generator/go.mod +++ b/vendor/k8s.io/code-generator/go.mod @@ -18,7 +18,7 @@ require ( golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 // indirect k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 k8s.io/klog v1.0.0 - k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c + k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 // release-1.18 sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum index dbd44f79e..22863c266 100644 --- a/vendor/k8s.io/code-generator/go.sum +++ b/vendor/k8s.io/code-generator/go.sum @@ -101,8 +101,8 @@ k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod 
h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= diff --git a/vendor/modules.txt b/vendor/modules.txt index 6e4de7250..e1e88ee45 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -225,7 +225,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/opencontainers/runc v1.0.0-rc9 +# github.com/opencontainers/runc v1.0.0-rc10 ## explicit github.com/opencontainers/runc/libcontainer/cgroups github.com/opencontainers/runc/libcontainer/configs @@ -436,7 +436,7 @@ gopkg.in/inf.v0 gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.18.2 => k8s.io/api v0.18.2 +# k8s.io/api v0.18.3 => k8s.io/api v0.18.3 ## explicit k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -479,7 +479,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.18.2 => k8s.io/apiextensions-apiserver v0.18.2 +# k8s.io/apiextensions-apiserver v0.18.3 => k8s.io/apiextensions-apiserver v0.18.3 ## explicit k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -488,7 +488,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.18.2 => k8s.io/apimachinery v0.18.2 +# k8s.io/apimachinery v0.18.3 => k8s.io/apimachinery 
v0.18.3 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -545,7 +545,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.18.2 => k8s.io/apiserver v0.18.2 +# k8s.io/apiserver v0.18.3 => k8s.io/apiserver v0.18.3 ## explicit k8s.io/apiserver/pkg/apis/audit k8s.io/apiserver/pkg/authentication/user @@ -555,7 +555,7 @@ k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/server/healthz k8s.io/apiserver/pkg/server/httplog k8s.io/apiserver/pkg/util/feature -# k8s.io/cli-runtime v0.18.2 => k8s.io/cli-runtime v0.18.2 +# k8s.io/cli-runtime v0.18.3 => k8s.io/cli-runtime v0.18.3 ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/kustomize @@ -569,7 +569,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.18.2 => k8s.io/client-go v0.18.2 +# k8s.io/client-go v0.18.3 => k8s.io/client-go v0.18.3 ## explicit k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/disk @@ -790,9 +790,9 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.18.2 => k8s.io/cloud-provider v0.18.2 +# k8s.io/cloud-provider v0.18.3 => k8s.io/cloud-provider v0.18.3 k8s.io/cloud-provider -# k8s.io/code-generator v0.18.2 => k8s.io/code-generator v0.18.2 +# k8s.io/code-generator v0.18.3 => k8s.io/code-generator v0.18.3 ## explicit k8s.io/code-generator k8s.io/code-generator/cmd/client-gen @@ -827,14 +827,14 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.18.2 => k8s.io/component-base v0.18.2 +# 
k8s.io/component-base v0.18.3 => k8s.io/component-base v0.18.3 ## explicit k8s.io/component-base/featuregate k8s.io/component-base/logs k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/version -# k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.18.2 +# k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.18.3 k8s.io/cri-api/pkg/apis/runtime/v1alpha2 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 k8s.io/gengo/args @@ -850,14 +850,14 @@ k8s.io/gengo/types # k8s.io/klog v1.0.0 ## explicit k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c +# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/generators k8s.io/kube-openapi/pkg/generators/rules k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets -# k8s.io/kubernetes v1.18.2 +# k8s.io/kubernetes v1.18.3 ## explicit k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/v1/pod @@ -890,7 +890,7 @@ k8s.io/utils/trace # pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732 ## explicit pault.ag/go/sniff/parser -# sigs.k8s.io/controller-runtime v0.5.1-0.20200327213554-2d4c4877f906 +# sigs.k8s.io/controller-runtime v0.6.0 ## explicit sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil @@ -928,24 +928,24 @@ sigs.k8s.io/kustomize/pkg/types sigs.k8s.io/structured-merge-diff/v3/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml -# k8s.io/api => k8s.io/api v0.18.2 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.2 -# k8s.io/apimachinery => k8s.io/apimachinery v0.18.2 -# k8s.io/apiserver => k8s.io/apiserver v0.18.2 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.2 -# k8s.io/client-go => k8s.io/client-go v0.18.2 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.2 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.2 -# k8s.io/code-generator => k8s.io/code-generator v0.18.2 -# 
k8s.io/component-base => k8s.io/component-base v0.18.2 -# k8s.io/cri-api => k8s.io/cri-api v0.18.2 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.2 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.2 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.2 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.2 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.2 -# k8s.io/kubectl => k8s.io/kubectl v0.18.2 -# k8s.io/kubelet => k8s.io/kubelet v0.18.2 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.2 -# k8s.io/metrics => k8s.io/metrics v0.18.2 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.2 +# k8s.io/api => k8s.io/api v0.18.3 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.3 +# k8s.io/apimachinery => k8s.io/apimachinery v0.18.3 +# k8s.io/apiserver => k8s.io/apiserver v0.18.3 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.3 +# k8s.io/client-go => k8s.io/client-go v0.18.3 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.3 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.3 +# k8s.io/code-generator => k8s.io/code-generator v0.18.3 +# k8s.io/component-base => k8s.io/component-base v0.18.3 +# k8s.io/cri-api => k8s.io/cri-api v0.18.3 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.3 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.3 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.3 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.3 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.3 +# k8s.io/kubectl => k8s.io/kubectl v0.18.3 +# k8s.io/kubelet => k8s.io/kubelet v0.18.3 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.3 +# k8s.io/metrics => k8s.io/metrics v0.18.3 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.3 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go 
b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index 7719417a7..0a31419ef 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -133,11 +133,8 @@ func loadConfig(context string) (*rest.Config, error) { } loadingRules.Precedence = append(loadingRules.Precedence, path.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) } - if c, err := loadConfigWithContext(apiServerURL, loadingRules, context); err == nil { - return c, nil - } - return nil, fmt.Errorf("could not locate a kubeconfig") + return loadConfigWithContext(apiServerURL, loadingRules, context) } func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 000000000..ced0548b1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,95 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. 
+func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Create implements client.Client +func (c *dryRunClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client +func (c *dryRunClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client +func (c *dryRunClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client +func (c *dryRunClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client +func (c *dryRunClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) 
+} + +// Status implements client.StatusClient +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index c3c4a52e9..131bdc2a0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -71,28 +71,43 @@ var DryRunAll = dryRunAll{} type dryRunAll struct{} +// ApplyToCreate applies this configuration to the given create options. func (dryRunAll) ApplyToCreate(opts *CreateOptions) { opts.DryRun = []string{metav1.DryRunAll} } + +// ApplyToUpdate applies this configuration to the given update options. func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { opts.DryRun = []string{metav1.DryRunAll} } + +// ApplyToPatch applies this configuration to the given patch options. func (dryRunAll) ApplyToPatch(opts *PatchOptions) { opts.DryRun = []string{metav1.DryRunAll} } + +// ApplyToPatch applies this configuration to the given delete options. 
func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { opts.DryRun = []string{metav1.DryRunAll} } +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} // FieldOwner set the field manager name for the given server-side apply patch. type FieldOwner string +// ApplyToPatch applies this configuration to the given patch options. func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { opts.FieldManager = string(f) } + +// ApplyToCreate applies this configuration to the given create options. func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { opts.FieldManager = string(f) } + +// ApplyToUpdate applies this configuration to the given update options. func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { opts.FieldManager = string(f) } @@ -252,33 +267,49 @@ func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { // to the given number of seconds. type GracePeriodSeconds int64 +// ApplyToDelete applies this configuration to the given delete options. func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { secs := int64(s) opts.GracePeriodSeconds = &secs } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { s.ApplyToDelete(&opts.DeleteOptions) } +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. type Preconditions metav1.Preconditions +// ApplyToDelete applies this configuration to the given delete options. func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { preconds := metav1.Preconditions(p) opts.Preconditions = &preconds } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { p.ApplyToDelete(&opts.DeleteOptions) } +// PropagationPolicy determined whether and how garbage collection will be +// performed. 
Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. type PropagationPolicy metav1.DeletionPropagation +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { policy := metav1.DeletionPropagation(p) opts.PropagationPolicy = &policy } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { p.ApplyToDelete(&opts.DeleteOptions) } @@ -379,12 +410,14 @@ func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { // MatchingLabels filters the list/delete operation on the given set of labels. type MatchingLabels map[string]string +// ApplyToList applies this configuration to the given list options. func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? - sel := labels.SelectorFromSet(map[string]string(m)) + sel := labels.SelectorFromValidatedSet(map[string]string(m)) opts.LabelSelector = sel } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } @@ -393,6 +426,7 @@ func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { // without checking their values. type HasLabels []string +// ApplyToList applies this configuration to the given list options. 
func (m HasLabels) ApplyToList(opts *ListOptions) { sel := labels.NewSelector() for _, label := range m { @@ -404,6 +438,7 @@ func (m HasLabels) ApplyToList(opts *ListOptions) { opts.LabelSelector = sel } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } @@ -415,10 +450,12 @@ type MatchingLabelsSelector struct { labels.Selector } +// ApplyToList applies this configuration to the given list options. func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { opts.LabelSelector = m } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } @@ -435,12 +472,14 @@ func MatchingField(name, val string) MatchingFields { // (or index in the case of cached lists). type MatchingFields fields.Set +// ApplyToList applies this configuration to the given list options. func (m MatchingFields) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid re-serializing this? sel := fields.Set(m).AsSelector() opts.FieldSelector = sel } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } @@ -452,10 +491,12 @@ type MatchingFieldsSelector struct { fields.Selector } +// ApplyToList applies this configuration to the given list options. func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { opts.FieldSelector = m } +// ApplyToDeleteAllOf applies this configuration to the given an List options. 
func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } @@ -463,10 +504,12 @@ func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { // InNamespace restricts the list/delete operation to the given namespace. type InNamespace string +// ApplyToList applies this configuration to the given list options. func (n InNamespace) ApplyToList(opts *ListOptions) { opts.Namespace = string(n) } +// ApplyToDeleteAllOf applies this configuration to the given an List options. func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } @@ -476,6 +519,7 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { // does not support setting it for deletecollection operations. type Limit int64 +// ApplyToList applies this configuration to the given an list options. func (l Limit) ApplyToList(opts *ListOptions) { opts.Limit = int64(l) } @@ -485,6 +529,7 @@ func (l Limit) ApplyToList(opts *ListOptions) { // does not support setting it for deletecollection operations. type Continue string +// ApplyToList applies this configuration to the given an List options. 
func (c Continue) ApplyToList(opts *ListOptions) { opts.Continue = string(c) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index 0e9c451e2..04dc5fc2d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -278,10 +278,16 @@ func renderCRDs(options *CRDInstallOptions) ([]runtime.Object, error) { var ( err error info os.FileInfo - crds []*unstructured.Unstructured files []os.FileInfo ) + type GVKN struct { + GVK schema.GroupVersionKind + Name string + } + + crds := map[GVKN]*unstructured.Unstructured{} + for _, path := range options.Paths { var filePath = path @@ -294,7 +300,7 @@ func renderCRDs(options *CRDInstallOptions) ([]runtime.Object, error) { } if !info.IsDir() { - filePath, files = filepath.Dir(path), append(files, info) + filePath, files = filepath.Dir(path), []os.FileInfo{info} } else { if files, err = ioutil.ReadDir(path); err != nil { return nil, err @@ -307,14 +313,23 @@ func renderCRDs(options *CRDInstallOptions) ([]runtime.Object, error) { return nil, err } - // If CRD already in the list, skip it. - if existsUnstructured(crds, crdList) { - continue + for i, crd := range crdList { + gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()} + if _, found := crds[gvkn]; found { + // Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense. + log.Info("there are more than one CRD definitions with the same ", "GVKN", gvkn) + } + // We always use the CRD definition that we found last. + crds[gvkn] = crdList[i] } - crds = append(crds, crdList...) 
} - return unstructuredCRDListToRuntime(crds), nil + // Converting map to a list to return + var res []runtime.Object + for _, obj := range crds { + res = append(res, obj) + } + return res, nil } // readCRDs reads the CRDs from files and Unmarshals them into structs diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go index b8401b111..00dfa2464 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -1,8 +1,6 @@ package envtest import ( - "reflect" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -57,18 +55,6 @@ func mergeCRDs(s1, s2 []runtime.Object) []runtime.Object { return merged } -// existsUnstructured verify if a any item is common between two lists. 
-func existsUnstructured(s1, s2 []*unstructured.Unstructured) bool { - for _, s1obj := range s1 { - for _, s2obj := range s2 { - if reflect.DeepEqual(s1obj, s2obj) { - return true - } - } - } - return false -} - func runtimeCRDListToUnstructured(l []runtime.Object) []*unstructured.Unstructured { res := []*unstructured.Unstructured{} for _, obj := range l { @@ -81,11 +67,3 @@ func runtimeCRDListToUnstructured(l []runtime.Object) []*unstructured.Unstructur } return res } - -func unstructuredCRDListToRuntime(l []*unstructured.Unstructured) []runtime.Object { - res := []runtime.Object{} - for _, obj := range l { - res = append(res, obj.DeepCopy()) - } - return res -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go index 26ba8a94c..dd6774d0c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -376,8 +376,8 @@ func readWebhooks(path string) ([]runtime.Object, []runtime.Object, error) { } const ( - admissionregv1 = "admissionregistration.k8s.io/v1beta1" - admissionregv1beta1 = "admissionregistration.k8s.io/v1" + admissionregv1 = "admissionregistration.k8s.io/v1" + admissionregv1beta1 = "admissionregistration.k8s.io/v1beta1" ) switch { case generic.Kind == "MutatingWebhookConfiguration": diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/apiserver.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/apiserver.go index 0282b23b4..5c0435fa1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/apiserver.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/apiserver.go @@ -1,19 +1,23 @@ package internal +// APIServerDefaultArgs allow tests to run offline, by preventing API server from attempting to +// use default route to determine its 
--advertise-address. var APIServerDefaultArgs = []string{ - // Allow tests to run offline, by preventing API server from attempting to - // use default route to determine its --advertise-address "--advertise-address=127.0.0.1", "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", "--cert-dir={{ .CertDir }}", "--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}", "--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}", "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", - "--disable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,PersistentVolumeClaimResize,ResourceQuota", //nolint + // we're keeping this disabled because if enabled, default SA is missing which would force all tests to create one + // in normal apiserver operation this SA is created by controller, but that is not run in integration environment + "--disable-admission-plugins=ServiceAccount", "--service-cluster-ip-range=10.0.0.0/24", "--allow-privileged=true", } +// DoAPIServerArgDefaulting will set default values to allow tests to run offline when the args are not informed. Otherwise, +// it will return the same []string arg passed as param. 
func DoAPIServerArgDefaulting(args []string) []string { if len(args) != 0 { return args diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/arguments.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/arguments.go index 00fe7935a..573295d90 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/arguments.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/arguments.go @@ -5,6 +5,7 @@ import ( "html/template" ) +// RenderTemplates returns an []string to render the templates func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { var t *template.Template diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/etcd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/etcd.go index 5a4511747..2d108a3e8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/etcd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/etcd.go @@ -4,6 +4,8 @@ import ( "net/url" ) +// EtcdDefaultArgs allow tests to run offline, by preventing API server from attempting to +// use default route to determine its urls. var EtcdDefaultArgs = []string{ "--listen-peer-urls=http://localhost:0", "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", @@ -11,6 +13,8 @@ var EtcdDefaultArgs = []string{ "--data-dir={{ .DataDir }}", } +// DoEtcdArgDefaulting will set default values to allow tests to run offline when the args are not informed. Otherwise, +// it will return the same []string arg passed as param. func DoEtcdArgDefaulting(args []string) []string { if len(args) != 0 { return args @@ -19,6 +23,7 @@ func DoEtcdArgDefaulting(args []string) []string { return EtcdDefaultArgs } +// isSecureScheme returns false when the schema is insecure. 
func isSecureScheme(scheme string) bool { // https://github.com/coreos/etcd/blob/d9deeff49a080a88c982d328ad9d33f26d1ad7b6/pkg/transport/listener.go#L53 if scheme == "https" || scheme == "unixs" { @@ -27,6 +32,8 @@ func isSecureScheme(scheme string) bool { return false } +// GetEtcdStartMessage returns an start message to inform if the client is or not insecure. +// It will return true when the URL informed has the scheme == "https" || scheme == "unixs" func GetEtcdStartMessage(listenURL url.URL) string { if isSecureScheme(listenURL.Scheme) { // https://github.com/coreos/etcd/blob/a7f1fbe00ec216fcb3a1919397a103b41dca8413/embed/serve.go#L167 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/process.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/process.go index 8ecf1afe5..9651029f0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/process.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/process.go @@ -19,6 +19,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/addr" ) +// ProcessState define the state of the process. type ProcessState struct { DefaultedProcessInput Session *gexec.Session @@ -46,6 +47,7 @@ type ProcessState struct { ready bool } +// DefaultedProcessInput defines the default process input required to perform the test. type DefaultedProcessInput struct { URL url.URL Dir string @@ -55,6 +57,8 @@ type DefaultedProcessInput struct { StartTimeout time.Duration } +// DoDefaulting sets the default configuration according to the data informed and return an DefaultedProcessInput +// and an error if some requirement was not informed. func DoDefaulting( name string, listenURL *url.URL, @@ -112,6 +116,8 @@ func DoDefaulting( type stopChannel chan struct{} +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. 
func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { if ps.ready { return nil @@ -187,6 +193,8 @@ func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh } } +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. func (ps *ProcessState) Stop() error { if ps.Session == nil { return nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/tinyca.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/tinyca.go index ca877ca9e..034a659bc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/tinyca.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/tinyca.go @@ -73,6 +73,8 @@ func newPrivateKey() (crypto.Signer, error) { return rsa.GenerateKey(crand.Reader, rsaKeySize) } +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! func NewTinyCA() (*TinyCA, error) { caPrivateKey, err := newPrivateKey() if err != nil {