diff --git a/.gcloudignore b/.gcloudignore new file mode 100644 index 000000000..e69de29bb diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 4d443f8d5..150ec29ae 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -42,7 +42,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 id: filter @@ -68,7 +68,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Run Gosec Security Scanner uses: securego/gosec@c5ea1b7bdd9efc3792e513258853552b0ae31e06 # v2.16.0 @@ -85,7 +85,7 @@ jobs: (needs.changes.outputs.go == 'true') steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Set up Go id: go @@ -104,7 +104,7 @@ jobs: (needs.changes.outputs.go == 'true') steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Set up Go id: go @@ -123,7 +123,7 @@ jobs: (needs.changes.outputs.go == 'true') steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Set up Go id: go @@ -144,7 +144,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Set up Go id: go @@ -154,11 +154,11 @@ jobs: check-latest: true - name: Set up QEMU - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 + uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0 + uses: docker/setup-buildx-action@16c0bc4a6e6ada2cfd8afd41d22d95379cf7c32a # v2.8.0 with: version: latest @@ -211,7 +211,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Setup Go uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 @@ -286,7 +286,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: cache uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 @@ -336,7 +336,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: cache uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 @@ -380,7 +380,7 @@ jobs: PLATFORMS: linux/amd64,linux/arm64 steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 id: filter-images @@ -453,7 +453,7 @@ 
jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 id: filter-images diff --git a/.github/workflows/depreview.yaml b/.github/workflows/depreview.yaml index 4f04bdaed..625c2f461 100644 --- a/.github/workflows/depreview.yaml +++ b/.github/workflows/depreview.yaml @@ -9,6 +9,6 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: 'Dependency Review' uses: actions/dependency-review-action@1360a344ccb0ab6e9475edef90ad2f46bf8003b1 # v3.0.6 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 1a2ceaa83..f7aee6610 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 id: filter @@ -47,7 +47,7 @@ jobs: steps: - name: Checkout master - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Deploy uses: ./.github/actions/mkdocs diff --git a/.github/workflows/helm.yaml b/.github/workflows/helm.yaml index 2a82fa124..6303b6a27 100644 --- a/.github/workflows/helm.yaml +++ b/.github/workflows/helm.yaml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Run Artifact Hub lint run: | @@ -61,7 +61,7 @@ jobs: steps: - name: Checkout master - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: # Fetch entire history. 
Required for chart-releaser; see https://github.com/helm/chart-releaser-action/issues/13#issuecomment-602063896 fetch-depth: 0 diff --git a/.github/workflows/perftest.yaml b/.github/workflows/perftest.yaml index 9e87bf1b2..36f1f1ede 100644 --- a/.github/workflows/perftest.yaml +++ b/.github/workflows/perftest.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Install K6 run: | diff --git a/.github/workflows/plugin.yaml b/.github/workflows/plugin.yaml index c5c6fc2b1..d8769f439 100644 --- a/.github/workflows/plugin.yaml +++ b/.github/workflows/plugin.yaml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: fetch-depth: 0 @@ -28,7 +28,7 @@ jobs: check-latest: true - name: Run GoReleaser - uses: goreleaser/goreleaser-action@f82d6c1c344bcacabba2c841718984797f664a6b # v4.2.0 + uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v4.3.0 with: version: latest args: release --rm-dist diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 72acf608a..2e276a3f6 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -27,12 +27,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@80e868c13c90f172d68d1f4501dee99e2479f7af # v2.1.3 + uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0 with: results_file: results.sarif results_format: sarif diff --git a/.github/workflows/vulnerability-scans.yaml b/.github/workflows/vulnerability-scans.yaml index 069c9f974..af7d8bda1 100644 --- a/.github/workflows/vulnerability-scans.yaml +++ b/.github/workflows/vulnerability-scans.yaml @@ -22,7 +22,7 @@ jobs: versions: ${{ steps.version.outputs.TAGS }} steps: - name: Checkout code - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: fetch-depth: 0 @@ -52,7 +52,7 @@ jobs: versions: ${{ fromJSON(needs.version.outputs.versions) }} steps: - name: Checkout code - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - shell: bash id: test @@ -60,7 +60,7 @@ jobs: - name: Scan image with AquaSec/Trivy id: scan - uses: aquasecurity/trivy-action@e5f43133f6e8736992c9f3c1b3296e24b37e17f2 # v0.10.0 + uses: aquasecurity/trivy-action@41f05d9ecffa2ed3f1580af306000f734b733e54 # v0.11.2 with: image-ref: registry.k8s.io/ingress-nginx/controller:${{ matrix.versions }} format: 'sarif' diff --git a/Makefile b/Makefile index fc40a39bb..7b413141a 100644 --- a/Makefile +++ b/Makefile @@ -262,3 +262,8 @@ release: ensure-buildx clean --build-arg COMMIT_SHA="$(COMMIT_SHA)" \ --build-arg BUILD_ID="$(BUILD_ID)" \ -t $(REGISTRY)/controller-chroot:$(TAG) rootfs -f rootfs/Dockerfile-chroot + +.PHONY: build-docs +build-docs: + pip install -U mkdocs-material==6.2.4 mkdocs-awesome-pages-plugin mkdocs-minify-plugin mkdocs-redirects + mkdocs build --config-file mkdocs.yml diff --git a/NGINX_BASE 
b/NGINX_BASE index 02a35039a..14a7ee54b 100644 --- a/NGINX_BASE +++ b/NGINX_BASE @@ -1 +1 @@ -registry.k8s.io/ingress-nginx/nginx:v20230527@sha256:cf77c71aa6e4284925ca2233ddf871b5823eaa3ee000347ae25096b07fb52c57 +registry.k8s.io/ingress-nginx/nginx:v20230623-427f3d2fb@sha256:7b479f66872c0b1cb0f1315e305b8a3e9c6da846c7dd3855db99bc8cfd6791e1 diff --git a/README.md b/README.md index dc7ff3c38..812ddacb6 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ the versions listed. Ingress-Nginx versions may work on older versions but the p | | Ingress-NGINX version | k8s supported version | Alpine Version | Nginx Version | Helm Chart Version | |:--:|-----------------------|------------------------------|----------------|---------------|--------------------| +| 🔄 | **v1.8.1** | 1.27,1.26, 1.25, 1.24 | 3.18.2 | 1.21.6 | 4.7.* | | 🔄 | **v1.8.0** | 1.27,1.26, 1.25, 1.24 | 3.18.0 | 1.21.6 | 4.7.* | | 🔄 | **v1.7.1** | 1.27,1.26, 1.25, 1.24 | 3.17.2 | 1.21.6 | 4.6.* | | 🔄 | **v1.7.0** | 1.26, 1.25, 1.24 | 3.17.2 | 1.21.6 | 4.6.* | @@ -77,15 +78,15 @@ Thanks for taking the time to join our community and start contributing! - Read [`CONTRIBUTING.md`](CONTRIBUTING.md) for information about setting up your environment, the workflow that we expect, and instructions on the developer certificate of origin that we require. - Join our Kubernetes Slack channel for developer discussion : [#ingress-nginx-dev](https://kubernetes.slack.com/archives/C021E147ZA4). - - Submit GitHub issues for any feature enhancements, bugs or documentation problems. + - Submit GitHub issues for any feature enhancements, bugs or documentation problems. - Please make sure to read the [Issue Reporting Checklist](https://github.com/kubernetes/ingress-nginx/blob/main/CONTRIBUTING.md#issue-reporting-guidelines) before opening an issue. Issues not conforming to the guidelines **may be closed immediately**. - Join our [ingress-nginx-dev mailing list](https://groups.google.com/a/kubernetes.io/g/ingress-nginx-dev/c/ebbBMo-zX-w) -- **Support**: +- **Support**: - Join the [#ingress-nginx-users](https://kubernetes.slack.com/messages/CANQGM8BA/) channel inside the [Kubernetes Slack](http://slack.kubernetes.io/) to ask questions or get support from the maintainers and other users. - The [GitHub issues](https://github.com/kubernetes/ingress-nginx/issues) in the repository are **exclusively** for bug reports and feature requests. - **Discuss**: Tweet using the `#IngressNginx` hashtag or sharing with us [@IngressNginx](https://twitter.com/IngressNGINX). 
## License -[Apache License 2.0](https://github.com/kubernetes/ingress-nginx/blob/main/LICENSE) \ No newline at end of file +[Apache License 2.0](https://github.com/kubernetes/ingress-nginx/blob/main/LICENSE) diff --git a/TAG b/TAG index ba103d909..804a616da 100644 --- a/TAG +++ b/TAG @@ -1,2 +1 @@ v1.8.0 - diff --git a/build/run-in-docker.sh b/build/run-in-docker.sh index d2d6fcdd9..86e0e443f 100755 --- a/build/run-in-docker.sh +++ b/build/run-in-docker.sh @@ -44,7 +44,7 @@ function cleanup { } trap cleanup EXIT -E2E_IMAGE=${E2E_IMAGE:-registry.k8s.io/ingress-nginx/e2e-test-runner:v20230527@sha256:a98ce8ab90f16bdd8539b168a4d000f366afa4eec23a220b3ce39698c5769bfd} +E2E_IMAGE=${E2E_IMAGE:-registry.k8s.io/ingress-nginx/e2e-test-runner:v20230623-d50c7193b@sha256:e5c68dc56934c273850bfb75c0348a2819756669baf59fcdce9e16771537b247} if [[ "$RUNTIME" == podman ]]; then # Podman does not support both tag and digest diff --git a/changelog/Changelog-1.8.1.md b/changelog/Changelog-1.8.1.md new file mode 100644 index 000000000..c84a7ce18 --- /dev/null +++ b/changelog/Changelog-1.8.1.md @@ -0,0 +1,67 @@ +# Changelog + +### 1.8.1 +Images: + + * registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd + * registry.k8s.io/ingress-nginx/controller-chroot:v1.8.1@sha256:e0d4121e3c5e39de9122e55e331a32d5ebf8d4d257227cb93ab54a1b912a7627 + +### All Changes: + +* netlify: Only trigger preview when there are changes in docs. (#10144) +* changed to updated baseimage and reverted tag (#10143) +* Fix loadBalancerClass value (#10139) +* Added a doc line to the missing helm value service.internal.loadBalancerIP (#9406) +* Set grpc :authority header from request header (#8912) +* bump pinned golang to 1.20.5 (#10127) +* update test runner (#10125) +* chore: remove echo from snippet tests (#10110) +* Update typo in docs for lb scheme (#10117) +* golang 1.20.5 bump (#10120) +* feat(helm): Add loadBalancerClass (#9562) +* chore: remove echo friom canary tests (#10089) +* fix: obsolete warnings (#10029) +* docs: change Dockefile url ref main (#10087) +* Revert "Remove fastcgi feature" (#10081) +* docs: add netlify configuration (#10073) +* add distroless otel init (#10035) +* chore: move httpbun to be part of framework (#9955) +* Remove fastcgi feature (#9864) +* Fix mirror-target values without path separator and port (#9889) +* Adding feature to upgrade Oracle Cloud Infrastructure's Flexible Load Balancer and adjusting Health Check that were critical in the previous configuration (#9961) +* add support for keda fallback settings (#9993) +* unnecessary use of fmt.Sprint (S1039) (#10049) +* chore: pkg imported more than once (#10048) +* tracing: upgrade to dd-opentracing-cpp v1.3.7 (#10031) +* fix: add canary to sidebar in examples (#10068) +* docs: add lua testing documentation (#10060) +* docs: canary weighted deployments example (#10067) +* Update Internal Load Balancer docs (#10062) +* fix broken kubernetes.io/user-guide/ docs links (#10055) +* docs: Updated the content of deploy/rbac.md (#10054) +* ensured hpa mem spec before cpu spec (#10043) +* Fix typo in controller_test (#10034) +* chore(dep): upgrade github.com/emicklei/go-restful/v3 to 3.10 (#10028) +* Upgrade to Golang 1.20.4 (#10016) +* perf: avoid unnecessary byte/string conversion (#10012) +* added note on dns for localtesting (#10021) +* added helmshowvalues example (#10019) +* release controller 1.8.0 and chart 4.7.0 (#10017) + +### Dependencies updates: +* Bump ossf/scorecard-action from 2.1.3 to 
2.2.0 (#10133) +* Bump google.golang.org/grpc from 1.56.0 to 1.56.1 (#10134) +* Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 (#10106) +* Bump golang.org/x/crypto from 0.9.0 to 0.10.0 (#10105) +* Bump google.golang.org/grpc from 1.55.0 to 1.56.0 (#10103) +* Bump goreleaser/goreleaser-action from 4.2.0 to 4.3.0 (#10101) +* Bump docker/setup-buildx-action from 2.6.0 to 2.7.0 (#10102) +* Bump actions/checkout from 3.5.2 to 3.5.3 (#10076) +* Bump docker/setup-qemu-action from 2.1.0 to 2.2.0 (#10075) +* Bump aquasecurity/trivy-action from 0.10.0 to 0.11.2 (#10078) +* Bump docker/setup-buildx-action from 2.5.0 to 2.6.0 (#10077) +* Bump actions/dependency-review-action from 3.0.4 to 3.0.6 (#10042) +* Bump github.com/stretchr/testify from 1.8.3 to 1.8.4 (#10041) +* Bump github.com/stretchr/testify from 1.8.2 to 1.8.3 (#10005) + +**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/controller-controller-v1.8.0...controller-controller-v1.8.1 diff --git a/charts/ingress-nginx/Chart.yaml b/charts/ingress-nginx/Chart.yaml index 9786f705c..de5fe4df0 100644 --- a/charts/ingress-nginx/Chart.yaml +++ b/charts/ingress-nginx/Chart.yaml @@ -1,11 +1,12 @@ annotations: artifacthub.io/changes: | - - "helm: Fix opentelemetry module installation for daemonset (#9792)" - - "Update charts/* to keep project name display aligned (#9931)" - - "Update Ingress-Nginx version controller-v1.8.0" + - "Added a doc line to the missing helm value service.internal.loadBalancerIP (#9406)" + - "feat(helm): Add loadBalancerClass (#9562)" + - "added helmshowvalues example (#10019)" + - "Update Ingress-Nginx version controller-v1.8.1" artifacthub.io/prerelease: "false" apiVersion: v2 -appVersion: 1.8.0 +appVersion: 1.8.1 description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer engine: gotpl @@ -22,4 +23,4 @@ maintainers: name: ingress-nginx sources: - https://github.com/kubernetes/ingress-nginx -version: 4.7.0 +version: 4.7.1 diff --git a/charts/ingress-nginx/README.md b/charts/ingress-nginx/README.md index 4a00815fd..955091873 100644 --- a/charts/ingress-nginx/README.md +++ b/charts/ingress-nginx/README.md @@ -2,7 +2,7 @@ [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer -![Version: 4.7.0](https://img.shields.io/badge/Version-4.7.0-informational?style=flat-square) ![AppVersion: 1.8.0](https://img.shields.io/badge/AppVersion-1.8.0-informational?style=flat-square) +![Version: 4.7.1](https://img.shields.io/badge/Version-4.7.1-informational?style=flat-square) ![AppVersion: 1.8.1](https://img.shields.io/badge/AppVersion-1.8.1-informational?style=flat-square) To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. @@ -143,8 +143,10 @@ controller: internal: enabled: true annotations: - # Create internal ELB - service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Create internal NLB + service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + # Create internal ELB(Deprecated) + # service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Any other annotation can be declared here. ``` @@ -187,6 +189,8 @@ controller: # Any other annotation can be declared here. ``` +The load balancer annotations of more cloud service providers can be found: [Internal load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer). 
+ An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. @@ -309,13 +313,13 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | controller.hostname | object | `{}` | Optionally customize the pod hostname. | | controller.image.allowPrivilegeEscalation | bool | `true` | | | controller.image.chroot | bool | `false` | | -| controller.image.digest | string | `"sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3"` | | -| controller.image.digestChroot | string | `"sha256:a45e41cd2b7670adf829759878f512d4208d0aec1869dae593a0fecd09a5e49e"` | | +| controller.image.digest | string | `"sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd"` | | +| controller.image.digestChroot | string | `"sha256:e0d4121e3c5e39de9122e55e331a32d5ebf8d4d257227cb93ab54a1b912a7627"` | | | controller.image.image | string | `"ingress-nginx/controller"` | | | controller.image.pullPolicy | string | `"IfNotPresent"` | | | controller.image.registry | string | `"registry.k8s.io"` | | | controller.image.runAsUser | int | `101` | | -| controller.image.tag | string | `"v1.8.0"` | | +| controller.image.tag | string | `"v1.8.1"` | | | controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | | controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | | controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | @@ -352,7 +356,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | controller.metrics.prometheusRule.enabled | bool | `false` | | | controller.metrics.prometheusRule.rules | list | `[]` | | | controller.metrics.service.annotations | object | `{}` | | -| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips # | | controller.metrics.service.labels | object | `{}` | Labels to be added to the metrics service resource | | controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | | controller.metrics.service.servicePort | int | `10254` | | @@ -368,7 +372,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | controller.minAvailable | int | `1` | Minimum available pods set in PodDisruptionBudget. Define either 'minAvailable' or 'maxUnavailable', never both. 
| | controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | | controller.name | string | `"controller"` | | -| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ # | | controller.opentelemetry.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | | controller.opentelemetry.enabled | bool | `false` | | | controller.opentelemetry.image | string | `"registry.k8s.io/ingress-nginx/opentelemetry:v20230527@sha256:fd7ec835f31b7b37187238eb4fdad4438806e69f413a203796263131f4f02ed0"` | | @@ -401,15 +405,17 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | controller.service.enableHttps | bool | `true` | | | controller.service.enabled | bool | `true` | | | controller.service.external.enabled | bool | `true` | | -| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips # | | controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | | controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting internal LoadBalancer to a pre-existing static IP. Make sure to add to the service the needed annotation to specify the subnet which the static IP belongs to. For instance, `networking.gke.io/internal-load-balancer-subnet` for GCP and `service.beta.kubernetes.io/aws-load-balancer-subnets` for AWS. | | controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | | controller.service.internal.ports | object | `{}` | Custom port mapping for internal service | | controller.service.internal.targetPorts | object | `{}` | Custom target port mapping for internal service | | controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | | controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | | controller.service.labels | object | `{}` | | +| controller.service.loadBalancerClass | string | `""` | Used by cloud providers to select a load balancer implementation other than the cloud provider default. 
https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class | | controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | | controller.service.loadBalancerSourceRanges | list | `[]` | | | controller.service.nodePorts.http | string | `""` | | @@ -463,7 +469,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | defaultBackend.minAvailable | int | `1` | | | defaultBackend.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | | defaultBackend.name | string | `"defaultbackend"` | | -| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ # | | defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | | defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | | defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | @@ -477,7 +483,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu | defaultBackend.replicaCount | int | `1` | | | defaultBackend.resources | object | `{}` | | | defaultBackend.service.annotations | object | `{}` | | -| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips # | | defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | | defaultBackend.service.servicePort | int | `80` | | | defaultBackend.service.type | string | `"ClusterIP"` | | diff --git a/charts/ingress-nginx/README.md.gotmpl b/charts/ingress-nginx/README.md.gotmpl index 4a35a40b3..17b029bbf 100644 --- a/charts/ingress-nginx/README.md.gotmpl +++ b/charts/ingress-nginx/README.md.gotmpl @@ -140,8 +140,10 @@ controller: internal: enabled: true annotations: - # Create internal ELB - service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Create internal NLB + service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + # Create internal ELB(Deprecated) + # service.beta.kubernetes.io/aws-load-balancer-internal: "true" # Any other annotation can be declared here. ``` @@ -184,6 +186,8 @@ controller: # Any other annotation can be declared here. ``` +The load balancer annotations of more cloud service providers can be found: [Internal load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer). + An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. 
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. diff --git a/charts/ingress-nginx/changelog/Changelog-4.7.1.md b/charts/ingress-nginx/changelog/Changelog-4.7.1.md new file mode 100644 index 000000000..4d69a7117 --- /dev/null +++ b/charts/ingress-nginx/changelog/Changelog-4.7.1.md @@ -0,0 +1,12 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). + +### 4.7.1 + +* Added a doc line to the missing helm value service.internal.loadBalancerIP (#9406) +* feat(helm): Add loadBalancerClass (#9562) +* added helmshowvalues example (#10019) +* Update Ingress-Nginx version controller-v1.8.1 + +**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.7.0...helm-chart-4.7.1 diff --git a/charts/ingress-nginx/templates/_helpers.tpl b/charts/ingress-nginx/templates/_helpers.tpl index 7db5b2ca8..548e8cf12 100644 --- a/charts/ingress-nginx/templates/_helpers.tpl +++ b/charts/ingress-nginx/templates/_helpers.tpl @@ -201,8 +201,12 @@ Extra modules. - name: {{ .name }} image: {{ .image }} + {{- if .distroless | default false }} + command: ['/init_module'] + {{- else }} command: ['sh', '-c', '/usr/local/bin/init_module.sh'] - {{- if (.containerSecurityContext) }} + {{- end }} + {{- if .containerSecurityContext }} securityContext: {{ .containerSecurityContext | toYaml | nindent 4 }} {{- end }} volumeMounts: diff --git a/charts/ingress-nginx/templates/controller-deployment.yaml b/charts/ingress-nginx/templates/controller-deployment.yaml index 323d87623..7fe8804ea 100644 --- a/charts/ingress-nginx/templates/controller-deployment.yaml +++ b/charts/ingress-nginx/templates/controller-deployment.yaml @@ -190,7 +190,7 @@ spec: {{- end }} {{- if .Values.controller.opentelemetry.enabled}} {{ $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }} - {{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext) | nindent 8}} + {{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext "distroless" false) | nindent 8}} {{- end}} {{- end }} {{- if .Values.controller.hostNetwork }} diff --git a/charts/ingress-nginx/templates/controller-keda.yaml b/charts/ingress-nginx/templates/controller-keda.yaml index 875157ea4..c0d95a98e 100644 --- a/charts/ingress-nginx/templates/controller-keda.yaml +++ b/charts/ingress-nginx/templates/controller-keda.yaml @@ -25,6 +25,11 @@ spec: cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} minReplicaCount: {{ .Values.controller.keda.minReplicas }} maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} +{{- with .Values.controller.keda.fallback }} + fallback: + failureThreshold: {{ .failureThreshold | default 3 }} + replicas: {{ .replicas | default $.Values.controller.keda.maxReplicas }} +{{- end }} triggers: {{- with .Values.controller.keda.triggers }} {{ toYaml . 
| indent 2 }} diff --git a/charts/ingress-nginx/templates/controller-service.yaml b/charts/ingress-nginx/templates/controller-service.yaml index 2b28196de..b2735d2e8 100644 --- a/charts/ingress-nginx/templates/controller-service.yaml +++ b/charts/ingress-nginx/templates/controller-service.yaml @@ -28,6 +28,9 @@ spec: {{- if .Values.controller.service.loadBalancerSourceRanges }} loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} {{- end }} +{{- if .Values.controller.service.loadBalancerClass }} + loadBalancerClass: {{ .Values.controller.service.loadBalancerClass }} +{{- end }} {{- if .Values.controller.service.externalTrafficPolicy }} externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} {{- end }} diff --git a/charts/ingress-nginx/values.yaml b/charts/ingress-nginx/values.yaml index 7ca41e79e..d091391a8 100644 --- a/charts/ingress-nginx/values.yaml +++ b/charts/ingress-nginx/values.yaml @@ -23,9 +23,9 @@ controller: ## for backwards compatibility consider setting the full image url via the repository value below ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail ## repository: - tag: "v1.8.0" - digest: sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 - digestChroot: sha256:a45e41cd2b7670adf829759878f512d4208d0aec1869dae593a0fecd09a5e49e + tag: "v1.8.1" + digest: sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd + digestChroot: sha256:e0d4121e3c5e39de9122e55e331a32d5ebf8d4d257227cb93ab54a1b912a7627 pullPolicy: IfNotPresent # www-data -> uid 101 runAsUser: 101 @@ -257,7 +257,7 @@ controller: ## terminationGracePeriodSeconds: 300 # -- Node labels for controller pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: kubernetes.io/os: linux @@ -368,6 +368,9 @@ controller: maxReplicas: 11 pollingInterval: 30 cooldownPeriod: 300 + # fallback: + # failureThreshold: 3 + # replicas: 11 restoreToOriginalReplicaCount: false scaledObject: annotations: {} @@ -417,12 +420,14 @@ controller: # clusterIP: "" # -- List of IP addresses at which the controller services are available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips ## externalIPs: [] # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer loadBalancerIP: "" loadBalancerSourceRanges: [] + # -- Used by cloud providers to select a load balancer implementation other than the cloud provider default. https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" enableHttp: true enableHttps: true ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. @@ -473,8 +478,8 @@ controller: enabled: false # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. annotations: {} - # loadBalancerIP: "" - + # -- Used by cloud providers to connect the resulting internal LoadBalancer to a pre-existing static IP. Make sure to add to the service the needed annotation to specify the subnet which the static IP belongs to. 
For instance, `networking.gke.io/internal-load-balancer-subnet` for GCP and `service.beta.kubernetes.io/aws-load-balancer-subnets` for AWS. + loadBalancerIP: "" # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. loadBalancerSourceRanges: [] ## Set external traffic policy to: "Local" to preserve source IP on @@ -652,7 +657,7 @@ controller: # clusterIP: "" # -- List of IP addresses at which the stats-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips ## externalIPs: [] # loadBalancerIP: "" @@ -810,7 +815,7 @@ defaultBackend: # key: value # -- Node labels for default backend pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: kubernetes.io/os: linux @@ -849,7 +854,7 @@ defaultBackend: # clusterIP: "" # -- List of IP addresses at which the default backend service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips ## externalIPs: [] # loadBalancerIP: "" diff --git a/cmd/dataplane/main.go b/cmd/dataplane/main.go index 6fd559e4d..a1c4cbcc6 100644 --- a/cmd/dataplane/main.go +++ b/cmd/dataplane/main.go @@ -18,13 +18,10 @@ package main import ( "fmt" - "math/rand" // #nosec - "net/http" - "os" - "time" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + "net/http" + "os" "k8s.io/klog/v2" @@ -41,8 +38,6 @@ import ( func main() { klog.InitFlags(nil) - rand.Seed(time.Now().UnixNano()) - fmt.Println(version.String()) var err error showVersion, conf, err := ingressflags.ParseFlags() diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 48dd933dc..508e940e1 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -19,7 +19,6 @@ package main import ( "context" "fmt" - "math/rand" // #nosec "net/http" "os" "path/filepath" @@ -54,8 +53,6 @@ import ( func main() { klog.InitFlags(nil) - rand.Seed(time.Now().UnixNano()) - fmt.Println(version.String()) showVersion, conf, err := ingressflags.ParseFlags() diff --git a/cmd/plugin/commands/certs/certs.go b/cmd/plugin/commands/certs/certs.go index 88b721ee3..1f08b5216 100644 --- a/cmd/plugin/commands/certs/certs.go +++ b/cmd/plugin/commands/certs/certs.go @@ -18,6 +18,7 @@ package certs import ( "fmt" + "os" "github.com/spf13/cobra" @@ -46,7 +47,10 @@ func CreateCommand(flags *genericclioptions.ConfigFlags) *cobra.Command { } cmd.Flags().String("host", "", "Get the cert for this hostname") - cobra.MarkFlagRequired(cmd.Flags(), "host") + if err := cobra.MarkFlagRequired(cmd.Flags(), "host"); err != nil { + util.PrintError(err) + os.Exit(1) + } pod = util.AddPodFlag(cmd) deployment = util.AddDeploymentFlag(cmd) selector = util.AddSelectorFlag(cmd) diff --git a/cmd/plugin/kubectl/kubectl.go b/cmd/plugin/kubectl/kubectl.go index 3f31a2104..1171e9218 100644 --- a/cmd/plugin/kubectl/kubectl.go +++ b/cmd/plugin/kubectl/kubectl.go @@ -77,7 +77,9 @@ func execToWriter(args []string, writer io.Writer) error { return err } - go io.Copy(writer, op) + go func() { + io.Copy(writer, op) //nolint:errcheck + }() err = cmd.Run() if err != nil { return err diff --git a/deploy/static/provider/aws/deploy.yaml b/deploy/static/provider/aws/deploy.yaml index 48f7f11b1..f22f3a9c1 100644 --- a/deploy/static/provider/aws/deploy.yaml 
+++ b/deploy/static/provider/aws/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -328,7 +328,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -344,7 +344,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -377,7 +377,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - 
app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -400,7 +400,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -418,7 +418,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -442,7 +442,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -514,7 +514,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -525,7 +525,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -561,7 +561,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -572,7 +572,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -610,7 +610,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -623,7 +623,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml b/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml index b4e2cd5f6..e9ae85143 100644 --- a/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml +++ b/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + 
app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -335,7 +335,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -353,7 +353,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -386,7 +386,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -409,7 +409,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -427,7 +427,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: 
ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -451,7 +451,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -526,7 +526,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -537,7 +537,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -573,7 +573,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -584,7 +584,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -622,7 +622,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -635,7 +635,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/baremetal/deploy.yaml b/deploy/static/provider/baremetal/deploy.yaml index 74292b82b..b66da7d45 100644 --- a/deploy/static/provider/baremetal/deploy.yaml +++ b/deploy/static/provider/baremetal/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: 
ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -328,7 +328,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -340,7 +340,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -372,7 +372,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -395,7 +395,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -413,7 +413,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -436,7 +436,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -508,7 +508,7 @@ 
metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -519,7 +519,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -555,7 +555,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -566,7 +566,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -604,7 +604,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -617,7 +617,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/cloud/deploy.yaml b/deploy/static/provider/cloud/deploy.yaml index 6dac63865..659da1d1b 100644 --- a/deploy/static/provider/cloud/deploy.yaml +++ b/deploy/static/provider/cloud/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: 
app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -328,7 +328,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -340,7 +340,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -373,7 +373,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -396,7 +396,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -414,7 +414,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -438,7 +438,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -510,7 +510,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -521,7 +521,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -557,7 +557,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx 
app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -568,7 +568,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -606,7 +606,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -619,7 +619,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/do/deploy.yaml b/deploy/static/provider/do/deploy.yaml index 0e3f4b46f..434f21ead 100644 --- a/deploy/static/provider/do/deploy.yaml +++ b/deploy/static/provider/do/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: 
ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -329,7 +329,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -343,7 +343,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -376,7 +376,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -399,7 +399,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -417,7 +417,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -441,7 +441,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -513,7 +513,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -524,7 +524,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -560,7 +560,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -571,7 +571,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -609,7 +609,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: 
ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -622,7 +622,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/exoscale/deploy.yaml b/deploy/static/provider/exoscale/deploy.yaml index a1bd4ed00..c9dff62cb 100644 --- a/deploy/static/provider/exoscale/deploy.yaml +++ b/deploy/static/provider/exoscale/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -328,7 +328,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx 
app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -349,7 +349,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -382,7 +382,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -405,7 +405,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -423,7 +423,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -447,7 +447,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -519,7 +519,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -530,7 +530,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -566,7 +566,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -577,7 +577,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -615,7 +615,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -628,7 +628,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/kind/deploy.yaml b/deploy/static/provider/kind/deploy.yaml index 41d17d8d1..f9965d0fe 100644 --- a/deploy/static/provider/kind/deploy.yaml +++ 
b/deploy/static/provider/kind/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -328,7 +328,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -340,7 +340,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -372,7 +372,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - 
app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -395,7 +395,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -417,7 +417,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -442,7 +442,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -524,7 +524,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -535,7 +535,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -571,7 +571,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -582,7 +582,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -620,7 +620,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -633,7 +633,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/deploy/static/provider/oracle/deploy.yaml b/deploy/static/provider/oracle/deploy.yaml new file mode 100644 index 000000000..72556bb42 --- /dev/null +++ b/deploy/static/provider/oracle/deploy.yaml @@ -0,0 +1,649 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + name: ingress-nginx +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: ServiceAccount 
+metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx + namespace: ingress-nginx +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resourceNames: + - ingress-nginx-leader + resources: + - leases + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission + namespace: ingress-nginx +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - 
validatingwebhookconfigurations + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: +- kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: +- kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: +- kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: +- kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-controller + namespace: ingress-nginx +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/oci-load-balancer-shape: flexible + service.beta.kubernetes.io/oci-load-balancer-shape-flex-max: "100" + service.beta.kubernetes.io/oci-load-balancer-shape-flex-min: "10" + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + externalTrafficPolicy: Local + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - appProtocol: http + name: http + port: 80 + protocol: TCP + targetPort: http + - appProtocol: https + name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: 
ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + ports: + - appProtocol: https + name: https-webhook + port: 443 + targetPort: webhook + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + spec: + containers: + - args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission-create + namespace: ingress-nginx +spec: + template: + metadata: + labels: + 
app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission-create + spec: + containers: + - args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b + imagePullPolicy: IfNotPresent + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission-patch + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b + imagePullPolicy: IfNotPresent + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.8.1 + name: ingress-nginx-admission +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: ingress-nginx + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/deploy/static/provider/oracle/kustomization.yaml 
b/deploy/static/provider/oracle/kustomization.yaml new file mode 100644 index 000000000..5c1dcff96 --- /dev/null +++ b/deploy/static/provider/oracle/kustomization.yaml @@ -0,0 +1,11 @@ +# NOTE: kustomize is not supported. This file exists only to be able to reference it from bases. +# https://kubectl.docs.kubernetes.io/references/kustomize/bases/ +# +# ``` +# namespace: ingress-nginx +# bases: +# - github.com/kubernetes/ingress-nginx/tree/main/deploy/static/provider/oracle +# ``` + +resources: + - deploy.yaml diff --git a/deploy/static/provider/scw/deploy.yaml b/deploy/static/provider/scw/deploy.yaml index e303a52f5..8b4750992 100644 --- a/deploy/static/provider/scw/deploy.yaml +++ b/deploy/static/provider/scw/deploy.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx --- @@ -27,7 +27,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx --- @@ -39,7 +39,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx rules: @@ -129,7 +129,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx rules: @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx rules: - apiGroups: @@ -230,7 +230,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission rules: - apiGroups: @@ -249,7 +249,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx namespace: ingress-nginx roleRef: @@ -269,7 +269,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission namespace: ingress-nginx roleRef: @@ -288,7 +288,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io @@ -307,7 +307,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission roleRef: apiGroup: rbac.authorization.k8s.io @@ -329,7 +329,7 @@ metadata: 
app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx --- @@ -343,7 +343,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -376,7 +376,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller-admission namespace: ingress-nginx spec: @@ -399,7 +399,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-controller namespace: ingress-nginx spec: @@ -417,7 +417,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 spec: containers: - args: @@ -441,7 +441,7 @@ spec: fieldPath: metadata.namespace - name: LD_PRELOAD value: /usr/local/lib/libmimalloc.so - image: registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3 + image: registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:e5c4824e7375fcf2a393e1c03c293b69759af37a9ca6abdb91b13d78a93da8bd imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -513,7 +513,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create namespace: ingress-nginx spec: @@ -524,7 +524,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-create spec: containers: @@ -560,7 +560,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch namespace: ingress-nginx spec: @@ -571,7 +571,7 @@ spec: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission-patch spec: containers: @@ -609,7 +609,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: nginx spec: controller: k8s.io/ingress-nginx @@ -622,7 +622,7 @@ metadata: app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - app.kubernetes.io/version: 1.8.0 + app.kubernetes.io/version: 1.8.1 name: ingress-nginx-admission webhooks: - admissionReviewVersions: diff --git a/docs/deploy/index.md b/docs/deploy/index.md index 76a28b6ad..d719d4a57 100644 --- 
a/docs/deploy/index.md +++ b/docs/deploy/index.md @@ -68,7 +68,7 @@ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx **If you don't have Helm** or if you prefer to use a YAML manifest, you can run the following command instead: ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/cloud/deploy.yaml ``` !!! info @@ -243,7 +243,7 @@ In AWS, we use a Network load balancer (NLB) to expose the Ingress-Nginx Control ##### Network Load Balancer (NLB) ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/aws/deploy.yaml ``` ##### TLS termination in AWS Load Balancer (NLB) @@ -251,10 +251,10 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/cont By default, TLS is terminated in the ingress controller. But it is also possible to terminate TLS in the Load Balancer. This section explains how to do that on AWS using an NLB. -1. Download the [deploy.yaml](https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml) template +1. Download the [deploy.yaml](https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml) template ```console - wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml + wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/aws/nlb-with-tls-termination/deploy.yaml ``` 2. Edit the file and change the VPC CIDR in use for the Kubernetes cluster: @@ -300,7 +300,7 @@ Then, the ingress controller can be installed like this: ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/cloud/deploy.yaml ``` !!! warning @@ -317,7 +317,7 @@ Proxy-protocol is supported in GCE check the [Official Documentations on how to #### Azure ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/cloud/deploy.yaml ``` More information with regard to Azure annotations for ingress controller can be found in the [official AKS documentation](https://docs.microsoft.com/en-us/azure/aks/ingress-internal-ip#create-an-ingress-controller). 
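Since every static manifest in this release moves the controller from `1.8.0` to `1.8.1`, it can be worth confirming which image a cluster actually ends up running after re-applying one of the provider manifests above. A minimal check, assuming the default `ingress-nginx` namespace and the `ingress-nginx-controller` Deployment name used throughout these manifests:

```console
kubectl -n ingress-nginx get deployment ingress-nginx-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```

The output should match the pinned `registry.k8s.io/ingress-nginx/controller:v1.8.1@sha256:...` reference used in the deploy.yaml files in this change.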
@@ -325,7 +325,7 @@ More information with regard to Azure annotations for ingress controller can be #### Digital Ocean ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/do/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/do/deploy.yaml ``` - By default, the service object of the ingress-nginx-controller for Digital-Ocean only configures one annotation: `service.beta.kubernetes.io/do-loadbalancer-enable-proxy-protocol: "true"`. While this makes the service functional, it was reported that the Digital-Ocean LoadBalancer graphs show `no data` unless a few other annotations are also configured. Some of these other annotations require values that cannot be generic and hence are not forced in an out-of-the-box installation. These annotations, and a discussion of them, are well documented in [this issue](https://github.com/kubernetes/ingress-nginx/issues/8965). Please refer to the issue to add annotations, with values specific to the user, to get the DO-LB graphs populated with data. @@ -333,7 +333,7 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/cont #### Scaleway ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/scw/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/scw/deploy.yaml ``` #### Exoscale @@ -348,7 +348,7 @@ The full list of annotations supported by Exoscale is available in the Exoscale #### Oracle Cloud Infrastructure ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/cloud/deploy.yaml ``` A @@ -375,7 +375,7 @@ For quick testing, you can use a This should work on almost every cluster, but it will typically use a port in the range 30000-32767. ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/baremetal/deploy.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/baremetal/deploy.yaml ``` For more information about bare metal deployments (and how to use port 80 instead of a random port in the 30000-32767 range), diff --git a/docs/deploy/rbac.md b/docs/deploy/rbac.md index 8c36d19a7..70af8ba92 100644 --- a/docs/deploy/rbac.md +++ b/docs/deploy/rbac.md @@ -29,39 +29,38 @@ namespace specific permissions defined by the `Role` named `ingress-nginx`. These permissions are granted in order for the ingress-nginx-controller to be able to function as an ingress across the cluster. These permissions are -granted to the ClusterRole named `ingress-nginx` +granted to the `ClusterRole` named `ingress-nginx` * `configmaps`, `endpoints`, `nodes`, `pods`, `secrets`: list, watch * `nodes`: get -* `services`, `ingresses`: get, list, watch +* `services`, `ingresses`, `ingressclasses`, `endpointslices`: get, list, watch * `events`: create, patch * `ingresses/status`: update +* `leases`: list, watch ### Namespace Permissions These permissions are granted specific to the ingress-nginx namespace.
These -permissions are granted to the Role named `ingress-nginx` +permissions are granted to the `Role` named `ingress-nginx` * `configmaps`, `pods`, `secrets`: get * `endpoints`: get Furthermore to support leader-election, the ingress-nginx-controller needs to -have access to a `configmap` using the resourceName `ingress-controller-leader-nginx` +have access to a `leases` using the resourceName `ingress-nginx-leader` > Note that resourceNames can NOT be used to limit requests using the “create” > verb because authorizers only have access to information that can be obtained > from the request URL, method, and headers (resource names in a “create” request > are part of the request body). -* `configmaps`: get, update (for resourceName `ingress-controller-leader-nginx`) -* `configmaps`: create +* `leases`: get, update (for resourceName `ingress-controller-leader`) +* `leases`: create -This resourceName is the concatenation of the `election-id` and the -`ingress-class` as defined by the ingress-controller, which defaults to: +This resourceName is the `election-id` defined by the ingress-controller, which defaults to: * `election-id`: `ingress-controller-leader` -* `ingress-class`: `nginx` -* `resourceName` : `-` +* `resourceName` : `` Please adapt accordingly if you overwrite either parameter when launching the ingress-nginx-controller. diff --git a/docs/e2e-tests.md b/docs/e2e-tests.md index 025ff686d..c45b1e72c 100644 --- a/docs/e2e-tests.md +++ b/docs/e2e-tests.md @@ -55,42 +55,6 @@ Do not try to edit it manually. - [should redirect to /foo](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/approot.go#L35) -### [auth-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L40) - -- [should return status code 200 when no authentication is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L47) -- [should return status code 503 when authentication is configured with an invalid secret](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L66) -- [should return status code 401 when authentication is configured but Authorization header is not configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L90) -- [should return status code 401 when authentication is configured and Authorization header is sent with invalid credentials](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L117) -- [should return status code 401 and cors headers when authentication and cors is configured but Authorization header is not configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L145) -- [should return status code 200 when authentication is configured and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L173) -- [should return status code 200 when authentication is configured with a map and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L200) -- [should return status code 401 when authentication is configured with invalid content and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L228) -- [ when external auth is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L267) -- [ when external auth is not 
configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L285) -- [ when auth-headers are set](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L302) -- [should set cache_key when external auth cache is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L323) -- [user retains cookie by default](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L412) -- [user does not retain cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L423) -- [user with annotated ingress retains cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L434) -- [should return status code 200 when signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L486) -- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L495) -- [keeps processing new ingresses even if one of the existing ingresses is misconfigured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L506) -- [should overwrite Foo header with auth response](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L530) -- [should not create additional upstream block when auth-keepalive is not set](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L553) -- [should not create additional upstream block when host part of auth-url contains a variable](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L571) -- [should not create additional upstream block when auth-keepalive is negative](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L591) -- [should not create additional upstream block when auth-keepalive is set with HTTP/2](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L610) -- [should create additional upstream block when auth-keepalive is set with HTTP/1.x](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L624) -- [should return status code 200 when signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L679) -- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L688) -- [keeps processing new ingresses even if one of the existing ingresses is misconfigured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L699) -- [should return status code 200 when signed in after auth backend is deleted ](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L773) -- [should deny login for different location on same server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L793) -- [should deny login for different servers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L821) -- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L850) -- [should return 503 (location was denied)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L880) -- [should add error to the 
config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L888) - ### [auth-tls-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/authtls.go#L29) - [should set sslClientCertificate, sslVerifyClient and sslVerifyDepth with auth-tls-secret](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/authtls.go#L36) @@ -109,35 +73,6 @@ Do not try to edit it manually. - [should set backend protocol to grpc:// and use grpc_pass](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/backendprotocol.go#L64) - [should set backend protocol to grpcs:// and use grpc_pass](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/backendprotocol.go#L79) - [should set backend protocol to '' and use fastcgi_pass](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/backendprotocol.go#L94) -- [should set backend protocol to '' and use ajp_pass](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/backendprotocol.go#L109) - -### [canary-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L36) - -- [should response with a 200 status from the mainline upstream when requests are made to the mainline ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L48) -- [should return 404 status for requests to the canary if no matching ingress is found](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L80) -- [should return the correct status codes when endpoints are unavailable](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L107) -- [should route requests to the correct upstream if mainline ingress is created before the canary ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L161) -- [should route requests to the correct upstream if mainline ingress is created after the canary ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L206) -- [should route requests to the correct upstream if the mainline ingress is modified](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L250) -- [should route requests to the correct upstream if the canary ingress is modified](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L307) -- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L372) -- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L426) -- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L490) -- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L532) -- [should routes to mainline upstream when the given Regex causes error](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L566) -- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L604) -- [respects always and never values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L643) -- [should route requests only to mainline if canary weight is 
0](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L705) -- [should route requests only to canary if canary weight is 100](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L743) -- [should route requests only to canary if canary weight is equal to canary weight total](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L775) -- [should route requests split between mainline and canary if canary weight is 50](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L808) -- [should route requests split between mainline and canary if canary weight is 100 and weight total is 200](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L834) -- [should not use canary as a catch-all server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L863) -- [should not use canary with domain as a server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L891) -- [does not crash when canary ingress has multiple paths to the same non-matching backend](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L915) -- [always routes traffic to canary if first request was affinitized to canary (default behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L943) -- [always routes traffic to canary if first request was affinitized to canary (explicit sticky behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1000) -- [routes traffic to either mainline or canary backend (legacy behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1058) ### [client-body-buffer-size](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/clientbodybuffersize.go#L28) @@ -148,10 +83,6 @@ Do not try to edit it manually. - [should set client_body_buffer_size to 1M](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/clientbodybuffersize.go#L123) - [should not set client_body_buffer_size to invalid 1b](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/clientbodybuffersize.go#L145) -### [connection-proxy-header](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/connection.go#L29) - -- [set connection header to keep-alive](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/connection.go#L36) - ### [cors-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/cors.go#L28) - [should enable cors](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/cors.go#L35) @@ -212,13 +143,6 @@ Do not try to edit it manually. 
- [generates correct configuration](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/globalratelimit.go#L38) -### [backend-protocol - GRPC](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L40) - -- [should use grpc_pass in the configuration file](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L43) -- [should return OK for service with backend protocol GRPC](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L68) -- [authorization metadata should be overwritten by external auth response headers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L126) -- [should return OK for service with backend protocol GRPCS](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L199) - ### [http2-push-preload](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/http2pushpreload.go#L27) - [enable the http2-push-preload directive](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/http2pushpreload.go#L34) @@ -245,12 +169,6 @@ Do not try to edit it manually. - [set access_log off](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/log.go#L34) - [set rewrite_log on](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/log.go#L49) -### [mirror-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L28) - -- [should set mirror-target to http://localhost/mirror](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L36) -- [should set mirror-target to https://test.env.com/$request_uri](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L51) -- [should disable mirror-request-body](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L67) - ### [modsecurity owasp](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/modsecurity/modsecurity.go#L28) - [should enable modsecurity](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/modsecurity/modsecurity.go#L35) @@ -306,11 +224,6 @@ Do not try to edit it manually. - [should fail to use longest match for documented warning](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/rewrite.go#L158) - [should allow for custom rewrite parameters](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/rewrite.go#L190) -### [satisfy](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L35) - -- [should configure satisfy directive correctly](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L42) -- [should allow multiple auth with satisfy any](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L84) - ### [server-snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/serversnippet.go#L28) - [add valid directives to server via server snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/serversnippet.go#L35) @@ -322,11 +235,6 @@ Do not try to edit it manually. 
- [should use the Service Cluster IP and Port ](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/serviceupstream.go#L70) - [should not use the Service Cluster IP and Port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/serviceupstream.go#L99) -### [configuration-snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L28) - -- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L35) -- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L58) - ### [ssl-ciphers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/sslciphers.go#L28) - [should change ssl ciphers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/sslciphers.go#L35) @@ -350,6 +258,97 @@ Do not try to edit it manually. - [should set the X-Forwarded-Prefix to the annotation value](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/xforwardedprefix.go#L35) - [should not add X-Forwarded-Prefix if the annotation value is empty](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/xforwardedprefix.go#L57) +### [auth-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L39) + +- [should return status code 200 when no authentication is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L46) +- [should return status code 503 when authentication is configured with an invalid secret](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L65) +- [should return status code 401 when authentication is configured but Authorization header is not configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L89) +- [should return status code 401 when authentication is configured and Authorization header is sent with invalid credentials](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L116) +- [should return status code 401 and cors headers when authentication and cors is configured but Authorization header is not configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L144) +- [should return status code 200 when authentication is configured and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L172) +- [should return status code 200 when authentication is configured with a map and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L199) +- [should return status code 401 when authentication is configured with invalid content and Authorization header is sent](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L227) +- [ when external auth is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L266) +- [ when external auth is not configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L284) +- [ when auth-headers are set](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L301) +- [should set cache_key when external auth cache is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L322) +- [user retains cookie by 
default](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L411) +- [user does not retain cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L422) +- [user with annotated ingress retains cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L433) +- [should return status code 200 when signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L472) +- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L481) +- [keeps processing new ingresses even if one of the existing ingresses is misconfigured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L492) +- [should overwrite Foo header with auth response](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L516) +- [should not create additional upstream block when auth-keepalive is not set](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L539) +- [should not create additional upstream block when host part of auth-url contains a variable](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L557) +- [should not create additional upstream block when auth-keepalive is negative](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L577) +- [should not create additional upstream block when auth-keepalive is set with HTTP/2](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L596) +- [should create additional upstream block when auth-keepalive is set with HTTP/1.x](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L610) +- [should return status code 200 when signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L653) +- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L662) +- [keeps processing new ingresses even if one of the existing ingresses is misconfigured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L673) +- [should return status code 200 when signed in after auth backend is deleted ](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L732) +- [should deny login for different location on same server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L752) +- [should deny login for different servers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L780) +- [should redirect to signin url when not signed in](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L809) +- [should return 503 (location was denied)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L839) +- [should add error to the config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/auth.go#L847) + +### [canary-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L36) + +- [should response with a 200 status from the mainline upstream when requests are made to the mainline ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L45) +- [should 
return 404 status for requests to the canary if no matching ingress is found](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L89) +- [should return the correct status codes when endpoints are unavailable](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L120) +- [should route requests to the correct upstream if mainline ingress is created before the canary ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L174) +- [should route requests to the correct upstream if mainline ingress is created after the canary ingress](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L232) +- [should route requests to the correct upstream if the mainline ingress is modified](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L289) +- [should route requests to the correct upstream if the canary ingress is modified](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L363) +- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L445) +- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L513) +- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L594) +- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L647) +- [should routes to mainline upstream when the given Regex causes error](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L692) +- [should route requests to the correct upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L741) +- [respects always and never values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L790) +- [should route requests only to mainline if canary weight is 0](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L862) +- [should route requests only to canary if canary weight is 100](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L910) +- [should route requests only to canary if canary weight is equal to canary weight total](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L952) +- [should route requests split between mainline and canary if canary weight is 50](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L995) +- [should route requests split between mainline and canary if canary weight is 100 and weight total is 200](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1031) +- [should not use canary as a catch-all server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1070) +- [should not use canary with domain as a server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1104) +- [does not crash when canary ingress has multiple paths to the same non-matching backend](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1138) +- [always routes traffic to canary if first request was affinitized to canary (default 
behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1175) +- [always routes traffic to canary if first request was affinitized to canary (explicit sticky behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1242) +- [routes traffic to either mainline or canary backend (legacy behavior)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/canary.go#L1310) + +### [connection-proxy-header](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/connection.go#L28) + +- [set connection header to keep-alive](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/connection.go#L35) + +### [mirror-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L28) + +- [should set mirror-target to http://localhost/mirror](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L36) +- [should set mirror-target to https://test.env.com/$request_uri](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L51) +- [should disable mirror-request-body](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/mirror.go#L67) + +### [satisfy](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L33) + +- [should configure satisfy directive correctly](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L40) +- [should allow multiple auth with satisfy any](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/satisfy.go#L82) + +### [configuration-snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L28) + +- [set snippet more_set_headers in all locations](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L34) +- [drops snippet more_set_header in all locations if disabled by admin](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/snippet.go#L63) + +### [backend-protocol - GRPC](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L39) + +- [should use grpc_pass in the configuration file](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L42) +- [should return OK for service with backend protocol GRPC](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L67) +- [authorization metadata should be overwritten by external auth response headers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L126) +- [should return OK for service with backend protocol GRPCS](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/annotations/grpc.go#L186) + ### [Debug CLI](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/dbg/main.go#L29) - [should list the backend servers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/dbg/main.go#L37) @@ -390,10 +389,6 @@ Do not try to edit it manually. 
- [should return 200 when service has topology hints](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/endpointslices/topology.go#L43) -### [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/deployment.go#L) - -- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/deployment.go#L) - ### [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/exec.go#L) - [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/exec.go#L) @@ -402,10 +397,6 @@ Do not try to edit it manually. - [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/fastcgi_helloserver.go#L) -### [[Setting] ](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/framework.go#L194) - -- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/framework.go#L) - ### [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/grpc_fortune_teller.go#L) - [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/grpc_fortune_teller.go#L) @@ -478,6 +469,14 @@ Do not try to edit it manually. - [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/util.go#L) +### [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/deployment.go#L) + +- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/deployment.go#L) + +### [[Setting] ](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/framework.go#L217) + +- [](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/framework/framework.go#L) + ### [[Shutdown] Grace period shutdown](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/gracefulshutdown/grace_period.go#L32) - [/healthz should return status code 500 during shutdown grace period](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/gracefulshutdown/grace_period.go#L35) @@ -571,21 +570,21 @@ Do not try to edit it manually. 
- [should return 503 when backend service does not exist](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_backend.go#L36) - [should return 503 when all backend service endpoints are unavailable](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_backend.go#L54) -### [[Service] Type ExternalName](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L37) - -- [works with external name set to incomplete fqdn](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L40) -- [should return 200 for service type=ExternalName without a port defined](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L72) -- [should return 200 for service type=ExternalName with a port defined](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L110) -- [should return status 502 for service type=ExternalName with an invalid host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L138) -- [should return 200 for service type=ExternalName using a port name](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L169) -- [should return 200 for service type=ExternalName using FQDN with trailing dot](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L206) -- [should update the external name after a service update](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L241) -- [should sync ingress on external name service addition/deletion](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L311) - ### [[Service] Nil Service Backend](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_nil_backend.go#L31) - [should return 404 when backend service is nil](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_nil_backend.go#L38) +### [[Service] Type ExternalName](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L37) + +- [works with external name set to incomplete fqdn](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L40) +- [should return 200 for service type=ExternalName without a port defined](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L77) +- [should return 200 for service type=ExternalName with a port defined](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L117) +- [should return status 502 for service type=ExternalName with an invalid host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L147) +- [should return 200 for service type=ExternalName using a port name](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L183) +- [should return 200 for service type=ExternalName using FQDN with trailing dot](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L224) +- [should update the external name after a service 
update](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L260) +- [should sync ingress on external name service addition/deletion](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/servicebackend/service_externalname.go#L347) + ### [access-log](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/access_log.go#L27) - [use the default configuration](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/access_log.go#L32) @@ -601,10 +600,6 @@ Do not try to edit it manually. - [[BAD_ANNOTATIONS] should allow an ingress if there is a default blocklist config in place](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/badannotationvalues.go#L102) - [[BAD_ANNOTATIONS] should drop an ingress if there is a custom blocklist config in place and allow others to pass](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/badannotationvalues.go#L133) -### [brotli](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/brotli.go#L30) - -- [ condition](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/brotli.go#L39) - ### [Configmap change](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/configmap_change.go#L29) - [should reload after an update in the configuration](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/configmap_change.go#L36) @@ -626,26 +621,12 @@ Do not try to edit it manually. - [should delete Ingress updated to catch-all](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_catch_all.go#L81) - [should allow Ingress with rules](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_catch_all.go#L123) -### [[Flag] disable-service-external-name](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_service_external_name.go#L35) - -- [should ignore services of external-name type](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_service_external_name.go#L52) - ### [[Flag] disable-sync-events](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_sync_events.go#L32) - [should create sync events (default)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_sync_events.go#L35) - [should create sync events](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_sync_events.go#L53) - [should not create sync events](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_sync_events.go#L80) -### [enable-real-ip](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L30) - -- [trusts X-Forwarded-For header only when setting is true](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L40) -- [should not trust X-Forwarded-For header when setting is false](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L78) - -### [use-forwarded-headers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L30) - -- [should trust X-Forwarded headers when setting is true](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L40) -- [should not trust X-Forwarded headers when setting is false](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L92) - ### 
[Geoip2](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/geoip2.go#L37) - [should include geoip2 line in config when enabled and db file exists](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/geoip2.go#L46) @@ -657,21 +638,6 @@ Do not try to edit it manually. - [should block User-Agents defined in the ConfigMap](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_access_block.go#L55) - [should block Referers defined in the ConfigMap](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_access_block.go#L88) -### [[Security] global-auth-url](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L34) - -- [should return status code 401 when request any protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L85) -- [should return status code 200 when request whitelisted (via no-auth-locations) service and 401 when request protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L102) -- [should return status code 200 when request whitelisted (via ingress annotation) service and 401 when request protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L126) -- [should still return status code 200 after auth backend is deleted using cache](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L155) -- [should proxy_method method when global-auth-method is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L197) -- [should add custom error page when global-auth-signin url is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L210) -- [should add auth headers when global-auth-response-headers is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L223) -- [should set request-redirect when global-auth-request-redirect is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L237) -- [should set snippet when global external auth is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L250) -- [user retains cookie by default](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L326) -- [user does not retain cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L337) -- [user with global-auth-always-set-cookie key in configmap retains cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L348) - ### [global-options](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_options.go#L28) - [should have worker_rlimit_nofile option](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_options.go#L31) @@ -715,25 +681,10 @@ Do not try to edit it manually. 
- [should watch Ingress with correct annotation](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ingress_class.go#L631) - [should ignore Ingress with only IngressClassName](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ingress_class.go#L652) -### [keep-alive keep-alive-requests](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L28) - -- [should set keepalive_timeout](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L40) -- [should set keepalive_requests](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L48) -- [should set keepalive connection to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L59) -- [should set keep alive connection timeout to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L68) -- [should set keepalive time to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L77) -- [should set the request count to upstream server through one keep alive connection](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L86) - ### [Configmap - limit-rate](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/limit_rate.go#L28) - [Check limit-rate config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/limit_rate.go#L36) -### [[Flag] custom HTTP and HTTPS ports](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L32) - -- [should set X-Forwarded-Port headers accordingly when listening on a non-default HTTP port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L48) -- [should set X-Forwarded-Port header to 443](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L70) -- [should set the X-Forwarded-Port header to 443](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L100) - ### [log-format-*](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/log-format.go#L28) - [should not configure log-format escape by default](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/log-format.go#L40) @@ -773,10 +724,6 @@ Do not try to edit it manually. - [should return status code 200 when accessing '/' authentication](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_auth_locations.go#L68) - [should return status code 200 when accessing '/noauth' unauthenticated](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_auth_locations.go#L82) -### [Add no tls redirect locations](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_tls_redirect_locations.go#L28) - -- [Check no tls redirect locations config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_tls_redirect_locations.go#L31) - ### [OCSP](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ocsp/ocsp.go#L42) - [should enable OCSP and contain stapling information in the connection](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ocsp/ocsp.go#L49) @@ -822,22 +769,10 @@ Do not try to edit it manually. 
- [should set valid proxy timeouts using configmap values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_connect_timeout.go#L36) - [should not set invalid proxy timeouts using configmap values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_connect_timeout.go#L52) -### [Dynamic $proxy_host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L28) - -- [should exist a proxy_host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L36) -- [should exist a proxy_host using the upstream-vhost annotation value](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L57) - ### [proxy-next-upstream](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_next_upstream.go#L28) - [should build proxy next upstream using configmap values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_next_upstream.go#L36) -### [use-proxy-protocol](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L36) - -- [should respect port passed by the PROXY Protocol](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L46) -- [should respect proto passed by the PROXY Protocol server port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L79) -- [should enable PROXY Protocol for HTTPS](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L112) -- [should enable PROXY Protocol for TCP](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L155) - ### [proxy-read-timeout](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_read_timeout.go#L28) - [should set valid proxy read timeouts using configmap values](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_read_timeout.go#L36) @@ -868,15 +803,79 @@ Do not try to edit it manually. 
- [Add ssl ciphers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ssl_ciphers.go#L31) +### [configmap stream-snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/stream_snippet.go#L35) + +- [should add value of stream-snippet via config map to nginx config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/stream_snippet.go#L42) + +### [brotli](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/brotli.go#L30) + +- [ condition](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/brotli.go#L38) + +### [[Flag] disable-service-external-name](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_service_external_name.go#L35) + +- [should ignore services of external-name type](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/disable_service_external_name.go#L55) + +### [enable-real-ip](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L30) + +- [trusts X-Forwarded-For header only when setting is true](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L40) +- [should not trust X-Forwarded-For header when setting is false](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/enable_real_ip.go#L78) + +### [use-forwarded-headers](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L29) + +- [should trust X-Forwarded headers when setting is true](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L39) +- [should not trust X-Forwarded headers when setting is false](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/forwarded_headers.go#L91) + +### [[Security] global-auth-url](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L34) + +- [should return status code 401 when request any protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L87) +- [should return status code 200 when request whitelisted (via no-auth-locations) service and 401 when request protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L104) +- [should return status code 200 when request whitelisted (via ingress annotation) service and 401 when request protected service](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L128) +- [should still return status code 200 after auth backend is deleted using cache](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L157) +- [should proxy_method method when global-auth-method is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L199) +- [should add custom error page when global-auth-signin url is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L212) +- [should add auth headers when global-auth-response-headers is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L225) +- [should set request-redirect when global-auth-request-redirect is configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L239) +- [should set snippet when global external auth is 
configured](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L252) +- [user retains cookie by default](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L328) +- [user does not retain cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L339) +- [user with global-auth-always-set-cookie key in configmap retains cookie if upstream returns error status code](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/global_external_auth.go#L350) + +### [keep-alive keep-alive-requests](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L27) + +- [should set keepalive_timeout](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L39) +- [should set keepalive_requests](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L47) +- [should set keepalive connection to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L58) +- [should set keep alive connection timeout to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L67) +- [should set keepalive time to upstream server](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L76) +- [should set the request count to upstream server through one keep alive connection](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/keep-alive.go#L85) + +### [[Flag] custom HTTP and HTTPS ports](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L30) + +- [should set X-Forwarded-Port headers accordingly when listening on a non-default HTTP port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L46) +- [should set X-Forwarded-Port header to 443](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L68) +- [should set the X-Forwarded-Port header to 443](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/listen_nondefault_ports.go#L98) + +### [Add no tls redirect locations](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_tls_redirect_locations.go#L27) + +- [Check no tls redirect locations config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/no_tls_redirect_locations.go#L30) + +### [Dynamic $proxy_host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L28) + +- [should exist a proxy_host](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L36) +- [should exist a proxy_host using the upstream-vhost annotation value](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_host.go#L57) + +### [use-proxy-protocol](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L36) + +- [should respect port passed by the PROXY Protocol](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L46) +- [should respect proto passed by the PROXY Protocol server port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L79) +- [should enable PROXY Protocol for 
HTTPS](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L112) +- [should enable PROXY Protocol for TCP](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/proxy_protocol.go#L155) + ### [With enable-ssl-passthrough enabled](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ssl_passthrough.go#L36) - [should enable ssl-passthrough-proxy-port on a different port](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ssl_passthrough.go#L56) - [should pass unknown traffic to default backend and handle known traffic](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/ssl_passthrough.go#L79) -### [configmap stream-snippet](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/stream_snippet.go#L35) - -- [should add value of stream-snippet via config map to nginx config](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/stream_snippet.go#L42) - ### [[SSL] TLS protocols, ciphers and headers)](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/tls.go#L31) - [setting cipher suite](https://github.com/kubernetes/ingress-nginx/tree/main/test/e2e/settings/tls.go#L65) diff --git a/docs/examples/canary/README.md b/docs/examples/canary/README.md new file mode 100644 index 000000000..4124faf6f --- /dev/null +++ b/docs/examples/canary/README.md @@ -0,0 +1,231 @@ +# Canary + +Ingress Nginx Has the ability to handle canary routing by setting specific +annotations, the following is an example of how to configure a canary +deployment with weighted canary routing. + +## Create your main deployment and service + +This is the main deployment of your application with the service that will be +used to route to it + +```bash +echo " +--- +# Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: production + labels: + app: production +spec: + replicas: 1 + selector: + matchLabels: + app: production + template: + metadata: + labels: + app: production + spec: + containers: + - name: production + image: registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:6fc5aa2994c86575975bb20a5203651207029a0d28e3f491d8a127d08baadab4 + ports: + - containerPort: 80 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +--- +# Service +apiVersion: v1 +kind: Service +metadata: + name: production + labels: + app: production +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: production +" | kubectl apply -f - +``` + +## Create the canary deployment and service + +This is the canary deployment that will take a weighted amount of requests +instead of the main deployment + +```bash +echo " +--- +# Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: canary + labels: + app: canary +spec: + replicas: 1 + selector: + matchLabels: + app: canary + template: + metadata: + labels: + app: canary + spec: + containers: + - name: canary + image: registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:6fc5aa2994c86575975bb20a5203651207029a0d28e3f491d8a127d08baadab4 + ports: + - containerPort: 80 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + 
valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+---
+# Service
+apiVersion: v1
+kind: Service
+metadata:
+  name: canary
+  labels:
+    app: canary
+spec:
+  ports:
+  - port: 80
+    targetPort: 80
+    protocol: TCP
+    name: http
+  selector:
+    app: canary
+" | kubectl apply -f -
+```
+
+## Create Ingress Pointing To Your Main Deployment
+
+Next you will need to expose your main deployment with an Ingress resource.
+Note that there are no canary-specific annotations on this Ingress.
+
+```bash
+echo "
+---
+# Ingress
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: production
+  annotations:
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: echo.prod.mydomain.com
+    http:
+      paths:
+      - pathType: Prefix
+        path: /
+        backend:
+          service:
+            name: production
+            port:
+              number: 80
+" | kubectl apply -f -
+```
+
+## Create Ingress Pointing To Your Canary Deployment
+
+You will then create an Ingress that has the canary-specific configuration.
+Please pay special attention to the following:
+
+- The host name is identical to the main ingress host name.
+- The `nginx.ingress.kubernetes.io/canary: "true"` annotation is required and
+  marks this Ingress as the canary (if you do not set it, the Ingresses
+  will clash).
+- The `nginx.ingress.kubernetes.io/canary-weight: "50"` annotation dictates the
+  weight of the routing; in this case there is a "50%" chance a request will
+  hit the canary deployment instead of the main deployment.
+
+```bash
+echo "
+---
+# Ingress
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: canary
+  annotations:
+    nginx.ingress.kubernetes.io/canary: \"true\"
+    nginx.ingress.kubernetes.io/canary-weight: \"50\"
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: echo.prod.mydomain.com
+    http:
+      paths:
+      - pathType: Prefix
+        path: /
+        backend:
+          service:
+            name: canary
+            port:
+              number: 80
+" | kubectl apply -f -
+```
+
+## Testing your setup
+
+You can use the following command to test your setup (replacing
+INGRESS_CONTROLLER_IP with your ingress controller's IP address):
+
+```bash
+for i in $(seq 1 10); do curl -s --resolve echo.prod.mydomain.com:80:$INGRESS_CONTROLLER_IP echo.prod.mydomain.com | grep "Hostname"; done
+```
+
+You will get the following output showing that your canary setup is working as
+expected:
+
+```bash
+Hostname: production-5c5f65d859-phqzc
+Hostname: canary-6697778457-zkfjf
+Hostname: canary-6697778457-zkfjf
+Hostname: production-5c5f65d859-phqzc
+Hostname: canary-6697778457-zkfjf
+Hostname: production-5c5f65d859-phqzc
+Hostname: production-5c5f65d859-phqzc
+Hostname: production-5c5f65d859-phqzc
+Hostname: canary-6697778457-zkfjf
+Hostname: production-5c5f65d859-phqzc
+```
diff --git a/docs/examples/grpc/README.md b/docs/examples/grpc/README.md
index cf4597fcd..508b23fb8 100644
--- a/docs/examples/grpc/README.md
+++ b/docs/examples/grpc/README.md
@@ -20,7 +20,7 @@ This example demonstrates how to route traffic to a gRPC service through the Ing

- As an example gRPC application, we can use this app .

-- To create a container image for this app, you can use [this Dockerfile](https://github.com/kubernetes/ingress-nginx/blob/5a52d99ae85cfe5ef9535291b8326b0006e75066/images/go-grpc-greeter-server/rootfs/Dockerfile).
+- To create a container image for this app, you can use [this Dockerfile](https://github.com/kubernetes/ingress-nginx/blob/main/images/go-grpc-greeter-server/rootfs/Dockerfile).
- If you use the Dockerfile mentioned above, to create a image, then you can use the following example Kubernetes manifest to create a deployment resource that uses that image. If necessary edit this manifest to suit your needs. diff --git a/docs/examples/index.md b/docs/examples/index.md index 8a5fd5f51..3af4266ff 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -23,6 +23,7 @@ Customization | [External authentication with response header propagation](custo Customization | [Sysctl tuning](customization/sysctl/README.md) | TODO | TODO Features | [Rewrite](rewrite/README.md) | TODO | TODO Features | [Session stickiness](affinity/cookie/README.md) | route requests consistently to the same endpoint | Advanced +Features | [Canary Deployments](canary/README.md) | weighted canary routing to a separate deployment | Intermediate Scaling | [Static IP](static-ip/README.md) | a single ingress gets a single static IP | Intermediate TLS | [Multi TLS certificate termination](multi-tls/README.md) | TODO | TODO TLS | [TLS termination](tls-termination/README.md) | TODO | TODO diff --git a/docs/index.md b/docs/index.md index 0a0f488cf..bd6a825e1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -4,7 +4,7 @@ This is the documentation for the Ingress NGINX Controller. It is built around the [Kubernetes Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/), using a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) to store the controller configuration. -You can learn more about using [Ingress](http://kubernetes.io/docs/user-guide/ingress/) in the official [Kubernetes documentation](https://docs.k8s.io). +You can learn more about using [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) in the official [Kubernetes documentation](https://docs.k8s.io). # Getting Started diff --git a/docs/lua_tests.md b/docs/lua_tests.md new file mode 100644 index 000000000..4d3d1fe70 --- /dev/null +++ b/docs/lua_tests.md @@ -0,0 +1,19 @@ +# Lua Tests + +## Running the Lua Tests + +To run the Lua tests, you can run the following from the root directory: + +```bash +make lua-test +``` + +This command makes use of Docker, hence it does not need any dependency +installations besides Docker. + +## Where are the Lua Tests? + +Lua tests can be found in the [rootfs/etc/nginx/lua/test](../rootfs/etc/nginx/lua/test) directory. + + +[1]: https://openresty.org/en/installation.html diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 4b9820200..e1fd6956e 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -137,7 +137,7 @@ The Ingress controller needs information from apiserver. Therefore, authenticati * _Kubeconfig file:_ In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the `--kubeconfig` flag. The value of the flag is a path to a file specifying how to connect to the API server. Using the `--kubeconfig` does not requires the flag `--apiserver-host`. The format of the file is identical to `~/.kube/config` which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. -* _Using the flag `--apiserver-host`:_ Using this flag `--apiserver-host=http://localhost:8080` it is possible to specify an unsecured API server or reach a remote kubernetes cluster using [kubectl proxy](https://kubernetes.io/docs/user-guide/kubectl/kubectl_proxy/).
+* _Using the flag `--apiserver-host`:_ Using this flag `--apiserver-host=http://localhost:8080` it is possible to specify an unsecured API server or reach a remote kubernetes cluster using [kubectl proxy](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#proxy). Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser @@ -230,7 +230,7 @@ If it is not working, there are two possible reasons: More information: -- [User Guide: Service Accounts](http://kubernetes.io/docs/user-guide/service-accounts/) +- [User Guide: Service Accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) - [Cluster Administrator Guide: Managing Service Accounts](http://kubernetes.io/docs/admin/service-accounts-admin/) ## Kube-Config diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index b515a9f3b..0916b4df5 100755 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -38,7 +38,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/auth-proxy-set-headers](#external-authentication)|string| |[nginx.ingress.kubernetes.io/auth-snippet](#external-authentication)|string| |[nginx.ingress.kubernetes.io/enable-global-auth](#external-authentication)|"true" or "false"| -|[nginx.ingress.kubernetes.io/backend-protocol](#backend-protocol)|string|HTTP,HTTPS,GRPC,GRPCS,AJP| +|[nginx.ingress.kubernetes.io/backend-protocol](#backend-protocol)|string|HTTP,HTTPS,GRPC,GRPCS| |[nginx.ingress.kubernetes.io/canary](#canary)|"true" or "false"| |[nginx.ingress.kubernetes.io/canary-by-header](#canary)|string| |[nginx.ingress.kubernetes.io/canary-by-header-value](#canary)|string| @@ -894,7 +894,7 @@ Include /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf ### Backend Protocol Using `backend-protocol` annotations is possible to indicate how NGINX should communicate with the backend service. (Replaces `secure-backends` in older versions) -Valid Values: HTTP, HTTPS, GRPC, GRPCS, AJP and FCGI +Valid Values: HTTP, HTTPS, GRPC, GRPCS and FCGI By default NGINX uses `HTTP`. 
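For context on the annotation change above (AJP removed from the accepted protocols), the remaining `backend-protocol` values are set per Ingress through the annotation documented in that section. Below is a minimal sketch, not part of this diff; the Service name `grpc-echo`, the host and the port number are illustrative assumptions.

```yaml
# Sketch only: the Service name, host and port below are hypothetical.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grpc-echo
  annotations:
    # Documented values after this change: HTTP (default), HTTPS, GRPC, GRPCS, FCGI.
    nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
spec:
  ingressClassName: nginx
  rules:
    - host: grpc.example.com
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: grpc-echo
                port:
                  number: 9000
```

Existing Ingresses that still set `AJP` will no longer match the updated `validProtocols` expression in `internal/ingress/annotations/backendprotocol/main.go` (see the change later in this diff) and should be migrated before upgrading.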
diff --git a/docs/user-guide/nginx-configuration/configmap.md b/docs/user-guide/nginx-configuration/configmap.md index 782b9bc92..c55b7502a 100644 --- a/docs/user-guide/nginx-configuration/configmap.md +++ b/docs/user-guide/nginx-configuration/configmap.md @@ -25,211 +25,211 @@ data: The following table shows a configuration option's name, type, and the default value: -|name|type|default| -|:---|:---|:------| -|[add-headers](#add-headers)|string|""| -|[allow-backend-server-header](#allow-backend-server-header)|bool|"false"| -|[allow-snippet-annotations](#allow-snippet-annotations)|bool|true| -|[annotation-value-word-blocklist](#annotation-value-word-blocklist)|string array|""| -|[hide-headers](#hide-headers)|string array|empty| -|[access-log-params](#access-log-params)|string|""| -|[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"| -|[http-access-log-path](#http-access-log-path)|string|""| -|[stream-access-log-path](#stream-access-log-path)|string|""| -|[enable-access-log-for-default-backend](#enable-access-log-for-default-backend)|bool|"false"| -|[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"| -|[enable-modsecurity](#enable-modsecurity)|bool|"false"| -|[modsecurity-snippet](#modsecurity-snippet)|string|""| -|[enable-owasp-modsecurity-crs](#enable-owasp-modsecurity-crs)|bool|"false"| -|[client-header-buffer-size](#client-header-buffer-size)|string|"1k"| -|[client-header-timeout](#client-header-timeout)|int|60| -|[client-body-buffer-size](#client-body-buffer-size)|string|"8k"| -|[client-body-timeout](#client-body-timeout)|int|60| -|[disable-access-log](#disable-access-log)|bool|false| -|[disable-ipv6](#disable-ipv6)|bool|false| -|[disable-ipv6-dns](#disable-ipv6-dns)|bool|false| -|[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false| -|[enable-ocsp](#enable-ocsp)|bool|false| -|[ignore-invalid-headers](#ignore-invalid-headers)|bool|true| -|[retry-non-idempotent](#retry-non-idempotent)|bool|"false"| -|[error-log-level](#error-log-level)|string|"notice"| -|[http2-max-field-size](#http2-max-field-size)|string|"4k"| -|[http2-max-header-size](#http2-max-header-size)|string|"16k"| -|[http2-max-requests](#http2-max-requests)|int|1000| -|[http2-max-concurrent-streams](#http2-max-concurrent-streams)|int|128| -|[hsts](#hsts)|bool|"true"| -|[hsts-include-subdomains](#hsts-include-subdomains)|bool|"true"| -|[hsts-max-age](#hsts-max-age)|string|"15724800"| -|[hsts-preload](#hsts-preload)|bool|"false"| -|[keep-alive](#keep-alive)|int|75| -|[keep-alive-requests](#keep-alive-requests)|int|1000| -|[large-client-header-buffers](#large-client-header-buffers)|string|"4 8k"| -|[log-format-escape-none](#log-format-escape-none)|bool|"false"| -|[log-format-escape-json](#log-format-escape-json)|bool|"false"| -|[log-format-upstream](#log-format-upstream)|string|`$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id`| -|[log-format-stream](#log-format-stream)|string|`[$remote_addr] [$time_local] $protocol $status $bytes_sent $bytes_received $session_time`| -|[enable-multi-accept](#enable-multi-accept)|bool|"true"| -|[max-worker-connections](#max-worker-connections)|int|16384| -|[max-worker-open-files](#max-worker-open-files)|int|0| -|[map-hash-bucket-size](#max-hash-bucket-size)|int|64| 
-|[nginx-status-ipv4-whitelist](#nginx-status-ipv4-whitelist)|[]string|"127.0.0.1"| -|[nginx-status-ipv6-whitelist](#nginx-status-ipv6-whitelist)|[]string|"::1"| -|[proxy-real-ip-cidr](#proxy-real-ip-cidr)|[]string|"0.0.0.0/0"| -|[proxy-set-headers](#proxy-set-headers)|string|""| -|[server-name-hash-max-size](#server-name-hash-max-size)|int|1024| -|[server-name-hash-bucket-size](#server-name-hash-bucket-size)|int|`` -|[proxy-headers-hash-max-size](#proxy-headers-hash-max-size)|int|512| -|[proxy-headers-hash-bucket-size](#proxy-headers-hash-bucket-size)|int|64| -|[plugins](#plugins)|[]string| | -|[reuse-port](#reuse-port)|bool|"true"| -|[server-tokens](#server-tokens)|bool|"false"| -|[ssl-ciphers](#ssl-ciphers)|string|"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384"| -|[ssl-ecdh-curve](#ssl-ecdh-curve)|string|"auto"| -|[ssl-dh-param](#ssl-dh-param)|string|""| -|[ssl-protocols](#ssl-protocols)|string|"TLSv1.2 TLSv1.3"| -|[ssl-session-cache](#ssl-session-cache)|bool|"true"| -|[ssl-session-cache-size](#ssl-session-cache-size)|string|"10m"| -|[ssl-session-tickets](#ssl-session-tickets)|bool|"false"| -|[ssl-session-ticket-key](#ssl-session-ticket-key)|string|`` -|[ssl-session-timeout](#ssl-session-timeout)|string|"10m"| -|[ssl-buffer-size](#ssl-buffer-size)|string|"4k"| -|[use-proxy-protocol](#use-proxy-protocol)|bool|"false"| -|[proxy-protocol-header-timeout](#proxy-protocol-header-timeout)|string|"5s"| -|[use-gzip](#use-gzip)|bool|"false"| -|[use-geoip](#use-geoip)|bool|"true"| -|[use-geoip2](#use-geoip2)|bool|"false"| -|[enable-brotli](#enable-brotli)|bool|"false"| -|[brotli-level](#brotli-level)|int|4| -|[brotli-min-length](#brotli-min-length)|int|20| -|[brotli-types](#brotli-types)|string|"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"| -|[use-http2](#use-http2)|bool|"true"| -|[gzip-disable](#gzip-disable)|string|""| -|[gzip-level](#gzip-level)|int|1| -|[gzip-min-length](#gzip-min-length)|int|256| -|[gzip-types](#gzip-types)|string|"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"| -|[worker-processes](#worker-processes)|string|``| -|[worker-cpu-affinity](#worker-cpu-affinity)|string|""| -|[worker-shutdown-timeout](#worker-shutdown-timeout)|string|"240s"| -|[load-balance](#load-balance)|string|"round_robin"| -|[variables-hash-bucket-size](#variables-hash-bucket-size)|int|128| -|[variables-hash-max-size](#variables-hash-max-size)|int|2048| -|[upstream-keepalive-connections](#upstream-keepalive-connections)|int|320| -|[upstream-keepalive-time](#upstream-keepalive-time)|string|"1h"| -|[upstream-keepalive-timeout](#upstream-keepalive-timeout)|int|60| -|[upstream-keepalive-requests](#upstream-keepalive-requests)|int|10000| -|[limit-conn-zone-variable](#limit-conn-zone-variable)|string|"$binary_remote_addr"| 
-|[proxy-stream-timeout](#proxy-stream-timeout)|string|"600s"| -|[proxy-stream-next-upstream](#proxy-stream-next-upstream)|bool|"true"| -|[proxy-stream-next-upstream-timeout](#proxy-stream-next-upstream-timeout)|string|"600s"| -|[proxy-stream-next-upstream-tries](#proxy-stream-next-upstream-tries)|int|3| -|[proxy-stream-responses](#proxy-stream-responses)|int|1| -|[bind-address](#bind-address)|[]string|""| -|[use-forwarded-headers](#use-forwarded-headers)|bool|"false"| -|[enable-real-ip](#enable-real-ip)|bool|"false"| -|[forwarded-for-header](#forwarded-for-header)|string|"X-Forwarded-For"| -|[compute-full-forwarded-for](#compute-full-forwarded-for)|bool|"false"| -|[proxy-add-original-uri-header](#proxy-add-original-uri-header)|bool|"false"| -|[generate-request-id](#generate-request-id)|bool|"true"| -|[enable-opentracing](#enable-opentracing)|bool|"false"| -|[opentracing-operation-name](#opentracing-operation-name)|string|""| -|[opentracing-location-operation-name](#opentracing-location-operation-name)|string|""| -|[zipkin-collector-host](#zipkin-collector-host)|string|""| -|[zipkin-collector-port](#zipkin-collector-port)|int|9411| -|[zipkin-service-name](#zipkin-service-name)|string|"nginx"| -|[zipkin-sample-rate](#zipkin-sample-rate)|float|1.0| -|[jaeger-collector-host](#jaeger-collector-host)|string|""| -|[jaeger-collector-port](#jaeger-collector-port)|int|6831| -|[jaeger-endpoint](#jaeger-endpoint)|string|""| -|[jaeger-service-name](#jaeger-service-name)|string|"nginx"| -|[jaeger-propagation-format](#jaeger-propagation-format)|string|"jaeger"| -|[jaeger-sampler-type](#jaeger-sampler-type)|string|"const"| -|[jaeger-sampler-param](#jaeger-sampler-param)|string|"1"| -|[jaeger-sampler-host](#jaeger-sampler-host)|string|"http://127.0.0.1"| -|[jaeger-sampler-port](#jaeger-sampler-port)|int|5778| -|[jaeger-trace-context-header-name](#jaeger-trace-context-header-name)|string|uber-trace-id| -|[jaeger-debug-header](#jaeger-debug-header)|string|uber-debug-id| -|[jaeger-baggage-header](#jaeger-baggage-header)|string|jaeger-baggage| -|[jaeger-trace-baggage-header-prefix](#jaeger-trace-baggage-header-prefix)|string|uberctx-| -|[datadog-collector-host](#datadog-collector-host)|string|""| -|[datadog-collector-port](#datadog-collector-port)|int|8126| -|[datadog-service-name](#datadog-service-name)|string|"nginx"| -|[datadog-environment](#datadog-environment)|string|"prod"| -|[datadog-operation-name-override](#datadog-operation-name-override)|string|"nginx.handle"| -|[datadog-priority-sampling](#datadog-priority-sampling)|bool|"true"| -|[datadog-sample-rate](#datadog-sample-rate)|float|1.0| -|[enable-opentelemetry](#enable-opentelemetry)|bool|"false"| -|[opentelemetry-trust-incoming-span](#opentelemetry-trust-incoming-span)|bool|"true"| -|[opentelemetry-operation-name](#opentelemetry-operation-name)|string|""| -|[opentelemetry-config](#/etc/nginx/opentelemetry.toml)|string|"/etc/nginx/opentelemetry.toml"| -|[otlp-collector-host](#otlp-collector-host)|string|""| -|[otlp-collector-port](#otlp-collector-port)|int|4317| -|[otel-max-queuesize](#otel-max-queuesize)|int|| -|[otel-schedule-delay-millis](#otel-schedule-delay-millis)|int|| -|[otel-max-export-batch-size](#otel-max-export-batch-size)|int|| -|[otel-service-name](#otel-service-name)|string|"nginx"| -|[otel-sampler](#otel-sampler)|string|"AlwaysOff"| -|[otel-sampler-parent-based](#otel-sampler-parent-based)|bool|"false"| -|[otel-sampler-ratio](#otel-sampler-ratio)|float|0.01| -|[main-snippet](#main-snippet)|string|""| 
-|[http-snippet](#http-snippet)|string|""| -|[server-snippet](#server-snippet)|string|""| -|[stream-snippet](#stream-snippet)|string|""| -|[location-snippet](#location-snippet)|string|""| -|[custom-http-errors](#custom-http-errors)|[]int|[]int{}| -|[proxy-body-size](#proxy-body-size)|string|"1m"| -|[proxy-connect-timeout](#proxy-connect-timeout)|int|5| -|[proxy-read-timeout](#proxy-read-timeout)|int|60| -|[proxy-send-timeout](#proxy-send-timeout)|int|60| -|[proxy-buffers-number](#proxy-buffers-number)|int|4| -|[proxy-buffer-size](#proxy-buffer-size)|string|"4k"| -|[proxy-cookie-path](#proxy-cookie-path)|string|"off"| -|[proxy-cookie-domain](#proxy-cookie-domain)|string|"off"| -|[proxy-next-upstream](#proxy-next-upstream)|string|"error timeout"| -|[proxy-next-upstream-timeout](#proxy-next-upstream-timeout)|int|0| -|[proxy-next-upstream-tries](#proxy-next-upstream-tries)|int|3| -|[proxy-redirect-from](#proxy-redirect-from)|string|"off"| -|[proxy-request-buffering](#proxy-request-buffering)|string|"on"| -|[ssl-redirect](#ssl-redirect)|bool|"true"| -|[force-ssl-redirect](#force-ssl-redirect)|bool|"false"| -|[denylist-source-range](#denylist-source-range)|[]string|[]string{}| -|[whitelist-source-range](#whitelist-source-range)|[]string|[]string{}| -|[skip-access-log-urls](#skip-access-log-urls)|[]string|[]string{}| -|[limit-rate](#limit-rate)|int|0| -|[limit-rate-after](#limit-rate-after)|int|0| -|[lua-shared-dicts](#lua-shared-dicts)|string|""| -|[http-redirect-code](#http-redirect-code)|int|308| -|[proxy-buffering](#proxy-buffering)|string|"off"| -|[limit-req-status-code](#limit-req-status-code)|int|503| -|[limit-conn-status-code](#limit-conn-status-code)|int|503| -|[enable-syslog](#enable-syslog)|bool|false| -|[syslog-host](#syslog-host)|string|""| -|[syslog-port](#syslog-port)|int|514| -|[no-tls-redirect-locations](#no-tls-redirect-locations)|string|"/.well-known/acme-challenge"| -|[global-auth-url](#global-auth-url)|string|""| -|[global-auth-method](#global-auth-method)|string|""| -|[global-auth-signin](#global-auth-signin)|string|""| -|[global-auth-signin-redirect-param](#global-auth-signin-redirect-param)|string|"rd"| -|[global-auth-response-headers](#global-auth-response-headers)|string|""| -|[global-auth-request-redirect](#global-auth-request-redirect)|string|""| -|[global-auth-snippet](#global-auth-snippet)|string|""| -|[global-auth-cache-key](#global-auth-cache-key)|string|""| -|[global-auth-cache-duration](#global-auth-cache-duration)|string|"200 202 401 5m"| -|[no-auth-locations](#no-auth-locations)|string|"/.well-known/acme-challenge"| -|[block-cidrs](#block-cidrs)|[]string|""| -|[block-user-agents](#block-user-agents)|[]string|""| -|[block-referers](#block-referers)|[]string|""| -|[proxy-ssl-location-only](#proxy-ssl-location-only)|bool|"false"| -|[default-type](#default-type)|string|"text/html"| -|[global-rate-limit-memcached-host](#global-rate-limit)|string|""| -|[global-rate-limit-memcached-port](#global-rate-limit)|int|11211| -|[global-rate-limit-memcached-connect-timeout](#global-rate-limit)|int|50| -|[global-rate-limit-memcached-max-idle-timeout](#global-rate-limit)|int|10000| -|[global-rate-limit-memcached-pool-size](#global-rate-limit)|int|50| -|[global-rate-limit-status-code](#global-rate-limit)|int|429| -|[service-upstream](#service-upstream)|bool|"false"| -|[ssl-reject-handshake](#ssl-reject-handshake)|bool|"false"| -|[debug-connections](#debug-connections)|[]string|"127.0.0.1,1.1.1.1/24"| -|[strict-validate-path-type](#strict-validate-path-type)|bool|"false" (v1.7.x)| 
+|name|type|default|notes| +|:---|:---|:------|:----| +|[add-headers](#add-headers)|string|""|| +|[allow-backend-server-header](#allow-backend-server-header)|bool|"false"|| +|[allow-snippet-annotations](#allow-snippet-annotations)|bool|true|| +|[annotation-value-word-blocklist](#annotation-value-word-blocklist)|string array|""|| +|[hide-headers](#hide-headers)|string array|empty|| +|[access-log-params](#access-log-params)|string|""|| +|[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"|| +|[http-access-log-path](#http-access-log-path)|string|""|| +|[stream-access-log-path](#stream-access-log-path)|string|""|| +|[enable-access-log-for-default-backend](#enable-access-log-for-default-backend)|bool|"false"|| +|[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"|| +|[enable-modsecurity](#enable-modsecurity)|bool|"false"|| +|[modsecurity-snippet](#modsecurity-snippet)|string|""|| +|[enable-owasp-modsecurity-crs](#enable-owasp-modsecurity-crs)|bool|"false"|| +|[client-header-buffer-size](#client-header-buffer-size)|string|"1k"|| +|[client-header-timeout](#client-header-timeout)|int|60|| +|[client-body-buffer-size](#client-body-buffer-size)|string|"8k"|| +|[client-body-timeout](#client-body-timeout)|int|60|| +|[disable-access-log](#disable-access-log)|bool|false|| +|[disable-ipv6](#disable-ipv6)|bool|false|| +|[disable-ipv6-dns](#disable-ipv6-dns)|bool|false|| +|[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false|| +|[enable-ocsp](#enable-ocsp)|bool|false|| +|[ignore-invalid-headers](#ignore-invalid-headers)|bool|true|| +|[retry-non-idempotent](#retry-non-idempotent)|bool|"false"|| +|[error-log-level](#error-log-level)|string|"notice"|| +|[http2-max-field-size](#http2-max-field-size)|string|""|DEPRECATED in favour of [large_client_header_buffers](#large-client-header-buffers)| +|[http2-max-header-size](#http2-max-header-size)|string|""|DEPRECATED in favour of [large_client_header_buffers](#large-client-header-buffers)| +|[http2-max-requests](#http2-max-requests)|int|0|DEPRECATED in favour of [keepalive_requests](#keepalive-requests)| +|[http2-max-concurrent-streams](#http2-max-concurrent-streams)|int|128|| +|[hsts](#hsts)|bool|"true"|| +|[hsts-include-subdomains](#hsts-include-subdomains)|bool|"true"|| +|[hsts-max-age](#hsts-max-age)|string|"15724800"|| +|[hsts-preload](#hsts-preload)|bool|"false"|| +|[keep-alive](#keep-alive)|int|75|| +|[keep-alive-requests](#keep-alive-requests)|int|1000|| +|[large-client-header-buffers](#large-client-header-buffers)|string|"4 8k"|| +|[log-format-escape-none](#log-format-escape-none)|bool|"false"|| +|[log-format-escape-json](#log-format-escape-json)|bool|"false"|| +|[log-format-upstream](#log-format-upstream)|string|`$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id`|| +|[log-format-stream](#log-format-stream)|string|`[$remote_addr] [$time_local] $protocol $status $bytes_sent $bytes_received $session_time`|| +|[enable-multi-accept](#enable-multi-accept)|bool|"true"|| +|[max-worker-connections](#max-worker-connections)|int|16384|| +|[max-worker-open-files](#max-worker-open-files)|int|0|| +|[map-hash-bucket-size](#max-hash-bucket-size)|int|64|| +|[nginx-status-ipv4-whitelist](#nginx-status-ipv4-whitelist)|[]string|"127.0.0.1"|| 
+|[nginx-status-ipv6-whitelist](#nginx-status-ipv6-whitelist)|[]string|"::1"|| +|[proxy-real-ip-cidr](#proxy-real-ip-cidr)|[]string|"0.0.0.0/0"|| +|[proxy-set-headers](#proxy-set-headers)|string|""|| +|[server-name-hash-max-size](#server-name-hash-max-size)|int|1024|| +|[server-name-hash-bucket-size](#server-name-hash-bucket-size)|int|``| +|[proxy-headers-hash-max-size](#proxy-headers-hash-max-size)|int|512|| +|[proxy-headers-hash-bucket-size](#proxy-headers-hash-bucket-size)|int|64|| +|[plugins](#plugins)|[]string| || +|[reuse-port](#reuse-port)|bool|"true"|| +|[server-tokens](#server-tokens)|bool|"false"|| +|[ssl-ciphers](#ssl-ciphers)|string|"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384"|| +|[ssl-ecdh-curve](#ssl-ecdh-curve)|string|"auto"|| +|[ssl-dh-param](#ssl-dh-param)|string|""|| +|[ssl-protocols](#ssl-protocols)|string|"TLSv1.2 TLSv1.3"|| +|[ssl-session-cache](#ssl-session-cache)|bool|"true"|| +|[ssl-session-cache-size](#ssl-session-cache-size)|string|"10m"|| +|[ssl-session-tickets](#ssl-session-tickets)|bool|"false"|| +|[ssl-session-ticket-key](#ssl-session-ticket-key)|string|``| +|[ssl-session-timeout](#ssl-session-timeout)|string|"10m"|| +|[ssl-buffer-size](#ssl-buffer-size)|string|"4k"|| +|[use-proxy-protocol](#use-proxy-protocol)|bool|"false"|| +|[proxy-protocol-header-timeout](#proxy-protocol-header-timeout)|string|"5s"|| +|[use-gzip](#use-gzip)|bool|"false"|| +|[use-geoip](#use-geoip)|bool|"true"|| +|[use-geoip2](#use-geoip2)|bool|"false"|| +|[enable-brotli](#enable-brotli)|bool|"false"|| +|[brotli-level](#brotli-level)|int|4|| +|[brotli-min-length](#brotli-min-length)|int|20|| +|[brotli-types](#brotli-types)|string|"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"|| +|[use-http2](#use-http2)|bool|"true"|| +|[gzip-disable](#gzip-disable)|string|""|| +|[gzip-level](#gzip-level)|int|1|| +|[gzip-min-length](#gzip-min-length)|int|256|| +|[gzip-types](#gzip-types)|string|"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component"|| +|[worker-processes](#worker-processes)|string|``|| +|[worker-cpu-affinity](#worker-cpu-affinity)|string|""|| +|[worker-shutdown-timeout](#worker-shutdown-timeout)|string|"240s"|| +|[load-balance](#load-balance)|string|"round_robin"|| +|[variables-hash-bucket-size](#variables-hash-bucket-size)|int|128|| +|[variables-hash-max-size](#variables-hash-max-size)|int|2048|| +|[upstream-keepalive-connections](#upstream-keepalive-connections)|int|320|| +|[upstream-keepalive-time](#upstream-keepalive-time)|string|"1h"|| +|[upstream-keepalive-timeout](#upstream-keepalive-timeout)|int|60|| +|[upstream-keepalive-requests](#upstream-keepalive-requests)|int|10000|| +|[limit-conn-zone-variable](#limit-conn-zone-variable)|string|"$binary_remote_addr"|| +|[proxy-stream-timeout](#proxy-stream-timeout)|string|"600s"|| 
+|[proxy-stream-next-upstream](#proxy-stream-next-upstream)|bool|"true"|| +|[proxy-stream-next-upstream-timeout](#proxy-stream-next-upstream-timeout)|string|"600s"|| +|[proxy-stream-next-upstream-tries](#proxy-stream-next-upstream-tries)|int|3|| +|[proxy-stream-responses](#proxy-stream-responses)|int|1|| +|[bind-address](#bind-address)|[]string|""|| +|[use-forwarded-headers](#use-forwarded-headers)|bool|"false"|| +|[enable-real-ip](#enable-real-ip)|bool|"false"|| +|[forwarded-for-header](#forwarded-for-header)|string|"X-Forwarded-For"|| +|[compute-full-forwarded-for](#compute-full-forwarded-for)|bool|"false"|| +|[proxy-add-original-uri-header](#proxy-add-original-uri-header)|bool|"false"|| +|[generate-request-id](#generate-request-id)|bool|"true"|| +|[enable-opentracing](#enable-opentracing)|bool|"false"|| +|[opentracing-operation-name](#opentracing-operation-name)|string|""|| +|[opentracing-location-operation-name](#opentracing-location-operation-name)|string|""|| +|[zipkin-collector-host](#zipkin-collector-host)|string|""|| +|[zipkin-collector-port](#zipkin-collector-port)|int|9411|| +|[zipkin-service-name](#zipkin-service-name)|string|"nginx"|| +|[zipkin-sample-rate](#zipkin-sample-rate)|float|1.0|| +|[jaeger-collector-host](#jaeger-collector-host)|string|""|| +|[jaeger-collector-port](#jaeger-collector-port)|int|6831|| +|[jaeger-endpoint](#jaeger-endpoint)|string|""|| +|[jaeger-service-name](#jaeger-service-name)|string|"nginx"|| +|[jaeger-propagation-format](#jaeger-propagation-format)|string|"jaeger"|| +|[jaeger-sampler-type](#jaeger-sampler-type)|string|"const"|| +|[jaeger-sampler-param](#jaeger-sampler-param)|string|"1"|| +|[jaeger-sampler-host](#jaeger-sampler-host)|string|"http://127.0.0.1"|| +|[jaeger-sampler-port](#jaeger-sampler-port)|int|5778|| +|[jaeger-trace-context-header-name](#jaeger-trace-context-header-name)|string|uber-trace-id|| +|[jaeger-debug-header](#jaeger-debug-header)|string|uber-debug-id|| +|[jaeger-baggage-header](#jaeger-baggage-header)|string|jaeger-baggage|| +|[jaeger-trace-baggage-header-prefix](#jaeger-trace-baggage-header-prefix)|string|uberctx-|| +|[datadog-collector-host](#datadog-collector-host)|string|""|| +|[datadog-collector-port](#datadog-collector-port)|int|8126|| +|[datadog-service-name](#datadog-service-name)|string|"nginx"|| +|[datadog-environment](#datadog-environment)|string|"prod"|| +|[datadog-operation-name-override](#datadog-operation-name-override)|string|"nginx.handle"|| +|[datadog-priority-sampling](#datadog-priority-sampling)|bool|"true"|| +|[datadog-sample-rate](#datadog-sample-rate)|float|1.0|| +|[enable-opentelemetry](#enable-opentelemetry)|bool|"false"|| +|[opentelemetry-trust-incoming-span](#opentelemetry-trust-incoming-span)|bool|"true"|| +|[opentelemetry-operation-name](#opentelemetry-operation-name)|string|""|| +|[opentelemetry-config](#/etc/nginx/opentelemetry.toml)|string|"/etc/nginx/opentelemetry.toml"|| +|[otlp-collector-host](#otlp-collector-host)|string|""|| +|[otlp-collector-port](#otlp-collector-port)|int|4317|| +|[otel-max-queuesize](#otel-max-queuesize)|int||| +|[otel-schedule-delay-millis](#otel-schedule-delay-millis)|int||| +|[otel-max-export-batch-size](#otel-max-export-batch-size)|int||| +|[otel-service-name](#otel-service-name)|string|"nginx"|| +|[otel-sampler](#otel-sampler)|string|"AlwaysOff"|| +|[otel-sampler-parent-based](#otel-sampler-parent-based)|bool|"false"|| +|[otel-sampler-ratio](#otel-sampler-ratio)|float|0.01|| +|[main-snippet](#main-snippet)|string|""|| +|[http-snippet](#http-snippet)|string|""|| 
+|[server-snippet](#server-snippet)|string|""|| +|[stream-snippet](#stream-snippet)|string|""|| +|[location-snippet](#location-snippet)|string|""|| +|[custom-http-errors](#custom-http-errors)|[]int|[]int{}|| +|[proxy-body-size](#proxy-body-size)|string|"1m"|| +|[proxy-connect-timeout](#proxy-connect-timeout)|int|5|| +|[proxy-read-timeout](#proxy-read-timeout)|int|60|| +|[proxy-send-timeout](#proxy-send-timeout)|int|60|| +|[proxy-buffers-number](#proxy-buffers-number)|int|4|| +|[proxy-buffer-size](#proxy-buffer-size)|string|"4k"|| +|[proxy-cookie-path](#proxy-cookie-path)|string|"off"|| +|[proxy-cookie-domain](#proxy-cookie-domain)|string|"off"|| +|[proxy-next-upstream](#proxy-next-upstream)|string|"error timeout"|| +|[proxy-next-upstream-timeout](#proxy-next-upstream-timeout)|int|0|| +|[proxy-next-upstream-tries](#proxy-next-upstream-tries)|int|3|| +|[proxy-redirect-from](#proxy-redirect-from)|string|"off"|| +|[proxy-request-buffering](#proxy-request-buffering)|string|"on"|| +|[ssl-redirect](#ssl-redirect)|bool|"true"|| +|[force-ssl-redirect](#force-ssl-redirect)|bool|"false"|| +|[denylist-source-range](#denylist-source-range)|[]string|[]string{}|| +|[whitelist-source-range](#whitelist-source-range)|[]string|[]string{}|| +|[skip-access-log-urls](#skip-access-log-urls)|[]string|[]string{}|| +|[limit-rate](#limit-rate)|int|0|| +|[limit-rate-after](#limit-rate-after)|int|0|| +|[lua-shared-dicts](#lua-shared-dicts)|string|""|| +|[http-redirect-code](#http-redirect-code)|int|308|| +|[proxy-buffering](#proxy-buffering)|string|"off"|| +|[limit-req-status-code](#limit-req-status-code)|int|503|| +|[limit-conn-status-code](#limit-conn-status-code)|int|503|| +|[enable-syslog](#enable-syslog)|bool|false|| +|[syslog-host](#syslog-host)|string|""|| +|[syslog-port](#syslog-port)|int|514|| +|[no-tls-redirect-locations](#no-tls-redirect-locations)|string|"/.well-known/acme-challenge"|| +|[global-auth-url](#global-auth-url)|string|""|| +|[global-auth-method](#global-auth-method)|string|""|| +|[global-auth-signin](#global-auth-signin)|string|""|| +|[global-auth-signin-redirect-param](#global-auth-signin-redirect-param)|string|"rd"|| +|[global-auth-response-headers](#global-auth-response-headers)|string|""|| +|[global-auth-request-redirect](#global-auth-request-redirect)|string|""|| +|[global-auth-snippet](#global-auth-snippet)|string|""|| +|[global-auth-cache-key](#global-auth-cache-key)|string|""|| +|[global-auth-cache-duration](#global-auth-cache-duration)|string|"200 202 401 5m"|| +|[no-auth-locations](#no-auth-locations)|string|"/.well-known/acme-challenge"|| +|[block-cidrs](#block-cidrs)|[]string|""|| +|[block-user-agents](#block-user-agents)|[]string|""|| +|[block-referers](#block-referers)|[]string|""|| +|[proxy-ssl-location-only](#proxy-ssl-location-only)|bool|"false"|| +|[default-type](#default-type)|string|"text/html"|| +|[global-rate-limit-memcached-host](#global-rate-limit)|string|""|| +|[global-rate-limit-memcached-port](#global-rate-limit)|int|11211|| +|[global-rate-limit-memcached-connect-timeout](#global-rate-limit)|int|50|| +|[global-rate-limit-memcached-max-idle-timeout](#global-rate-limit)|int|10000|| +|[global-rate-limit-memcached-pool-size](#global-rate-limit)|int|50|| +|[global-rate-limit-status-code](#global-rate-limit)|int|429|| +|[service-upstream](#service-upstream)|bool|"false"|| +|[ssl-reject-handshake](#ssl-reject-handshake)|bool|"false"|| +|[debug-connections](#debug-connections)|[]string|"127.0.0.1,1.1.1.1/24"|| 
+|[strict-validate-path-type](#strict-validate-path-type)|bool|"false" (v1.7.x)|| ## add-headers diff --git a/go.mod b/go.mod index 1bdcdbc61..4b75cbd12 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/onsi/ginkgo/v2 v2.9.5 github.com/opencontainers/runc v1.1.7 github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 github.com/spf13/cobra v1.7.0 @@ -25,8 +25,8 @@ require ( github.com/stretchr/testify v1.8.4 github.com/yudai/gojsondiff v1.0.0 github.com/zakjan/cert-chain-resolver v0.0.0-20211122211144-c6b0b792af9a - golang.org/x/crypto v0.9.0 - google.golang.org/grpc v1.55.0 + golang.org/x/crypto v0.10.0 + google.golang.org/grpc v1.56.1 google.golang.org/grpc/examples v0.0.0-20221220003428-4f16fbe410f7 gopkg.in/go-playground/pool.v3 v3.1.1 gopkg.in/mcuadros/go-syslog.v2 v2.3.0 @@ -93,7 +93,7 @@ require ( github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/sergi/go-diff v1.1.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/xlab/treeprint v1.1.0 // indirect @@ -103,13 +103,13 @@ require ( golang.org/x/mod v0.10.0 // indirect golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 9e65d666f..db879481c 100644 --- a/go.sum +++ b/go.sum @@ -306,8 +306,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -326,8 +326,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT 
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= @@ -390,8 +390,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -524,19 +524,19 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -647,8 +647,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -661,8 +661,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/examples v0.0.0-20221220003428-4f16fbe410f7 h1:pPsdyuBif+uoyUoL19yuj/TCfUPsmpJHJZhWQ98JGLU= google.golang.org/grpc/examples v0.0.0-20221220003428-4f16fbe410f7/go.mod h1:8pQa1yxxkh+EsxUK8/455D5MSbv3vgmEJqKCH3y17mI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/hack/manifest-templates/provider/oracle/kustomization.yaml b/hack/manifest-templates/provider/oracle/kustomization.yaml new file mode 100644 index 000000000..cd6ef95be --- /dev/null +++ b/hack/manifest-templates/provider/oracle/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../../common diff --git a/hack/manifest-templates/provider/oracle/values.yaml b/hack/manifest-templates/provider/oracle/values.yaml new file mode 100644 index 000000000..b4480531f --- /dev/null +++ 
b/hack/manifest-templates/provider/oracle/values.yaml @@ -0,0 +1,8 @@ +controller: + service: + type: LoadBalancer + externalTrafficPolicy: Local + annotations: + service.beta.kubernetes.io/oci-load-balancer-shape: "flexible" + service.beta.kubernetes.io/oci-load-balancer-shape-flex-min: "10" + service.beta.kubernetes.io/oci-load-balancer-shape-flex-max: "100" diff --git a/images/custom-error-pages/rootfs/Dockerfile b/images/custom-error-pages/rootfs/Dockerfile index 30ac54693..04bcb8e08 100755 --- a/images/custom-error-pages/rootfs/Dockerfile +++ b/images/custom-error-pages/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.20.4-alpine3.18 as builder +FROM golang:1.20.5-alpine3.18 as builder RUN apk update \ && apk upgrade && apk add git diff --git a/images/ext-auth-example-authsvc/rootfs/Dockerfile b/images/ext-auth-example-authsvc/rootfs/Dockerfile index 96dcd9a39..02d92d773 100644 --- a/images/ext-auth-example-authsvc/rootfs/Dockerfile +++ b/images/ext-auth-example-authsvc/rootfs/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.4-alpine3.18 as builder +FROM golang:1.20.5-alpine3.18 as builder RUN mkdir /authsvc WORKDIR /authsvc COPY . ./ diff --git a/images/fastcgi-helloserver/rootfs/Dockerfile b/images/fastcgi-helloserver/rootfs/Dockerfile index a11834373..096d31abb 100755 --- a/images/fastcgi-helloserver/rootfs/Dockerfile +++ b/images/fastcgi-helloserver/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.20.4-alpine3.18 as builder +FROM golang:1.20.5-alpine3.18 as builder WORKDIR /go/src/k8s.io/ingress-nginx/images/fastcgi diff --git a/images/fastcgi-helloserver/rootfs/main.go b/images/fastcgi-helloserver/rootfs/main.go index 91db60c26..a42c9a487 100644 --- a/images/fastcgi-helloserver/rootfs/main.go +++ b/images/fastcgi-helloserver/rootfs/main.go @@ -26,5 +26,7 @@ func main() { if err != nil { panic(err) } - fcgi.Serve(l, nil) + if err := fcgi.Serve(l, nil); err != nil { + panic(err) + } } diff --git a/images/go-grpc-greeter-server/rootfs/Dockerfile b/images/go-grpc-greeter-server/rootfs/Dockerfile index d457b43e5..46f916fb4 100644 --- a/images/go-grpc-greeter-server/rootfs/Dockerfile +++ b/images/go-grpc-greeter-server/rootfs/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.4-alpine3.18 as build +FROM golang:1.20.5-alpine3.18 as build WORKDIR /go/src/greeter-server diff --git a/images/httpbun/rootfs/Dockerfile b/images/httpbun/rootfs/Dockerfile index a1775d303..e88716bb8 100644 --- a/images/httpbun/rootfs/Dockerfile +++ b/images/httpbun/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.20 AS builder +FROM golang:1.20.5 AS builder ENV LC_ALL=C.UTF-8 ENV LANG=C.UTF-8 diff --git a/images/kube-webhook-certgen/rootfs/Dockerfile b/images/kube-webhook-certgen/rootfs/Dockerfile index 40a2c31ac..13226dbe2 100644 --- a/images/kube-webhook-certgen/rootfs/Dockerfile +++ b/images/kube-webhook-certgen/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM --platform=$BUILDPLATFORM golang:1.20.1 as builder +FROM --platform=$BUILDPLATFORM golang:1.20.5 as builder ARG BUILDPLATFORM ARG TARGETARCH diff --git a/images/kube-webhook-certgen/rootfs/pkg/k8s/k8s_test.go b/images/kube-webhook-certgen/rootfs/pkg/k8s/k8s_test.go index deaeb3540..f11bef981 100644 --- a/images/kube-webhook-certgen/rootfs/pkg/k8s/k8s_test.go +++ b/images/kube-webhook-certgen/rootfs/pkg/k8s/k8s_test.go @@ -3,8 +3,8 @@ package k8s import ( "bytes" "context" + "crypto/rand" "errors" - "math/rand" "testing" "time" diff --git a/images/nginx/TAG b/images/nginx/TAG index 77d6f4ca2..8acdd82b7 100644 --- a/images/nginx/TAG +++ b/images/nginx/TAG @@ -1 +1 @@ -0.0.0 +0.0.1 diff --git a/images/nginx/cloudbuild.yaml b/images/nginx/cloudbuild.yaml index c581700dd..71051f26d 100644 --- a/images/nginx/cloudbuild.yaml +++ b/images/nginx/cloudbuild.yaml @@ -8,8 +8,6 @@ steps: entrypoint: bash env: - DOCKER_CLI_EXPERIMENTAL=enabled - - SHORT_SHA=$SHORT_SHA - - BASE_REF=$_PULL_BASE_REF - REGISTRY=gcr.io/k8s-staging-ingress-nginx - HOME=/root args: @@ -17,7 +15,3 @@ steps: - | gcloud auth configure-docker \ && cd images/nginx && make push -substitutions: - _GIT_TAG: "12345" - _PULL_BASE_REF: "master" - _PULL_BASE_SHA: '12345' diff --git a/images/nginx/rootfs/build.sh b/images/nginx/rootfs/build.sh index 44c7b20f8..5e215bf72 100755 --- a/images/nginx/rootfs/build.sh +++ b/images/nginx/rootfs/build.sh @@ -53,8 +53,8 @@ export JAEGER_VERSION=0.7.0 # Check for recent changes: https://github.com/msgpack/msgpack-c/compare/cpp-3.3.0...master export MSGPACK_VERSION=3.3.0 -# Check for recent changes: https://github.com/DataDog/dd-opentracing-cpp/compare/v1.3.2...master -export DATADOG_CPP_VERSION=1.3.2 +# Check for recent changes: https://github.com/DataDog/dd-opentracing-cpp/compare/v1.3.7...master +export DATADOG_CPP_VERSION=1.3.7 # Check for recent changes: https://github.com/SpiderLabs/ModSecurity-nginx/compare/v1.0.3...master export MODSECURITY_VERSION=1.0.3 @@ -80,11 +80,6 @@ export LUA_CJSON_VERSION=2.1.0.10 # Check for recent changes: https://github.com/leev/ngx_http_geoip2_module/compare/3.3...master export GEOIP2_VERSION=a26c6beed77e81553686852dceb6c7fdacc5970d -# Check for recent changes: https://github.com/msva/nginx_ajp_module/compare/fcbb2ccca4901d317ecd7a9dabb3fec9378ff40f...master -# This is a fork from https://github.com/yaoweibin/nginx_ajp_module -# Since it has not been updated and is not compatible with NGINX 1.21 -export NGINX_AJP_VERSION=fcbb2ccca4901d317ecd7a9dabb3fec9378ff40f - # Check for recent changes: https://github.com/openresty/luajit2/compare/v2.1-20220411...v2.1-agentzh export LUAJIT_VERSION=2.1-20220411 @@ -265,15 +260,12 @@ get_src d3f2c870f8f88477b01726b32accab30f6e5d57ae59c5ec87374ff73d0794316 \ "https://github.com/openresty/luajit2/archive/v$LUAJIT_VERSION.tar.gz" fi -get_src 586f92166018cc27080d34e17c59d68219b85af745edf3cc9fe41403fc9b4ac6 \ +get_src 8d39c6b23f941a2d11571daaccc04e69539a3fcbcc50a631837560d5861a7b96 \ "https://github.com/DataDog/dd-opentracing-cpp/archive/v$DATADOG_CPP_VERSION.tar.gz" get_src 4c1933434572226942c65b2f2b26c8a536ab76aa771a3c7f6c2629faa764976b \ "https://github.com/leev/ngx_http_geoip2_module/archive/$GEOIP2_VERSION.tar.gz" -get_src 778fcca851bd69dabfb382dc827d2ee07662f7eca36b5e66e67d5512bad75ef8 \ - "https://github.com/msva/nginx_ajp_module/archive/$NGINX_AJP_VERSION.tar.gz" - get_src 5d16e623d17d4f42cc64ea9cfb69ca960d313e12f5d828f785dd227cc483fcbd \ 
"https://github.com/openresty/lua-resty-upload/archive/v$LUA_RESTY_UPLOAD_VERSION.tar.gz" @@ -633,7 +625,6 @@ WITH_MODULES=" \ --add-module=$BUILD_PATH/lua-nginx-module-$LUA_NGX_VERSION \ --add-module=$BUILD_PATH/stream-lua-nginx-module-$LUA_STREAM_NGX_VERSION \ --add-module=$BUILD_PATH/lua-upstream-nginx-module-$LUA_UPSTREAM_VERSION \ - --add-module=$BUILD_PATH/nginx_ajp_module-${NGINX_AJP_VERSION} \ --add-dynamic-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \ --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/opentracing \ --add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY_VERSION \ diff --git a/images/opentelemetry/rootfs/Dockerfile b/images/opentelemetry/rootfs/Dockerfile index 69d82cda2..d15c28bbf 100644 --- a/images/opentelemetry/rootfs/Dockerfile +++ b/images/opentelemetry/rootfs/Dockerfile @@ -21,9 +21,11 @@ COPY . /opt/third_party/ # install build tools RUN apk update \ && apk upgrade \ - && apk add -U bash cmake \ + && apk add -U bash cmake ninja \ && bash /opt/third_party/build.sh -p +ENV NINJA_STATUS "[%p/%f/%t]" + # install gRPC FROM base as grpc RUN bash /opt/third_party/build.sh -g v1.49.2 @@ -39,7 +41,17 @@ COPY --from=grpc /opt/third_party/install/ /usr COPY --from=otel-cpp /opt/third_party/install/ /usr RUN bash /opt/third_party/build.sh -n -FROM alpine:3.18.0 as final -COPY --from=base /opt/third_party/init_module.sh /usr/local/bin/init_module.sh +FROM cgr.dev/chainguard/go:latest as build-init + +WORKDIR /go/src/app +COPY . . + +RUN go mod download +RUN CGO_ENABLED=0 go build -o /go/bin/init_module + +FROM cgr.dev/chainguard/static as final +COPY --from=build-init /go/bin/init_module / COPY --from=nginx /etc/nginx/modules /etc/nginx/modules COPY --from=nginx /opt/third_party/install/lib /etc/nginx/modules + +CMD ["/init_module"] diff --git a/images/opentelemetry/rootfs/build.sh b/images/opentelemetry/rootfs/build.sh index 6ad4601c6..30faad304 100755 --- a/images/opentelemetry/rootfs/build.sh +++ b/images/opentelemetry/rootfs/build.sh @@ -70,6 +70,7 @@ install_grpc() mkdir -p $BUILD_PATH/grpc cd ${BUILD_PATH}/grpc cmake -DCMAKE_INSTALL_PREFIX=${INSTAL_DIR} \ + -G Ninja \ -DGRPC_GIT_TAG=${GRPC_GIT_TAG} /opt/third_party \ -DgRPC_BUILD_GRPC_NODE_PLUGIN=OFF \ -DgRPC_BUILD_GRPC_OBJECTIVE_C_PLUGIN=OFF \ @@ -92,6 +93,7 @@ install_otel() cd .build cmake -DCMAKE_BUILD_TYPE=Release \ + -G Ninja \ -DCMAKE_POSITION_INDEPENDENT_CODE=TRUE \ -DWITH_ZIPKIN=OFF \ -DWITH_JAEGER=OFF \ @@ -143,6 +145,7 @@ install_nginx() mkdir -p build cd build cmake -DCMAKE_BUILD_TYPE=Release \ + -G Ninja \ -DCMAKE_INSTALL_PREFIX=${INSTAL_DIR} \ -DBUILD_SHARED_LIBS=ON \ -DNGINX_VERSION=${NGINX_VERSION} \ diff --git a/images/opentelemetry/rootfs/go.mod b/images/opentelemetry/rootfs/go.mod new file mode 100644 index 000000000..f636c81b7 --- /dev/null +++ b/images/opentelemetry/rootfs/go.mod @@ -0,0 +1,3 @@ +module init-otel + +go 1.20 diff --git a/images/opentelemetry/rootfs/init_module.go b/images/opentelemetry/rootfs/init_module.go new file mode 100644 index 000000000..bebec728f --- /dev/null +++ b/images/opentelemetry/rootfs/init_module.go @@ -0,0 +1,104 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "io" + "os" + "path/filepath" +) + +func main() { + // Enable error handling for all operations + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func run() error { + // Create the target directory if it doesn't exist + targetDir := "/modules_mount/etc/nginx/modules/otel" + err := os.MkdirAll(targetDir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create target directory: %w", err) + } + + // Copy files from source directory to target directory + sourceDir := "/etc/nginx/modules/" + err = filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories + if info.IsDir() { + return nil + } + + // Calculate the destination path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + destPath := filepath.Join(targetDir, relPath) + + // Create the destination directory if it doesn't exist + destDir := filepath.Dir(destPath) + err = os.MkdirAll(destDir, os.ModePerm) + if err != nil { + return err + } + + // Copy the file + err = copyFile(path, destPath) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return fmt.Errorf("failed to copy files: %w", err) + } + + return nil +} + +func copyFile(sourcePath, destPath string) error { + sourceFile, err := os.Open(sourcePath) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(destPath) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + if err != nil { + return err + } + + return nil +} diff --git a/images/opentelemetry/rootfs/init_module.sh b/images/opentelemetry/rootfs/init_module.sh deleted file mode 100755 index 5a675aa2b..000000000 --- a/images/opentelemetry/rootfs/init_module.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# Copyright 2021 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -o errexit -set -o nounset -set -o pipefail - -mkdir -p /modules_mount/etc/nginx/modules/otel -cp -R /etc/nginx/modules/* /modules_mount/etc/nginx/modules/otel diff --git a/images/test-runner/Makefile b/images/test-runner/Makefile index 1d1f4f638..acf3dd660 100644 --- a/images/test-runner/Makefile +++ b/images/test-runner/Makefile @@ -43,7 +43,7 @@ image: --pull \ --push \ --build-arg BASE_IMAGE=${NGINX_BASE_IMAGE} \ - --build-arg GOLANG_VERSION=1.20.4 \ + --build-arg GOLANG_VERSION=1.20.5 \ --build-arg ETCD_VERSION=3.4.3-0 \ --build-arg K8S_RELEASE=v1.26.0 \ --build-arg RESTY_CLI_VERSION=0.27 \ @@ -64,7 +64,7 @@ build: ensure-buildx --progress=${PROGRESS} \ --pull \ --build-arg BASE_IMAGE=${NGINX_BASE_IMAGE} \ - --build-arg GOLANG_VERSION=1.20.4 \ + --build-arg GOLANG_VERSION=1.20.5 \ --build-arg ETCD_VERSION=3.4.3-0 \ --build-arg K8S_RELEASE=v1.26.0 \ --build-arg RESTY_CLI_VERSION=0.27 \ diff --git a/images/test-runner/cloudbuild.yaml b/images/test-runner/cloudbuild.yaml index 761ea7f28..b17de9b31 100644 --- a/images/test-runner/cloudbuild.yaml +++ b/images/test-runner/cloudbuild.yaml @@ -6,8 +6,6 @@ steps: entrypoint: bash env: - DOCKER_CLI_EXPERIMENTAL=enabled - - SHORT_SHA=$SHORT_SHA - - BASE_REF=$_PULL_BASE_REF - REGISTRY=gcr.io/k8s-staging-ingress-nginx # default cloudbuild has HOME=/builder/home and docker buildx is in /root/.docker/cli-plugins/docker-buildx # set the home to /root explicitly to if using docker buildx @@ -17,7 +15,3 @@ steps: - | gcloud auth configure-docker \ && cd images/test-runner && make push -substitutions: - _GIT_TAG: "12345" - _PULL_BASE_REF: "master" - _PULL_BASE_SHA: '12345' \ No newline at end of file diff --git a/internal/admission/controller/main_test.go b/internal/admission/controller/main_test.go index 0a547d4be..8c42f87ef 100644 --- a/internal/admission/controller/main_test.go +++ b/internal/admission/controller/main_test.go @@ -67,7 +67,7 @@ func TestHandleAdmission(t *testing.T) { Checker: failTestChecker{t: t}, } - result, err := adm.HandleAdmission(&admissionv1.AdmissionReview{ + _, err := adm.HandleAdmission(&admissionv1.AdmissionReview{ Request: &admissionv1.AdmissionRequest{ Kind: v1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, }, @@ -76,12 +76,12 @@ func TestHandleAdmission(t *testing.T) { t.Fatalf("with a non ingress resource, the check should not pass") } - result, err = adm.HandleAdmission(nil) + _, err = adm.HandleAdmission(nil) if err == nil { t.Fatalf("with a nil AdmissionReview request, the check should not pass") } - result, err = adm.HandleAdmission(&admissionv1.AdmissionReview{ + result, err := adm.HandleAdmission(&admissionv1.AdmissionReview{ Request: &admissionv1.AdmissionRequest{ Kind: v1.GroupVersionKind{Group: networking.GroupName, Version: "v1", Kind: "Ingress"}, Object: runtime.RawExtension{ @@ -114,7 +114,9 @@ func TestHandleAdmission(t *testing.T) { err: fmt.Errorf("this is a test error"), } - adm.HandleAdmission(review) + if _, err := adm.HandleAdmission(review); err != nil { + t.Errorf("unexpected error: %v", err) + } if review.Response.Allowed { t.Fatalf("when the checker returns an error, the request should not be allowed") } @@ -124,7 +126,9 @@ func TestHandleAdmission(t *testing.T) { err: nil, } - adm.HandleAdmission(review) + if _, err := adm.HandleAdmission(review); err != nil { + t.Errorf("unexpected error: %v", err) + } if !review.Response.Allowed { t.Fatalf("when the checker returns no error, the request should be allowed") } diff --git a/internal/admission/controller/server.go 
b/internal/admission/controller/server.go index 29449de50..3fa70971f 100644 --- a/internal/admission/controller/server.go +++ b/internal/admission/controller/server.go @@ -31,7 +31,9 @@ var ( ) func init() { - admissionv1.AddToScheme(scheme) + if err := admissionv1.AddToScheme(scheme); err != nil { + klog.ErrorS(err, "Failed to add scheme") + } } // AdmissionController checks if an object diff --git a/internal/ingress/annotations/backendprotocol/main.go b/internal/ingress/annotations/backendprotocol/main.go index d8ea72386..c749072e3 100644 --- a/internal/ingress/annotations/backendprotocol/main.go +++ b/internal/ingress/annotations/backendprotocol/main.go @@ -31,7 +31,7 @@ import ( const HTTP = "HTTP" var ( - validProtocols = regexp.MustCompile(`^(AUTO_HTTP|HTTP|HTTPS|AJP|GRPC|GRPCS|FCGI)$`) + validProtocols = regexp.MustCompile(`^(AUTO_HTTP|HTTP|HTTPS|GRPC|GRPCS|FCGI)$`) ) type backendProtocol struct { diff --git a/internal/ingress/annotations/mirror/main.go b/internal/ingress/annotations/mirror/main.go index cd54a9826..9cb1b0ede 100644 --- a/internal/ingress/annotations/mirror/main.go +++ b/internal/ingress/annotations/mirror/main.go @@ -93,12 +93,13 @@ func (a mirror) Parse(ing *networking.Ingress) (interface{}, error) { config.Host, err = parser.GetStringAnnotation("mirror-host", ing) if err != nil { if config.Target != "" { - url, err := parser.StringToURL(config.Target) + target := strings.Split(config.Target, "$") + + url, err := parser.StringToURL(target[0]) if err != nil { config.Host = "" } else { - hostname := strings.Split(url.Hostname(), "$") - config.Host = hostname[0] + config.Host = url.Hostname() } } } diff --git a/internal/ingress/annotations/mirror/main_test.go b/internal/ingress/annotations/mirror/main_test.go index f744ab552..add90d768 100644 --- a/internal/ingress/annotations/mirror/main_test.go +++ b/internal/ingress/annotations/mirror/main_test.go @@ -48,6 +48,24 @@ func TestParse(t *testing.T) { Target: "https://test.env.com/$request_uri", Host: "test.env.com", }}, + {map[string]string{backendURL: "https://test.env.com$request_uri"}, &Config{ + Source: ngxURI, + RequestBody: "on", + Target: "https://test.env.com$request_uri", + Host: "test.env.com", + }}, + {map[string]string{backendURL: "https://test.env.com:8080$request_uri"}, &Config{ + Source: ngxURI, + RequestBody: "on", + Target: "https://test.env.com:8080$request_uri", + Host: "test.env.com", + }}, + {map[string]string{backendURL: "https://test.env.com:8080/$request_uri"}, &Config{ + Source: ngxURI, + RequestBody: "on", + Target: "https://test.env.com:8080/$request_uri", + Host: "test.env.com", + }}, {map[string]string{requestBody: "off"}, &Config{ Source: "", RequestBody: "off", diff --git a/internal/ingress/controller/checker_test.go b/internal/ingress/controller/checker_test.go index 848b4ecb6..52ded9dfe 100644 --- a/internal/ingress/controller/checker_test.go +++ b/internal/ingress/controller/checker_test.go @@ -76,7 +76,10 @@ func TestNginxCheck(t *testing.T) { }) // create pid file - os.MkdirAll("/tmp/nginx", file.ReadWriteExecuteByUser) + if err := os.MkdirAll("/tmp/nginx", file.ReadWriteExecuteByUser); err != nil { + t.Errorf("unexpected error creating pid file: %v", err) + } + pidFile, err := os.Create(nginx.PID) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -90,14 +93,23 @@ func TestNginxCheck(t *testing.T) { // start dummy process to use the PID cmd := exec.Command("sleep", "3600") - cmd.Start() + if err := cmd.Start(); err != nil { + t.Errorf("unexpected error: %v", err) + } pid := 
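The mirror annotation change above derives Host by splitting the target on the first "$" before handing it to the URL parser, so targets that append an NGINX variable directly to the authority (with or without a port or trailing slash, as the new test cases cover) still resolve to test.env.com. A small standalone sketch of that extraction; url.Parse stands in here for the controller's parser.StringToURL helper, purely for illustration.

```go
// Standalone approximation of the new mirror-host extraction; url.Parse is used
// in place of parser.StringToURL for illustration only.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// mirrorHost returns the hostname portion of a mirror target, ignoring any
// NGINX variable suffix such as "$request_uri".
func mirrorHost(target string) string {
	prefix := strings.Split(target, "$")[0]
	u, err := url.Parse(prefix)
	if err != nil {
		return ""
	}
	return u.Hostname()
}

func main() {
	for _, target := range []string{
		"https://test.env.com/$request_uri",
		"https://test.env.com$request_uri",
		"https://test.env.com:8080$request_uri",
		"https://test.env.com:8080/$request_uri",
	} {
		fmt.Printf("%-45s -> %s\n", target, mirrorHost(target))
	}
}
```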
cmd.Process.Pid - defer cmd.Process.Kill() + defer func() { + if err := cmd.Process.Kill(); err != nil { + t.Errorf("unexpected error killing the process: %v", err) + } + }() go func() { - cmd.Wait() + cmd.Wait() //nolint:errcheck }() - pidFile.Write([]byte(fmt.Sprintf("%v", pid))) + if _, err := pidFile.Write([]byte(fmt.Sprintf("%v", pid))); err != nil { + t.Errorf("unexpected error writing the pid file: %v", err) + } + pidFile.Close() healthz.InstallPathHandler(mux, tt.healthzPath, n) @@ -109,7 +121,7 @@ func TestNginxCheck(t *testing.T) { }) // pollute pid file - pidFile.Write([]byte(fmt.Sprint("999999"))) + pidFile.Write([]byte("999999")) //nolint:errcheck pidFile.Close() t.Run("bad pid", func(t *testing.T) { diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index ad4bebde0..000bfc730 100644 --- a/internal/ingress/controller/config/config.go +++ b/internal/ingress/controller/config/config.go @@ -91,7 +91,7 @@ const ( // Configuration represents the content of nginx.conf file type Configuration struct { - defaults.Backend `json:",squash"` + defaults.Backend `json:",squash"` //nolint:staticcheck // AllowSnippetAnnotations enable users to add their own snippets via ingress annotation. // If disabled, only snippets added via ConfigMap are added to ingress. @@ -215,16 +215,19 @@ type Configuration struct { // https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size // HTTP2MaxFieldSize Limits the maximum size of an HPACK-compressed request header field + // NOTE: Deprecated HTTP2MaxFieldSize string `json:"http2-max-field-size,omitempty"` // https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size // HTTP2MaxHeaderSize Limits the maximum size of the entire request header list after HPACK decompression + // NOTE: Deprecated HTTP2MaxHeaderSize string `json:"http2-max-header-size,omitempty"` // http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests // HTTP2MaxRequests Sets the maximum number of requests (including push requests) that can be served // through one HTTP/2 connection, after which the next client request will lead to connection closing // and the need of establishing a new connection. 
+ // NOTE: Deprecated HTTP2MaxRequests int `json:"http2-max-requests,omitempty"` // http://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_streams @@ -882,9 +885,9 @@ func NewDefault() Configuration { ComputeFullForwardedFor: false, ProxyAddOriginalURIHeader: false, GenerateRequestID: true, - HTTP2MaxFieldSize: "4k", - HTTP2MaxHeaderSize: "16k", - HTTP2MaxRequests: 1000, + HTTP2MaxFieldSize: "", + HTTP2MaxHeaderSize: "", + HTTP2MaxRequests: 0, HTTP2MaxConcurrentStreams: 128, HTTPRedirectCode: 308, HSTS: true, diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 20de63fd1..4a4417130 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -271,8 +271,6 @@ func (n *NGINXController) CheckWarning(ing *networking.Ingress) ([]string, error "influxdb-host", "influxdb-server-name", "secure-verify-ca-secret", - "fastcgi-params-configmap", - "fastcgi-index", ) // Skip checks if the ingress is marked as deleted diff --git a/internal/ingress/controller/controller_test.go b/internal/ingress/controller/controller_test.go index 41ce9de87..44184f6b9 100644 --- a/internal/ingress/controller/controller_test.go +++ b/internal/ingress/controller/controller_test.go @@ -33,7 +33,7 @@ import ( "github.com/eapache/channels" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" networking "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,7 +48,6 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/annotations/proxyssl" "k8s.io/ingress-nginx/internal/ingress/annotations/sessionaffinity" - "k8s.io/ingress-nginx/internal/ingress/controller/config" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" "k8s.io/ingress-nginx/internal/ingress/controller/ingressclass" "k8s.io/ingress-nginx/internal/ingress/controller/store" @@ -146,7 +145,7 @@ func (ntc testNginxTestCommand) Test(cfg string) ([]byte, error) { type fakeTemplate struct{} -func (fakeTemplate) Write(conf config.TemplateConfig) ([]byte, error) { +func (fakeTemplate) Write(conf ngx_config.TemplateConfig) ([]byte, error) { r := []byte{} for _, s := range conf.Servers { if len(r) > 0 { @@ -159,7 +158,7 @@ func (fakeTemplate) Write(conf config.TemplateConfig) ([]byte, error) { func TestCheckIngress(t *testing.T) { defer func() { - filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error { if info.IsDir() && os.TempDir() != path { return filepath.SkipDir } @@ -168,6 +167,9 @@ func TestCheckIngress(t *testing.T) { } return nil }) + if err != nil { + t.Errorf("unexpected error: %v", err) + } }() err := file.CreateRequiredDirectories() @@ -177,9 +179,13 @@ func TestCheckIngress(t *testing.T) { // Ensure no panic with wrong arguments var nginx *NGINXController - nginx.CheckIngress(nil) + if err := nginx.CheckIngress(nil); err != nil { + t.Errorf("unexpected error: %v", err) + } nginx = newNGINXController(t) - nginx.CheckIngress(nil) + if err := nginx.CheckIngress(nil); err != nil { + t.Errorf("unexpected error: %v", err) + } nginx.metricCollector = metric.DummyCollector{} nginx.t = fakeTemplate{} @@ -432,7 +438,7 @@ func TestCheckWarning(t *testing.T) { t.Run("adding invalid annotations increases the warning count", func(t *testing.T) { 
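http2-max-field-size, http2-max-header-size and http2-max-requests are now marked deprecated and default to empty/zero, and the nginx.tmpl change later in this patch only emits the corresponding directives when a user still sets them. A toy text/template illustration of that guard follows; it mirrors the template condition but is not the controller's actual template plumbing.

```go
// Illustrative only: mimics the guards added to rootfs/etc/nginx/template/nginx.tmpl,
// which render the http2_* directives only when the deprecated options are set.
package main

import (
	"os"
	"text/template"
)

type cfg struct {
	HTTP2MaxFieldSize  string
	HTTP2MaxHeaderSize string
	HTTP2MaxRequests   int
}

const tmpl = `{{ if and (ne .HTTP2MaxHeaderSize "") (ne .HTTP2MaxFieldSize "") }}
http2_max_field_size {{ .HTTP2MaxFieldSize }};
http2_max_header_size {{ .HTTP2MaxHeaderSize }};
{{ end }}{{ if gt .HTTP2MaxRequests 0 }}
http2_max_requests {{ .HTTP2MaxRequests }};
{{ end }}`

func main() {
	t := template.Must(template.New("http2").Parse(tmpl))

	// New defaults ("", "", 0): nothing is rendered.
	if err := t.Execute(os.Stdout, cfg{}); err != nil {
		panic(err)
	}

	// A user who still sets the options keeps the previous output.
	if err := t.Execute(os.Stdout, cfg{HTTP2MaxFieldSize: "4k", HTTP2MaxHeaderSize: "16k", HTTP2MaxRequests: 1000}); err != nil {
		panic(err)
	}
}
```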
ing.ObjectMeta.Annotations[parser.GetAnnotationWithPrefix("enable-influxdb")] = "true" ing.ObjectMeta.Annotations[parser.GetAnnotationWithPrefix("secure-verify-ca-secret")] = "true" - ing.ObjectMeta.Annotations[parser.GetAnnotationWithPrefix("fastcgi-index")] = "blabla" + ing.ObjectMeta.Annotations[parser.GetAnnotationWithPrefix("influxdb-host")] = "blabla" defer func() { ing.ObjectMeta.Annotations = map[string]string{} }() @@ -1529,7 +1535,7 @@ func TestGetBackendServers(t *testing.T) { testCases := []struct { Ingresses []*ingress.Ingress Validate func(ingresses []*ingress.Ingress, upstreams []*ingress.Backend, servers []*ingress.Server) - SetConfigMap func(namespace string) *v1.ConfigMap + SetConfigMap func(namespace string) *corev1.ConfigMap }{ { Ingresses: []*ingress.Ingress{ @@ -2299,8 +2305,8 @@ func TestGetBackendServers(t *testing.T) { t.Errorf("location cafilename should be '%s', got '%s'", ingresses[1].ParsedAnnotations.ProxySSL.CAFileName, s.Locations[0].ProxySSL.CAFileName) } }, - SetConfigMap: func(ns string) *v1.ConfigMap { - return &v1.ConfigMap{ + SetConfigMap: func(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "config", SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), @@ -2360,8 +2366,8 @@ func TestGetBackendServers(t *testing.T) { t.Errorf("backend should be upstream-default-backend, got '%s'", s.Locations[0].Backend) } }, - SetConfigMap: func(ns string) *v1.ConfigMap { - return &v1.ConfigMap{ + SetConfigMap: func(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "config", SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), @@ -2438,8 +2444,8 @@ func TestGetBackendServers(t *testing.T) { } }, - SetConfigMap: func(ns string) *v1.ConfigMap { - return &v1.ConfigMap{ + SetConfigMap: func(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "config", SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), @@ -2459,8 +2465,8 @@ func TestGetBackendServers(t *testing.T) { } } -func testConfigMap(ns string) *v1.ConfigMap { - return &v1.ConfigMap{ +func testConfigMap(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "config", SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), @@ -2469,11 +2475,11 @@ func testConfigMap(ns string) *v1.ConfigMap { } func newNGINXController(t *testing.T) *NGINXController { - ns := v1.NamespaceDefault + ns := corev1.NamespaceDefault clientSet := fake.NewSimpleClientset() - configMap := &v1.ConfigMap{ + configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "config", SelfLink: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/config", ns), @@ -2540,8 +2546,8 @@ func fakeX509Cert(dnsNames []string) *x509.Certificate { } } -func newDynamicNginxController(t *testing.T, setConfigMap func(string) *v1.ConfigMap) *NGINXController { - ns := v1.NamespaceDefault +func newDynamicNginxController(t *testing.T, setConfigMap func(string) *corev1.ConfigMap) *NGINXController { + ns := corev1.NamespaceDefault clientSet := fake.NewSimpleClientset() configMap := setConfigMap(ns) diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index 80693db5c..4a5a07625 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -248,8 +248,7 @@ type NGINXController struct { store store.Storer - metricCollector metric.Collector - admissionCollector 
metric.Collector + metricCollector metric.Collector validationWebhookServer *http.Server @@ -799,45 +798,6 @@ func (n *NGINXController) setupSSLProxy() { }() } -// Helper function to clear Certificates from the ingress configuration since they should be ignored when -// checking if the new configuration changes can be applied dynamically if dynamic certificates is on -func clearCertificates(config *ingress.Configuration) { - var clearedServers []*ingress.Server - for _, server := range config.Servers { - copyOfServer := *server - copyOfServer.SSLCert = nil - clearedServers = append(clearedServers, ©OfServer) - } - config.Servers = clearedServers -} - -// Helper function to clear endpoints from the ingress configuration since they should be ignored when -// checking if the new configuration changes can be applied dynamically. -func clearL4serviceEndpoints(config *ingress.Configuration) { - var clearedTCPL4Services []ingress.L4Service - var clearedUDPL4Services []ingress.L4Service - for _, service := range config.TCPEndpoints { - copyofService := ingress.L4Service{ - Port: service.Port, - Backend: service.Backend, - Endpoints: []ingress.Endpoint{}, - Service: nil, - } - clearedTCPL4Services = append(clearedTCPL4Services, copyofService) - } - for _, service := range config.UDPEndpoints { - copyofService := ingress.L4Service{ - Port: service.Port, - Backend: service.Backend, - Endpoints: []ingress.Endpoint{}, - Service: nil, - } - clearedUDPL4Services = append(clearedUDPL4Services, copyofService) - } - config.TCPEndpoints = clearedTCPL4Services - config.UDPEndpoints = clearedUDPL4Services -} - // configureDynamically encodes new Backends in JSON format and POSTs the // payload to an internal HTTP endpoint handled by Lua. func (n *NGINXController) configureDynamically(pcfg *ingress.Configuration) error { diff --git a/internal/ingress/controller/store/endpointslice_test.go b/internal/ingress/controller/store/endpointslice_test.go index e12a98c2f..1342575ae 100644 --- a/internal/ingress/controller/store/endpointslice_test.go +++ b/internal/ingress/controller/store/endpointslice_test.go @@ -59,7 +59,9 @@ func TestEndpointSliceLister(t *testing.T) { }, }, } - el.Add(endpointSlice) + if err := el.Add(endpointSlice); err != nil { + t.Errorf("unexpected error %v", err) + } endpointSlice = &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Namespace: "namespace", @@ -69,7 +71,9 @@ func TestEndpointSliceLister(t *testing.T) { }, }, } - el.Add(endpointSlice) + if err := el.Add(endpointSlice); err != nil { + t.Errorf("unexpected error %v", err) + } endpointSlice = &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Namespace: "namespace", @@ -79,7 +83,9 @@ func TestEndpointSliceLister(t *testing.T) { }, }, } - el.Add(endpointSlice) + if err := el.Add(endpointSlice); err != nil { + t.Errorf("unexpected error %v", err) + } eps, err := el.MatchByKey(key) if err != nil { @@ -108,7 +114,9 @@ func TestEndpointSliceLister(t *testing.T) { }, }, } - el.Add(endpointSlice) + if err := el.Add(endpointSlice); err != nil { + t.Errorf("unexpected error %v", err) + } endpointSlice2 := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns2, @@ -118,7 +126,9 @@ func TestEndpointSliceLister(t *testing.T) { }, }, } - el.Add(endpointSlice2) + if err := el.Add(endpointSlice2); err != nil { + t.Errorf("unexpected error %v", err) + } eps, err := el.MatchByKey(key) if err != nil { t.Errorf("unexpeted error %v", err) diff --git a/internal/ingress/controller/store/store.go 
b/internal/ingress/controller/store/store.go index 7157332c3..9b3700739 100644 --- a/internal/ingress/controller/store/store.go +++ b/internal/ingress/controller/store/store.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/labels" k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -405,7 +404,10 @@ func New( return } - store.listers.IngressWithAnnotation.Delete(ing) + if err := store.listers.IngressWithAnnotation.Delete(ing); err != nil { + klog.ErrorS(err, "Error while deleting ingress from store", "ingress", klog.KObj(ing)) + return + } key := k8s.MetaNamespaceKey(ing) store.secretIngressMap.Delete(key) @@ -794,14 +796,26 @@ func New( }, } - store.informers.Ingress.AddEventHandler(ingEventHandler) - if !icConfig.IgnoreIngressClass { - store.informers.IngressClass.AddEventHandler(ingressClassEventHandler) + if _, err := store.informers.Ingress.AddEventHandler(ingEventHandler); err != nil { + klog.Errorf("Error adding ingress event handler: %v", err) + } + if !icConfig.IgnoreIngressClass { + if _, err := store.informers.IngressClass.AddEventHandler(ingressClassEventHandler); err != nil { + klog.Errorf("Error adding ingress class event handler: %v", err) + } + } + if _, err := store.informers.EndpointSlice.AddEventHandler(epsEventHandler); err != nil { + klog.Errorf("Error adding endpoint slice event handler: %v", err) + } + if _, err := store.informers.Secret.AddEventHandler(secrEventHandler); err != nil { + klog.Errorf("Error adding secret event handler: %v", err) + } + if _, err := store.informers.ConfigMap.AddEventHandler(cmEventHandler); err != nil { + klog.Errorf("Error adding configmap event handler: %v", err) + } + if _, err := store.informers.Service.AddEventHandler(serviceHandler); err != nil { + klog.Errorf("Error adding service event handler: %v", err) } - store.informers.EndpointSlice.AddEventHandler(epsEventHandler) - store.informers.Secret.AddEventHandler(secrEventHandler) - store.informers.ConfigMap.AddEventHandler(cmEventHandler) - store.informers.Service.AddEventHandler(serviceHandler) // do not wait for informers to read the configmap configuration ns, name, _ := k8s.ParseNameNS(configmap) @@ -1138,7 +1152,7 @@ func (s *k8sStore) Run(stopCh chan struct{}) { var runtimeScheme = k8sruntime.NewScheme() func init() { - utilruntime.Must(networkingv1.AddToScheme(runtimeScheme)) + runtime.Must(networkingv1.AddToScheme(runtimeScheme)) } func toIngress(obj interface{}) (*networkingv1.Ingress, bool) { diff --git a/internal/ingress/controller/store/store_test.go b/internal/ingress/controller/store/store_test.go index 9fe6e37bb..b91cadc6c 100644 --- a/internal/ingress/controller/store/store_test.go +++ b/internal/ingress/controller/store/store_test.go @@ -92,7 +92,7 @@ func TestStore(t *testing.T) { emptySelector, _ := labels.Parse("") - defer te.Stop() + defer te.Stop() //nolint:errcheck clientSet, err := kubernetes.NewForConfig(cfg) if err != nil { @@ -1377,14 +1377,18 @@ func TestUpdateSecretIngressMap(t *testing.T) { Namespace: "testns", }, } - s.listers.Ingress.Add(ingTpl) + if err := s.listers.Ingress.Add(ingTpl); err != nil { + t.Errorf("error adding the Ingress template: %v", err) + } t.Run("with TLS secret", func(t *testing.T) { ing := ingTpl.DeepCopy() ing.Spec = networking.IngressSpec{ TLS: []networking.IngressTLS{{SecretName: "tls"}}, } - s.listers.Ingress.Update(ing) + if 
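store.go now checks the value returned by AddEventHandler, which in recent client-go (v0.26+) returns a registration handle alongside an error. A minimal hedged sketch of that registration pattern against a fake clientset; the ConfigMap handler body is purely illustrative and not taken from the controller.

```go
// Minimal sketch (not the controller's store wiring): registering an informer event
// handler with the two-value AddEventHandler signature and surfacing the error,
// mirroring the pattern store.go now uses.
package main

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

func main() {
	clientSet := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(clientSet, 10*time.Minute)

	cmInformer := factory.Core().V1().ConfigMaps().Informer()
	if _, err := cmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			cm := obj.(*corev1.ConfigMap)
			klog.InfoS("configmap added", "name", cm.Name)
		},
	}); err != nil {
		klog.Errorf("Error adding configmap event handler: %v", err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}
```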
err := s.listers.Ingress.Update(ing); err != nil { + t.Errorf("error updating the Ingress: %v", err) + } s.updateSecretIngressMap(ing) if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("testns/tls")) { @@ -1397,7 +1401,9 @@ func TestUpdateSecretIngressMap(t *testing.T) { ing.ObjectMeta.SetAnnotations(map[string]string{ parser.GetAnnotationWithPrefix("auth-secret"): "auth", }) - s.listers.Ingress.Update(ing) + if err := s.listers.Ingress.Update(ing); err != nil { + t.Errorf("error updating the Ingress: %v", err) + } s.updateSecretIngressMap(ing) if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("testns/auth")) { @@ -1410,7 +1416,9 @@ func TestUpdateSecretIngressMap(t *testing.T) { ing.ObjectMeta.SetAnnotations(map[string]string{ parser.GetAnnotationWithPrefix("auth-secret"): "otherns/auth", }) - s.listers.Ingress.Update(ing) + if err := s.listers.Ingress.Update(ing); err != nil { + t.Errorf("error updating the Ingress: %v", err) + } s.updateSecretIngressMap(ing) if l := s.secretIngressMap.Len(); !(l == 1 && s.secretIngressMap.Has("otherns/auth")) { @@ -1423,7 +1431,9 @@ func TestUpdateSecretIngressMap(t *testing.T) { ing.ObjectMeta.SetAnnotations(map[string]string{ parser.GetAnnotationWithPrefix("auth-secret"): "ns/name/garbage", }) - s.listers.Ingress.Update(ing) + if err := s.listers.Ingress.Update(ing); err != nil { + t.Errorf("error updating the Ingress: %v", err) + } s.updateSecretIngressMap(ing) if l := s.secretIngressMap.Len(); l != 0 { @@ -1457,7 +1467,9 @@ func TestListIngresses(t *testing.T) { }, }, } - s.listers.IngressWithAnnotation.Add(ingressToIgnore) + if err := s.listers.IngressWithAnnotation.Add(ingressToIgnore); err != nil { + t.Errorf("error adding the Ingress: %v", err) + } ingressWithoutPath := &ingress.Ingress{ Ingress: networking.Ingress{ @@ -1492,8 +1504,9 @@ func TestListIngresses(t *testing.T) { }, }, } - s.listers.IngressWithAnnotation.Add(ingressWithoutPath) - + if err := s.listers.IngressWithAnnotation.Add(ingressWithoutPath); err != nil { + t.Errorf("error adding the Ingress: %v", err) + } ingressWithNginxClassAnnotation := &ingress.Ingress{ Ingress: networking.Ingress{ ObjectMeta: metav1.ObjectMeta{ @@ -1531,8 +1544,9 @@ func TestListIngresses(t *testing.T) { }, }, } - s.listers.IngressWithAnnotation.Add(ingressWithNginxClassAnnotation) - + if err := s.listers.IngressWithAnnotation.Add(ingressWithNginxClassAnnotation); err != nil { + t.Errorf("error adding the Ingress: %v", err) + } ingresses := s.ListIngresses() if s := len(ingresses); s != 3 { diff --git a/internal/ingress/controller/template/template.go b/internal/ingress/controller/template/template.go index 6aadab48e..147455771 100644 --- a/internal/ingress/controller/template/template.go +++ b/internal/ingress/controller/template/template.go @@ -18,13 +18,14 @@ package template import ( "bytes" + "crypto/rand" "crypto/sha1" // #nosec "encoding/base64" "encoding/hex" "encoding/json" "fmt" "io" - "math/rand" // #nosec + "math/big" "net" "net/url" "os" @@ -34,7 +35,6 @@ import ( "strconv" "strings" text_template "text/template" - "time" networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -736,9 +736,6 @@ func buildProxyPass(host string, b interface{}, loc interface{}) string { case "GRPCS": proto = "grpcs://" proxyPass = "grpc_pass" - case "AJP": - proto = "" - proxyPass = "ajp_pass" case "FCGI": proto = "" proxyPass = "fastcgi_pass" @@ -1187,14 +1184,15 @@ func buildAuthSignURLLocation(location, authSignURL string) string { var letters = 
[]rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") -func init() { - rand.Seed(time.Now().UnixNano()) -} - func randomString() string { b := make([]rune, 32) for i := range b { - b[i] = letters[rand.Intn(len(letters))] // #nosec + idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) + if err != nil { + klog.Errorf("unexpected error generating random index: %v", err) + return "" + } + b[i] = letters[idx.Int64()] } return string(b) @@ -1727,7 +1725,7 @@ func buildMirrorLocations(locs []*ingress.Location) string { mapped := sets.Set[string]{} for _, loc := range locs { - if loc.Mirror.Source == "" || loc.Mirror.Target == "" { + if loc.Mirror.Source == "" || loc.Mirror.Target == "" || loc.Mirror.Host == "" { continue } @@ -1738,8 +1736,8 @@ func buildMirrorLocations(locs []*ingress.Location) string { mapped.Insert(loc.Mirror.Source) buffer.WriteString(fmt.Sprintf(`location = %v { internal; -proxy_set_header Host %v; -proxy_pass %v; +proxy_set_header Host "%v"; +proxy_pass "%v"; } `, loc.Mirror.Source, loc.Mirror.Host, loc.Mirror.Target)) diff --git a/internal/ingress/controller/template/template_test.go b/internal/ingress/controller/template/template_test.go index 1980d7e52..a2c3b8299 100644 --- a/internal/ingress/controller/template/template_test.go +++ b/internal/ingress/controller/template/template_test.go @@ -767,7 +767,9 @@ func BenchmarkTemplateWithData(b *testing.B) { } for i := 0; i < b.N; i++ { - ngxTpl.Write(dat) + if _, err := ngxTpl.Write(dat); err != nil { + b.Errorf("unexpected error writing template: %v", err) + } } } @@ -1137,7 +1139,6 @@ func TestOpentracingPropagateContext(t *testing.T) { {BackendProtocol: "AUTO_HTTP"}: "opentracing_propagate_context;", {BackendProtocol: "GRPC"}: "opentracing_grpc_propagate_context;", {BackendProtocol: "GRPCS"}: "opentracing_grpc_propagate_context;", - {BackendProtocol: "AJP"}: "opentracing_propagate_context;", {BackendProtocol: "FCGI"}: "opentracing_propagate_context;", nil: "", } @@ -1157,7 +1158,6 @@ func TestOpentelemetryPropagateContext(t *testing.T) { {BackendProtocol: "AUTO_HTTP"}: "opentelemetry_propagate;", {BackendProtocol: "GRPC"}: "opentelemetry_propagate;", {BackendProtocol: "GRPCS"}: "opentelemetry_propagate;", - {BackendProtocol: "AJP"}: "opentelemetry_propagate;", {BackendProtocol: "FCGI"}: "opentelemetry_propagate;", nil: "", } diff --git a/internal/ingress/metric/collectors/process_test.go b/internal/ingress/metric/collectors/process_test.go index 45170572b..b21d95496 100644 --- a/internal/ingress/metric/collectors/process_test.go +++ b/internal/ingress/metric/collectors/process_test.go @@ -48,7 +48,7 @@ func TestProcessCollector(t *testing.T) { done := make(chan struct{}) go func() { - cmd.Wait() + cmd.Wait() //nolint:errcheck status := cmd.ProcessState.Sys().(syscall.WaitStatus) if status.Signaled() { t.Logf("Signal: %v", status.Signal()) @@ -69,7 +69,7 @@ func TestProcessCollector(t *testing.T) { defer func() { cm.Stop() - cmd.Process.Kill() + cmd.Process.Kill() //nolint:errcheck <-done close(done) }() diff --git a/internal/ingress/metric/collectors/socket_test.go b/internal/ingress/metric/collectors/socket_test.go index fe442aba0..6000f2685 100644 --- a/internal/ingress/metric/collectors/socket_test.go +++ b/internal/ingress/metric/collectors/socket_test.go @@ -58,7 +58,9 @@ func TestNewUDPLogListener(t *testing.T) { }() conn, _ := net.Dial("unix", tmpFile) - conn.Write([]byte("message")) + if _, err := conn.Write([]byte("message")); err != nil { + t.Errorf("unexpected error writing to unix 
socket: %v", err) + } conn.Close() time.Sleep(1 * time.Millisecond) diff --git a/internal/ingress/status/status.go b/internal/ingress/status/status.go index a7506705c..62b88da16 100644 --- a/internal/ingress/status/status.go +++ b/internal/ingress/status/status.go @@ -95,10 +95,13 @@ func (s statusSync) Run(stopCh chan struct{}) { // when this instance is the leader we need to enqueue // an item to trigger the update of the Ingress status. - wait.PollUntil(time.Duration(UpdateInterval)*time.Second, func() (bool, error) { + err := wait.PollUntil(time.Duration(UpdateInterval)*time.Second, func() (bool, error) { s.syncQueue.EnqueueTask(task.GetDummyObject("sync status")) return false, nil }, stopCh) + if err != nil { + klog.ErrorS(err, "error running poll") + } } // Shutdown stops the sync. In case the instance is the leader it will remove the current IP diff --git a/internal/ingress/status/status_test.go b/internal/ingress/status/status_test.go index 3dd56f37d..ce6b6a0bf 100644 --- a/internal/ingress/status/status_test.go +++ b/internal/ingress/status/status_test.go @@ -325,7 +325,9 @@ func TestStatusActions(t *testing.T) { // wait for the election time.Sleep(100 * time.Millisecond) // execute sync - fk.sync("just-test") + if err := fk.sync("just-test"); err != nil { + t.Errorf("unexpected error: %v", err) + } // PublishService is empty, so the running address is: ["11.0.0.2"] // after updated, the ingress's ip should only be "11.0.0.2" newIPs := []networking.IngressLoadBalancerIngress{{ diff --git a/internal/net/dns/dns_test.go b/internal/net/dns/dns_test.go index 708e3c6df..7e030e17a 100644 --- a/internal/net/dns/dns_test.go +++ b/internal/net/dns/dns_test.go @@ -40,13 +40,16 @@ func TestGetDNSServers(t *testing.T) { defer f.Close() defer os.Remove(f.Name()) - os.WriteFile(f.Name(), []byte(` + err = os.WriteFile(f.Name(), []byte(` # comment ; comment nameserver 2001:4860:4860::8844 nameserver 2001:4860:4860::8888 nameserver 8.8.8.8 `), file.ReadWriteByUser) + if err != nil { + t.Errorf("unexpected error: %v", err) + } defResolvConf = f.Name() s, err = GetSystemNameServers() diff --git a/internal/net/ssl/ssl.go b/internal/net/ssl/ssl.go index fdee2f46e..c74537fe9 100644 --- a/internal/net/ssl/ssl.go +++ b/internal/net/ssl/ssl.go @@ -228,7 +228,7 @@ func ConfigureCRL(name string, crl []byte, sslCert *ingress.SSLCert) error { return fmt.Errorf("CRL file %v contains invalid data, and must be created only with PEM formatted certificates", name) } - _, err := x509.ParseCRL(pemCRLBlock.Bytes) + _, err := x509.ParseRevocationList(pemCRLBlock.Bytes) if err != nil { return fmt.Errorf(err.Error()) } diff --git a/internal/net/ssl/ssl_test.go b/internal/net/ssl/ssl_test.go index e251d01d1..a86ecb87a 100644 --- a/internal/net/ssl/ssl_test.go +++ b/internal/net/ssl/ssl_test.go @@ -20,7 +20,6 @@ import ( "bytes" "crypto" "crypto/rand" - cryptorand "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" @@ -336,7 +335,7 @@ const ( // newPrivateKey creates an RSA private key func newPrivateKey() (*rsa.PrivateKey, error) { - return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) + return rsa.GenerateKey(rand.Reader, rsaKeySize) } // newSignedCert creates a signed certificate using the given CA certificate and key @@ -365,7 +364,7 @@ func newSignedCert(cfg certutil.Config, key crypto.Signer, caCert *x509.Certific KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: cfg.Usages, } - certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), 
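ConfigureCRL switches from the deprecated x509.ParseCRL to x509.ParseRevocationList (Go 1.19+), which takes the DER bytes from the decoded PEM block just as the previous call did. A short hedged sketch of parsing a PEM-encoded CRL that way; the ca.crl path is illustrative.

```go
// Hedged sketch: checking a PEM-encoded CRL with x509.ParseRevocationList, the
// replacement for the deprecated x509.ParseCRL used before this patch. The file
// name is illustrative.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("ca.crl") // illustrative path
	if err != nil {
		fmt.Fprintf(os.Stderr, "reading CRL: %v\n", err)
		os.Exit(1)
	}

	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "X509 CRL" {
		fmt.Fprintln(os.Stderr, "file does not contain a PEM-encoded CRL")
		os.Exit(1)
	}

	// ParseRevocationList takes the DER bytes inside the PEM block, matching the
	// pemCRLBlock.Bytes argument the old ParseCRL call received.
	crl, err := x509.ParseRevocationList(block.Bytes)
	if err != nil {
		fmt.Fprintf(os.Stderr, "invalid CRL: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("CRL issued by %s, next update %s\n", crl.Issuer, crl.NextUpdate)
}
```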
caKey) + certDERBytes, err := x509.CreateCertificate(rand.Reader, &certTmpl, caCert, key.Public(), caKey) if err != nil { return nil, err } @@ -398,7 +397,9 @@ func newFakeCertificate(t *testing.T) ([]byte, string, string) { t.Errorf("failed to write test key: %v", err) } - certFile.Write(cert) + if _, err := certFile.Write(cert); err != nil { + t.Errorf("failed to write cert: %v", err) + } defer certFile.Close() keyFile, err := os.CreateTemp("", "key-") @@ -406,7 +407,9 @@ func newFakeCertificate(t *testing.T) ([]byte, string, string) { t.Errorf("failed to write test key: %v", err) } - keyFile.Write(key) + if _, err := keyFile.Write(key); err != nil { + t.Errorf("failed to write key: %v", err) + } defer keyFile.Close() return cert, certFile.Name(), keyFile.Name() diff --git a/mkdocs.yml b/mkdocs.yml index 992c04d45..3243f8247 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -127,6 +127,7 @@ nav: - TLS termination: "examples/tls-termination/README.md" - Pod Security Policy (PSP): "examples/psp/README.md" - Open Policy Agent rules: "examples/openpolicyagent/README.md" + - Canary Deployments: "examples/canary/README.md" - Developer Guide: - Getting Started: "developer-guide/getting-started.md" - Code Overview: "developer-guide/code-overview.md" diff --git a/netlify.toml b/netlify.toml new file mode 100644 index 000000000..dc4b0d1ca --- /dev/null +++ b/netlify.toml @@ -0,0 +1,11 @@ +# netlify configuration +[build] +publish = "site" +command = "make build-docs" +ignore = "git diff --quiet $CACHED_COMMIT_REF $COMMIT_REF ./docs" +# available here https://github.com/netlify/build-image/blob/focal/included_software.md#languages +environment = { PYTHON_VERSION = "3.8" } + +[context.deploy-preview] + publish = "site/" + command = "make build-docs" diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 370510380..489e24886 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -228,14 +228,10 @@ https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-g flags.IntVar(&nginx.MaxmindRetriesCount, "maxmind-retries-count", 1, "Number of attempts to download the GeoIP DB.") flags.DurationVar(&nginx.MaxmindRetriesTimeout, "maxmind-retries-timeout", time.Second*0, "Maxmind downloading delay between 1st and 2nd attempt, 0s - do not retry to download if something went wrong.") - flag.Set("logtostderr", "true") - flags.AddGoFlagSet(flag.CommandLine) - flags.Parse(os.Args) - - // Workaround for this issue: - // https://github.com/kubernetes/kubernetes/issues/17162 - flag.CommandLine.Parse([]string{}) + if err := flags.Parse(os.Args); err != nil { + return false, nil, err + } pflag.VisitAll(func(flag *pflag.Flag) { klog.V(2).InfoS("FLAG", flag.Name, flag.Value) diff --git a/pkg/tcpproxy/tcp.go b/pkg/tcpproxy/tcp.go index 86850ad54..25cc39ee4 100644 --- a/pkg/tcpproxy/tcp.go +++ b/pkg/tcpproxy/tcp.go @@ -119,7 +119,9 @@ func (p *TCPProxy) Handle(conn net.Conn) { func pipe(client, server net.Conn) { doCopy := func(s, c net.Conn, cancel chan<- bool) { - io.Copy(s, c) + if _, err := io.Copy(s, c); err != nil { + klog.Errorf("Error copying data: %v", err) + } cancel <- true } diff --git a/pkg/util/file/file_test.go b/pkg/util/file/file_test.go index 93ec39cf9..9a43946b3 100644 --- a/pkg/util/file/file_test.go +++ b/pkg/util/file/file_test.go @@ -35,8 +35,12 @@ func TestSHA1(t *testing.T) { if err != nil { t.Fatal(err) } - f.Write(test.content) - f.Sync() + if _, err := f.Write(test.content); err != nil { + t.Error(err) + } + if err := f.Sync(); err != nil { + t.Error(err) + } sha := 
SHA1(f.Name()) f.Close() diff --git a/pkg/util/file/file_watcher_test.go b/pkg/util/file/file_watcher_test.go index 316cb6f1e..dd2755812 100644 --- a/pkg/util/file/file_watcher_test.go +++ b/pkg/util/file/file_watcher_test.go @@ -59,7 +59,9 @@ func TestFileWatcher(t *testing.T) { t.Fatalf("expected no events before writing a file") case <-timeoutChan: } - os.WriteFile(f.Name(), []byte{}, ReadWriteByUser) + if err := os.WriteFile(f.Name(), []byte{}, ReadWriteByUser); err != nil { + t.Errorf("unexpected error: %v", err) + } select { case <-events: case <-timeoutChan: diff --git a/pkg/util/process/sigterm_test.go b/pkg/util/process/sigterm_test.go index 2c2a6ee91..b7413bed4 100644 --- a/pkg/util/process/sigterm_test.go +++ b/pkg/util/process/sigterm_test.go @@ -42,9 +42,9 @@ func (f *FakeProcess) exiterFunc(code int) { f.exitCode = code } -func sendDelayedSignal(delay time.Duration) { +func sendDelayedSignal(delay time.Duration) error { time.Sleep(delay * time.Second) - syscall.Kill(syscall.Getpid(), syscall.SIGTERM) + return syscall.Kill(syscall.Getpid(), syscall.SIGTERM) } func TestHandleSigterm(t *testing.T) { @@ -66,7 +66,12 @@ func TestHandleSigterm(t *testing.T) { for _, tt := range tests { process := &FakeProcess{shouldError: tt.shouldError} t.Run(tt.name, func(t *testing.T) { - go sendDelayedSignal(2) // Send a signal after 2 seconds + go func() { + err := sendDelayedSignal(2) // Send a signal after 2 seconds + if err != nil { + t.Errorf("error sending delayed signal: %v", err) + } + }() HandleSigterm(process, tt.delay, process.exiterFunc) }) if tt.shouldError && process.exitCode != 1 { diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl index 9b3a47de3..6ace87448 100644 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ b/rootfs/etc/nginx/template/nginx.tmpl @@ -307,7 +307,6 @@ http { client_body_temp_path /tmp/nginx/client-body; fastcgi_temp_path /tmp/nginx/fastcgi-temp; proxy_temp_path /tmp/nginx/proxy-temp; - ajp_temp_path /tmp/nginx/ajp-temp; client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; @@ -315,9 +314,15 @@ http { client_body_buffer_size {{ $cfg.ClientBodyBufferSize }}; client_body_timeout {{ $cfg.ClientBodyTimeout }}s; + {{ if and (ne $cfg.HTTP2MaxHeaderSize "") (ne $cfg.HTTP2MaxFieldSize "") }} http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }}; http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }}; + {{ end }} + + {{ if (gt $cfg.HTTP2MaxRequests 0) }} http2_max_requests {{ $cfg.HTTP2MaxRequests }}; + {{ end }} + http2_max_concurrent_streams {{ $cfg.HTTP2MaxConcurrentStreams }}; types_hash_max_size 2048; @@ -1382,13 +1387,11 @@ stream { {{ end }} {{/* By default use vhost as Host to upstream, but allow overrides */}} - {{ if not (eq $proxySetHeader "grpc_set_header") }} {{ if not (empty $location.UpstreamVhost) }} {{ $proxySetHeader }} Host {{ $location.UpstreamVhost | quote }}; {{ else }} {{ $proxySetHeader }} Host $best_http_host; {{ end }} - {{ end }} # Pass the extracted client certificate to the backend {{ if not (empty $server.CertificateAuth.CAFileName) }} diff --git a/test/e2e-image/Makefile b/test/e2e-image/Makefile index f74ea8c74..f68d6ea6e 100644 --- a/test/e2e-image/Makefile +++ b/test/e2e-image/Makefile @@ -1,6 +1,6 @@ DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) -E2E_BASE_IMAGE ?= "registry.k8s.io/ingress-nginx/e2e-test-runner:v20230527@sha256:a98ce8ab90f16bdd8539b168a4d000f366afa4eec23a220b3ce39698c5769bfd" +E2E_BASE_IMAGE ?= 
"registry.k8s.io/ingress-nginx/e2e-test-runner:v20230623-d50c7193b@sha256:e5c68dc56934c273850bfb75c0348a2819756669baf59fcdce9e16771537b247" image: echo "..entered Makefile in /test/e2e-image" diff --git a/test/e2e/HTTPBUN_IMAGE b/test/e2e/HTTPBUN_IMAGE new file mode 100644 index 000000000..2d95865c7 --- /dev/null +++ b/test/e2e/HTTPBUN_IMAGE @@ -0,0 +1 @@ +registry.k8s.io/ingress-nginx/e2e-test-httpbun:v20230505-v0.0.1 diff --git a/test/e2e/annotations/affinitymode.go b/test/e2e/annotations/affinitymode.go index cce2b004d..ad210cfa5 100644 --- a/test/e2e/annotations/affinitymode.go +++ b/test/e2e/annotations/affinitymode.go @@ -125,7 +125,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() { framework.Sleep() // validate, there is no backend to serve the request - response = request.WithCookies(cookies).Expect().Status(http.StatusServiceUnavailable) + request.WithCookies(cookies).Expect().Status(http.StatusServiceUnavailable) // create brand new backends replicas = 2 diff --git a/test/e2e/annotations/auth.go b/test/e2e/annotations/auth.go index 4ca034825..8011186a1 100644 --- a/test/e2e/annotations/auth.go +++ b/test/e2e/annotations/auth.go @@ -23,7 +23,6 @@ import ( "net/url" "regexp" "strings" - "time" "golang.org/x/crypto/bcrypt" @@ -38,7 +37,7 @@ import ( ) var _ = framework.DescribeAnnotation("auth-*", func() { - f := framework.NewDefaultFramework("auth") + f := framework.NewDefaultFramework("auth", framework.WithHTTPBunEnabled()) ginkgo.BeforeEach(func() { f.NewEchoDeployment() @@ -390,10 +389,10 @@ http { assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - httpbunIP := e.Subsets[0].Addresses[0].IP + nginxIP := e.Subsets[0].Addresses[0].IP annotations = map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/cookies/set/alma/armud", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/cookies/set/alma/armud", nginxIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", } @@ -457,21 +456,8 @@ http { var ing *networking.Ingress ginkgo.BeforeEach(func() { - f.NewHttpbunDeployment() - - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - - httpbunIP := e.Subsets[0].Addresses[0].IP - annotations = map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", } @@ -650,20 +636,8 @@ http { var ing *networking.Ingress ginkgo.BeforeEach(func() { - f.NewHttpbunDeployment() - - var httpbunIP string - - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - e, err := 
f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - httpbunIP = e.Subsets[0].Addresses[0].IP - annotations = map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", "nginx.ingress.kubernetes.io/auth-signin-redirect-param": "orig", } @@ -729,23 +703,8 @@ http { barPath := "/bar" ginkgo.BeforeEach(func() { - f.NewHttpbunDeployment() - - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - framework.Sleep(1 * time.Second) - - e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - - httpbunIP := e.Subsets[0].Addresses[0].IP - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", "nginx.ingress.kubernetes.io/auth-cache-key": "fixed", "nginx.ingress.kubernetes.io/auth-cache-duration": "200 201 401 30m", diff --git a/test/e2e/annotations/backendprotocol.go b/test/e2e/annotations/backendprotocol.go index bccb03afb..566a6921e 100644 --- a/test/e2e/annotations/backendprotocol.go +++ b/test/e2e/annotations/backendprotocol.go @@ -105,19 +105,4 @@ var _ = framework.DescribeAnnotation("backend-protocol", func() { return strings.Contains(server, "fastcgi_pass upstream_balancer;") }) }) - - ginkgo.It("should set backend protocol to '' and use ajp_pass", func() { - host := "backendprotocol.foo.com" - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/backend-protocol": "AJP", - } - - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) - - f.WaitForNginxServer(host, - func(server string) bool { - return strings.Contains(server, "ajp_pass upstream_balancer;") - }) - }) }) diff --git a/test/e2e/annotations/canary.go b/test/e2e/annotations/canary.go index 6dd81fdd8..15cbeffa7 100644 --- a/test/e2e/annotations/canary.go +++ b/test/e2e/annotations/canary.go @@ -30,18 +30,15 @@ import ( ) const ( - canaryService = "echo-canary" + canaryService = "httpbun-canary" ) var _ = framework.DescribeAnnotation("canary-*", func() { - f := framework.NewDefaultFramework("canary") + f := framework.NewDefaultFramework("canary", framework.WithHTTPBunEnabled()) ginkgo.BeforeEach(func() { - // Deployment for main backend - f.NewEchoDeployment() - // Deployment for canary backend - f.NewEchoDeployment(framework.WithDeploymentName(canaryService)) + f.NewHttpbunDeployment(framework.WithDeploymentName(canaryService)) }) ginkgo.Context("when canary is created", func() { @@ -49,9 +46,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := 
framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -65,16 +67,23 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) }) ginkgo.It("should return 404 status for requests to the canary if no matching ingress is found", func() { @@ -87,13 +96,17 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). @@ -108,7 +121,7 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) + ing := framework.NewSingleIngress(host, "/info", host, f.Namespace, framework.HTTPBunService, 80, annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -123,7 +136,7 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, canaryService, + canaryIng := framework.NewSingleIngress(canaryIngName, "/info", host, f.Namespace, canaryService, 80, canaryAnnotations) f.EnsureIngress(canaryIng) @@ -162,9 +175,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -178,29 +196,37 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests destined for the mainline ingress to the maineline upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "never"). Expect(). Status(http.StatusOK). 
- Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests destined for the canary ingress to the canary upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). + Contains(canaryService) }) ginkgo.It("should route requests to the correct upstream if mainline ingress is created after the canary ingress", func() { @@ -213,15 +239,25 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -230,30 +266,38 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.By("routing requests destined for the mainline ingress to the mainelin upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "never"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests destined for the canary ingress to the canary upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). 
+ Contains(canaryService) }) ginkgo.It("should route requests to the correct upstream if the mainline ingress is modified", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -267,18 +311,27 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) modAnnotations := map[string]string{ "foo": "bar", } - modIng := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, modAnnotations) - - f.UpdateIngress(modIng) + f.UpdateIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + modAnnotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -287,30 +340,38 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.By("routing requests destined fro the mainline ingress to the mainline upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "never"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests destined for the canary ingress to the canary upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). 
+ Contains(canaryService) }) ginkgo.It("should route requests to the correct upstream if the canary ingress is modified", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -324,9 +385,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -338,10 +404,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader2", } - modIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, newAnnotations) - - f.UpdateIngress(modIng) + f.UpdateIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + newAnnotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -350,21 +420,24 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.By("routing requests destined for the mainline ingress to the mainline upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader2", "never"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests destined for the canary ingress to the canary upstream") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader2", "always"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). + Contains(canaryService) }) }) @@ -372,9 +445,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.It("should route requests to the correct upstream", func() { host := "foo" - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, nil) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + nil)) f.WaitForNginxServer(host, func(server string) bool { @@ -388,37 +466,46 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the canary upstream when header is set to 'always'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). + Contains(canaryService) ginkgo.By("routing requests to the mainline upstream when header is set to 'never'") f.HTTPTestClient(). - GET("/"). + GET("/info"). 
WithHeader("Host", host). WithHeader("CanaryByHeader", "never"). Expect(). Status(http.StatusOK). Body(). - Contains(framework.EchoService).NotContains(canaryService) + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests to the mainline upstream when header is set to anything else") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "badheadervalue"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) }) }) @@ -427,9 +514,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -444,45 +536,57 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the canary upstream when header is set to 'DoCanary'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "DoCanary"). Expect(). Status(http.StatusOK). - Body().Contains(canaryService) + Body(). + Contains(canaryService) ginkgo.By("routing requests to the mainline upstream when header is set to 'always'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "always"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests to the mainline upstream when header is set to 'never'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "never"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). + NotContains(canaryService) ginkgo.By("routing requests to the mainline upstream when header is set to anything else") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "otherheadervalue"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body(). + Contains(framework.HTTPBunService). 
+ NotContains(canaryService) }) }) @@ -491,8 +595,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -507,13 +617,18 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, canaryService, - 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the canary upstream when header pattern is matched") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "DoCanary"). Expect(). @@ -522,19 +637,25 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.By("routing requests to the mainline upstream when header failed to match header value") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "Docanary"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body().Contains(framework.HTTPBunService).NotContains(canaryService) }) ginkgo.It("should route requests to the correct upstream", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -550,25 +671,36 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, canaryService, - 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the mainline upstream when header is set to 'DoCananry' and header-value is 'DoCanary'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "DoCananry"). Expect(). Status(http.StatusOK). 
- Body().Contains(framework.EchoService).NotContains(canaryService) + Body().Contains(framework.HTTPBunService).NotContains(canaryService) }) ginkgo.It("should routes to mainline upstream when the given Regex causes error", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -584,19 +716,24 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, canaryService, - 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the mainline upstream when the given Regex causes error") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "DoCanary"). WithCookie("CanaryByCookie", "always"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body().Contains(framework.HTTPBunService).NotContains(canaryService) }) }) @@ -605,9 +742,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -623,13 +765,18 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the canary upstream when header value does not match and cookie is set to 'always'") f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("CanaryByHeader", "otherheadervalue"). WithCookie("CanaryByCookie", "always"). 
@@ -644,9 +791,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -660,14 +812,19 @@ var _ = framework.DescribeAnnotation("canary-*", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) ginkgo.By("routing requests to the canary upstream when cookie is set to 'always'") for i := 0; i < 50; i++ { f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithCookie("Canary-By-Cookie", "always"). Expect(). @@ -678,12 +835,12 @@ var _ = framework.DescribeAnnotation("canary-*", func() { ginkgo.By("routing requests to the mainline upstream when cookie is set to 'never'") for i := 0; i < 50; i++ { f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithCookie("Canary-By-Cookie", "never"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body().Contains(framework.HTTPBunService).NotContains(canaryService) } ginkgo.By("routing requests to the mainline upstream when cookie is set to anything else") @@ -691,12 +848,12 @@ var _ = framework.DescribeAnnotation("canary-*", func() { // This test relies on canary cookie not parsing into the valid // affinity data and canary weight not being specified at all. f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithCookie("Canary-By-Cookie", "badcookievalue"). Expect(). Status(http.StatusOK). - Body().Contains(framework.EchoService).NotContains(canaryService) + Body().Contains(framework.HTTPBunService).NotContains(canaryService) } }) }) @@ -706,9 +863,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -721,9 +883,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight": "0", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -731,12 +898,12 @@ var _ = framework.DescribeAnnotation("canary-*", func() { }) f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). Expect(). Status(http.StatusOK). Body(). - Contains(framework.EchoService). + Contains(framework.HTTPBunService). 
NotContains(canaryService) }) @@ -744,9 +911,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -759,12 +931,17 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight": "100", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). Expect(). Status(http.StatusOK). @@ -776,9 +953,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -792,12 +974,17 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight-total": "1000", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). Expect(). Status(http.StatusOK). 
@@ -809,9 +996,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -824,9 +1016,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight": "50", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) TestMainlineCanaryDistribution(f, host) }) @@ -835,9 +1032,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -851,9 +1053,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight-total": "200", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) TestMainlineCanaryDistribution(f, host) }) @@ -868,17 +1075,23 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", } - ing := framework.NewSingleCatchAllIngress(canaryIngName, - f.Namespace, canaryService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleCatchAllIngress( + canaryIngName, + f.Namespace, + canaryService, + 80, + annotations)) - ing = framework.NewSingleCatchAllIngress(host, f.Namespace, - framework.EchoService, 80, nil) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleCatchAllIngress( + host, + f.Namespace, + framework.HTTPBunService, + 80, + nil)) f.WaitForNginxServer("_", func(server string) bool { - upstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, framework.EchoService, "80") + upstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, framework.HTTPBunService, "80") canaryUpstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, canaryService, "80") return strings.Contains(server, fmt.Sprintf(`set $ingress_name "%v";`, host)) && @@ -896,14 +1109,24 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", } - ing := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + annotations)) otherHost := "bar" - ing = framework.NewSingleIngress(otherHost, "/", otherHost, - f.Namespace, framework.EchoService, 80, nil) - f.EnsureIngress(ing) + 
f.EnsureIngress(framework.NewSingleIngress( + otherHost, + "/info", + otherHost, + f.Namespace, + framework.HTTPBunService, + 80, + nil)) f.WaitForNginxConfiguration(func(cfg string) bool { return strings.Contains(cfg, "server_name "+otherHost) && @@ -921,13 +1144,22 @@ var _ = framework.DescribeAnnotation("canary-*", func() { } paths := []string{"/foo", "/bar"} - ing := framework.NewSingleIngressWithMultiplePaths(canaryIngName, paths, host, - f.Namespace, "httpy-svc-canary", 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngressWithMultiplePaths( + canaryIngName, + paths, + host, + f.Namespace, + "httpy-svc-canary", + 80, + annotations)) - ing = framework.NewSingleIngress(host, "/", host, f.Namespace, - framework.EchoService, 80, nil) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, f.Namespace, + framework.HTTPBunService, + 80, + nil)) f.WaitForNginxServer(host, func(server string) bool { @@ -946,9 +1178,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/session-cookie-name": affinityCookieName, } - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -964,14 +1201,19 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/canary-weight": "1", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) // This request will produce affinity cookie coming from the canary // backend. forcedRequestToCanary := f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("ForceCanary", "yes"). Expect(). @@ -988,7 +1230,7 @@ var _ = framework.DescribeAnnotation("canary-*", func() { // routed to a specific backend. for i := 0; i < 50; i++ { f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithCookie(affinityCookieName, affinityCookie.Raw().Value). Expect(). @@ -1003,9 +1245,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/session-cookie-name": affinityCookieName, } - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -1022,14 +1269,19 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/affinity-canary-behavior": "sticky", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) // This request will produce affinity cookie coming from the canary // backend. forcedRequestToCanary := f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("ForceCanary", "yes"). Expect(). 
@@ -1046,7 +1298,7 @@ var _ = framework.DescribeAnnotation("canary-*", func() { // routed to a specific backend. for i := 0; i < 50; i++ { f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithCookie(affinityCookieName, affinityCookie.Raw().Value). Expect(). @@ -1061,9 +1313,14 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/session-cookie-name": affinityCookieName, } - ing := framework.NewSingleIngress(host, "/", host, - f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/info", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -1080,14 +1337,19 @@ var _ = framework.DescribeAnnotation("canary-*", func() { "nginx.ingress.kubernetes.io/affinity-canary-behavior": "legacy", } - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, - f.Namespace, canaryService, 80, canaryAnnotations) - f.EnsureIngress(canaryIng) + f.EnsureIngress(framework.NewSingleIngress( + canaryIngName, + "/info", + host, + f.Namespace, + canaryService, + 80, + canaryAnnotations)) // This request will produce affinity cookie coming from the canary // backend. forcedRequestToCanary := f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). WithHeader("ForceCanary", "yes"). Expect(). @@ -1113,7 +1375,7 @@ var _ = framework.DescribeAnnotation("canary-*", func() { // This method assumes canary weight being configured at 50%. func TestMainlineCanaryDistribution(f *framework.Framework, host string) { - re := regexp.MustCompile(fmt.Sprintf(`%s.*`, framework.EchoService)) + re := regexp.MustCompile(fmt.Sprintf(`%s.*`, framework.HTTPBunService)) replicaRequestCount := map[string]int{} // The implementation of choice by weight doesn't guarantee exact @@ -1124,7 +1386,7 @@ func TestMainlineCanaryDistribution(f *framework.Framework, host string) { for i := 0; i < requestsToGet; i++ { body := f.HTTPTestClient(). - GET("/"). + GET("/info"). WithHeader("Host", host). Expect(). Status(http.StatusOK).Body().Raw() @@ -1143,6 +1405,14 @@ func TestMainlineCanaryDistribution(f *framework.Framework, host string) { assert.Equal(ginkgo.GinkgoT(), 2, len(keys)) - assert.GreaterOrEqual(ginkgo.GinkgoT(), int(replicaRequestCount[keys[0].String()]), requestsNumberToTest) - assert.GreaterOrEqual(ginkgo.GinkgoT(), int(replicaRequestCount[keys[1].String()]), requestsNumberToTest) + assert.GreaterOrEqual( + ginkgo.GinkgoT(), + int(replicaRequestCount[keys[0].String()]), + requestsNumberToTest, + ) + assert.GreaterOrEqual( + ginkgo.GinkgoT(), + int(replicaRequestCount[keys[1].String()]), + requestsNumberToTest, + ) } diff --git a/test/e2e/annotations/connection.go b/test/e2e/annotations/connection.go index 9cfcbacd0..428d85876 100644 --- a/test/e2e/annotations/connection.go +++ b/test/e2e/annotations/connection.go @@ -17,7 +17,6 @@ limitations under the License. package annotations import ( - "fmt" "net/http" "strings" @@ -52,6 +51,6 @@ var _ = framework.DescribeAnnotation("connection-proxy-header", func() { WithHeader("Host", host). Expect(). Status(http.StatusOK). 
- Body().Contains(fmt.Sprintf("connection=keep-alive")) + Body().Contains("connection=keep-alive") }) }) diff --git a/test/e2e/annotations/grpc.go b/test/e2e/annotations/grpc.go index 046191b21..243307df4 100644 --- a/test/e2e/annotations/grpc.go +++ b/test/e2e/annotations/grpc.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" - core "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -38,7 +37,7 @@ import ( ) var _ = framework.DescribeAnnotation("backend-protocol - GRPC", func() { - f := framework.NewDefaultFramework("grpc") + f := framework.NewDefaultFramework("grpc", framework.WithHTTPBunEnabled()) ginkgo.It("should use grpc_pass in the configuration file", func() { f.NewGRPCFortuneTellerDeployment() @@ -70,7 +69,7 @@ var _ = framework.DescribeAnnotation("backend-protocol - GRPC", func() { host := "echo" - svc := &core.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "grpcbin-test", Namespace: f.Namespace, @@ -121,15 +120,14 @@ var _ = framework.DescribeAnnotation("backend-protocol - GRPC", func() { metadata := res.GetMetadata() assert.Equal(ginkgo.GinkgoT(), metadata["content-type"].Values[0], "application/grpc") + assert.Equal(ginkgo.GinkgoT(), metadata[":authority"].Values[0], host) }) ginkgo.It("authorization metadata should be overwritten by external auth response headers", func() { f.NewGRPCBinDeployment() - f.NewHttpbunDeployment() - host := "echo" - svc := &core.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "grpcbin-test", Namespace: f.Namespace, @@ -149,19 +147,8 @@ var _ = framework.DescribeAnnotation("backend-protocol - GRPC", func() { } f.EnsureService(svc) - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - - httpbunIP := e.Subsets[0].Addresses[0].IP - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/response-headers?authorization=foo", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/response-headers?authorization=foo", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-response-headers": "Authorization", "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", } @@ -201,7 +188,7 @@ var _ = framework.DescribeAnnotation("backend-protocol - GRPC", func() { host := "echo" - svc := &core.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "grpcbin-test", Namespace: f.Namespace, diff --git a/test/e2e/annotations/mirror.go b/test/e2e/annotations/mirror.go index ad178a947..787cbfa3b 100644 --- a/test/e2e/annotations/mirror.go +++ b/test/e2e/annotations/mirror.go @@ -60,7 +60,7 @@ var _ = framework.DescribeAnnotation("mirror-*", func() { func(server string) bool { return strings.Contains(server, fmt.Sprintf("mirror /_mirror-%v;", ing.UID)) && strings.Contains(server, "mirror_request_body on;") && - strings.Contains(server, "proxy_pass https://test.env.com/$request_uri;") + 
strings.Contains(server, `proxy_pass "https://test.env.com/$request_uri";`) }) }) diff --git a/test/e2e/annotations/satisfy.go b/test/e2e/annotations/satisfy.go index 758ad21a4..6ba6db33e 100644 --- a/test/e2e/annotations/satisfy.go +++ b/test/e2e/annotations/satisfy.go @@ -17,7 +17,6 @@ limitations under the License. package annotations import ( - "context" "fmt" "net/http" "net/url" @@ -27,13 +26,12 @@ import ( "github.com/stretchr/testify/assert" networking "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/test/e2e/framework" ) var _ = framework.DescribeAnnotation("satisfy", func() { - f := framework.NewDefaultFramework("satisfy") + f := framework.NewDefaultFramework("satisfy", framework.WithHTTPBunEnabled()) ginkgo.BeforeEach(func() { f.NewEchoDeployment() @@ -84,17 +82,6 @@ var _ = framework.DescribeAnnotation("satisfy", func() { ginkgo.It("should allow multiple auth with satisfy any", func() { host := "auth" - // setup external auth - f.NewHttpbunDeployment() - - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - httpbunIP := e.Subsets[0].Addresses[0].IP - // create basic auth secret at ingress s := f.EnsureSecret(buildSecret("uname", "pwd", "basic-secret", f.Namespace)) @@ -105,7 +92,7 @@ var _ = framework.DescribeAnnotation("satisfy", func() { "nginx.ingress.kubernetes.io/auth-realm": "test basic auth", // annotations for external auth - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", // set satisfy any diff --git a/test/e2e/annotations/snippet.go b/test/e2e/annotations/snippet.go index 367708302..2bbd3e33a 100644 --- a/test/e2e/annotations/snippet.go +++ b/test/e2e/annotations/snippet.go @@ -26,21 +26,25 @@ import ( ) var _ = framework.DescribeAnnotation("configuration-snippet", func() { - f := framework.NewDefaultFramework("configurationsnippet") + f := framework.NewDefaultFramework( + "configurationsnippet", + framework.WithHTTPBunEnabled(), + ) - ginkgo.BeforeEach(func() { - f.NewEchoDeployment() - }) - - ginkgo.It(`set snippet "more_set_headers "Foo1: Bar1";" in all locations"`, func() { + ginkgo.It("set snippet more_set_headers in all locations", func() { host := "configurationsnippet.foo.com" annotations := map[string]string{ - "nginx.ingress.kubernetes.io/configuration-snippet": ` - more_set_headers "Foo1: Bar1";`, + "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Foo1: Bar1";`, } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) - f.EnsureIngress(ing) + f.EnsureIngress(framework.NewSingleIngress( + host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations)) f.WaitForNginxServer(host, func(server string) bool { @@ -51,23 +55,32 @@ var _ = framework.DescribeAnnotation("configuration-snippet", func() { GET("/"). WithHeader("Host", host). Expect(). - Status(http.StatusOK).Headers(). + Status(http.StatusOK). + Headers(). 
ValueEqual("Foo1", []string{"Bar1"}) }) - ginkgo.It(`drops snippet "more_set_headers "Foo1: Bar1";" in all locations if disabled by admin"`, func() { + ginkgo.It("drops snippet more_set_header in all locations if disabled by admin", func() { host := "noconfigurationsnippet.foo.com" annotations := map[string]string{ - "nginx.ingress.kubernetes.io/configuration-snippet": ` - more_set_headers "Foo1: Bar1";`, + "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Foo1: Bar1";`, } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations) + ing := framework.NewSingleIngress( + host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations) + f.UpdateNginxConfigMapData("allow-snippet-annotations", "false") defer func() { // Return to the original value f.UpdateNginxConfigMapData("allow-snippet-annotations", "true") }() + // Sleep a while just to guarantee that the configmap is applied framework.Sleep() f.EnsureIngress(ing) @@ -81,7 +94,8 @@ var _ = framework.DescribeAnnotation("configuration-snippet", func() { GET("/"). WithHeader("Host", host). Expect(). - Status(http.StatusOK).Headers(). + Status(http.StatusOK). + Headers(). NotContainsKey("Foo1") }) }) diff --git a/test/e2e/endpointslices/topology.go b/test/e2e/endpointslices/topology.go index 7cc67cedb..8dd0becfb 100644 --- a/test/e2e/endpointslices/topology.go +++ b/test/e2e/endpointslices/topology.go @@ -76,7 +76,9 @@ var _ = framework.IngressNginxDescribeSerial("[TopologyHints] topology aware rou status, err := f.ExecIngressPod(curlCmd) assert.Nil(ginkgo.GinkgoT(), err) var backends []map[string]interface{} - json.Unmarshal([]byte(status), &backends) + if err := json.Unmarshal([]byte(status), &backends); err != nil { + assert.Nil(ginkgo.GinkgoT(), err) + } gotBackends := 0 for _, bck := range backends { if strings.Contains(bck["name"].(string), "topology") { diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go index 565b8f4ac..bcb1d3960 100644 --- a/test/e2e/framework/deployment.go +++ b/test/e2e/framework/deployment.go @@ -43,12 +43,21 @@ const HTTPBunService = "httpbun" // NipService name of external service using nip.io const NIPService = "external-nip" +// HTTPBunImage is the default image that is used to deploy HTTPBun with the framwork +var HTTPBunImage = os.Getenv("HTTPBUN_IMAGE") + +// EchoImage is the default image to be used by the echo service +const EchoImage = "registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:4938d1d91a2b7d19454460a8c1b010b89f6ff92d2987fd889ac3e8fc3b70d91a" + +// TODO: change all Deployment functions to use these options +// in order to reduce complexity and have a unified API accross the +// framework type deploymentOptions struct { - namespace string name string + namespace string + image string replicas int svcAnnotations map[string]string - image string } // WithDeploymentNamespace allows configuring the deployment's namespace @@ -100,22 +109,25 @@ func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) { namespace: f.Namespace, name: EchoService, replicas: 1, - image: "registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:6fc5aa2994c86575975bb20a5203651207029a0d28e3f491d8a127d08baadab4", + image: EchoImage, } for _, o := range opts { o(options) } - deployment := newDeployment(options.name, options.namespace, options.image, 80, int32(options.replicas), + f.EnsureDeployment(newDeployment( + options.name, + options.namespace, + options.image, + 80, + int32(options.replicas), nil, 
		nil, nil,
		[]corev1.VolumeMount{}, []corev1.Volume{}, true,
-	)
+	))

-	f.EnsureDeployment(deployment)
-
-	service := &corev1.Service{
+	f.EnsureService(&corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      options.name,
			Namespace: options.namespace,
@@ -134,11 +146,15 @@ func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) {
				"app": options.name,
			},
		},
-	}
+	})

-	f.EnsureService(service)
-
-	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, options.name, options.namespace, options.replicas)
+	err := WaitForEndpoints(
+		f.KubeClientSet,
+		DefaultTimeout,
+		options.name,
+		options.namespace,
+		options.replicas,
+	)
	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
}

@@ -147,6 +163,12 @@ func BuildNIPHost(ip string) string {
	return fmt.Sprintf("%s.nip.io", ip)
}

+// GetNIPHost used to generate a nip host for external DNS resolving
+// for the instance deployed by the framework
+func (f *Framework) GetNIPHost() string {
+	return BuildNIPHost(f.HTTPBunIP)
+}
+
// BuildNIPExternalNameService used to generate a service pointing to nip.io to
// help resolve to an IP address
func BuildNIPExternalNameService(f *Framework, ip, portName string) *corev1.Service {
@@ -177,22 +199,34 @@ func (f *Framework) NewHttpbunDeployment(opts ...func(*deploymentOptions)) strin
		namespace: f.Namespace,
		name:      HTTPBunService,
		replicas:  1,
-		image:     "registry.k8s.io/ingress-nginx/e2e-test-httpbun:v20230505-v0.0.1",
+		image:     HTTPBunImage,
	}
	for _, o := range opts {
		o(options)
	}

-	deployment := newDeployment(options.name, options.namespace, options.image, 80, int32(options.replicas),
-		nil, nil, nil,
+	// Create the HTTPBun Deployment
+	f.EnsureDeployment(newDeployment(
+		options.name,
+		options.namespace,
+		options.image,
+		80,
+		int32(options.replicas),
+		nil, nil,
+		// Required to get hostname information
+		[]corev1.EnvVar{
+			{
+				Name:  "HTTPBUN_INFO_ENABLED",
+				Value: "1",
+			},
+		},
+		[]corev1.VolumeMount{}, []corev1.Volume{}, true,
-	)
+	))

-	f.EnsureDeployment(deployment)
-
-	service := &corev1.Service{
+	// Create a service pointing to the deployment
+	f.EnsureService(&corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      options.name,
			Namespace: options.namespace,
@@ -211,14 +245,26 @@ func (f *Framework) NewHttpbunDeployment(opts ...func(*deploymentOptions)) strin
				"app": options.name,
			},
		},
-	}
+	})

-	s := f.EnsureService(service)
-
-	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, options.name, options.namespace, options.replicas)
+	// Wait for deployment to become available
+	err := WaitForEndpoints(
+		f.KubeClientSet,
+		DefaultTimeout,
+		options.name,
+		options.namespace,
+		options.replicas,
+	)
	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")

-	return s.Spec.ClusterIPs[0]
+	// Get the cluster IP for HTTPBun to be used in tests
+	e, err := f.KubeClientSet.
+		CoreV1().
+		Endpoints(f.Namespace).
+		Get(context.TODO(), HTTPBunService, metav1.GetOptions{})
+	assert.Nil(ginkgo.GinkgoT(), err, "failed to get httpbun endpoint")
+
+	return e.Subsets[0].Addresses[0].IP
}

// NewSlowEchoDeployment creates a new deployment of the slow echo server image in a particular namespace.
@@ -276,13 +322,16 @@ func (f *Framework) NGINXDeployment(name string, cfg string, waitendpoint bool) {
		"nginx.conf": cfg,
	}

-	_, err := f.KubeClientSet.CoreV1().ConfigMaps(f.Namespace).Create(context.TODO(), &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: f.Namespace,
-		},
-		Data: cfgMap,
-	}, metav1.CreateOptions{})
+	_, err := f.KubeClientSet.
+ CoreV1(). + ConfigMaps(f.Namespace). + Create(context.TODO(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: f.Namespace, + }, + Data: cfgMap, + }, metav1.CreateOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "creating configmap") deployment := newDeployment(name, f.Namespace, f.GetNginxBaseImage(), 80, 1, diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index cac6dfd20..69f6dae78 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -27,7 +27,6 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" apiextcs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -38,7 +37,6 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - restclient "k8s.io/client-go/rest" "k8s.io/klog/v2" ) @@ -62,23 +60,39 @@ type Framework struct { // A Kubernetes and Service Catalog client KubeClientSet kubernetes.Interface - KubeConfig *restclient.Config + KubeConfig *rest.Config APIExtensionsClientSet apiextcs.Interface Namespace string IngressClass string - pod *corev1.Pod + pod *v1.Pod + // We use httpbun as a service that we route to in our tests through + // the ingress controller. We add it as part of the framework as it + // is used extensively + HTTPBunIP string + HTTPBunEnabled bool +} + +// WithHTTPBunEnabled deploys an instance of HTTPBun for the specific test +func WithHTTPBunEnabled() func(*Framework) { + return func(f *Framework) { + f.HTTPBunEnabled = true + } } // NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for // you (you can write additional before/after each functions). -func NewDefaultFramework(baseName string) *Framework { +func NewDefaultFramework(baseName string, opts ...func(*Framework)) *Framework { defer ginkgo.GinkgoRecover() f := &Framework{ BaseName: baseName, } + // set framework options + for _, o := range opts { + o(f) + } ginkgo.BeforeEach(f.BeforeEach) ginkgo.AfterEach(f.AfterEach) @@ -88,12 +102,16 @@ func NewDefaultFramework(baseName string) *Framework { // NewSimpleFramework makes a new framework that allows the usage of a namespace // for arbitraty tests. -func NewSimpleFramework(baseName string) *Framework { +func NewSimpleFramework(baseName string, opts ...func(*Framework)) *Framework { defer ginkgo.GinkgoRecover() f := &Framework{ BaseName: baseName, } + // set framework options + for _, o := range opts { + o(f) + } ginkgo.BeforeEach(f.CreateEnvironment) ginkgo.AfterEach(f.DestroyEnvironment) @@ -142,6 +160,11 @@ func (f *Framework) BeforeEach() { assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller pod information") f.WaitForNginxListening(80) + + // If HTTPBun is enabled deploy an instance to the namespace + if f.HTTPBunEnabled { + f.HTTPBunIP = f.NewHttpbunDeployment() + } } // AfterEach deletes the namespace, after reading its events. 
@@ -162,7 +185,7 @@ func (f *Framework) AfterEach() { return } - cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf") + cmd := "cat /etc/nginx/nginx.conf" o, err := f.ExecCommand(f.pod, cmd) if err != nil { Logf("Unexpected error obtaining nginx.conf file: %v", err) @@ -233,7 +256,7 @@ func (f *Framework) GetURL(scheme RequestScheme) string { } // GetIngressNGINXPod returns the ingress controller running pod -func (f *Framework) GetIngressNGINXPod() *corev1.Pod { +func (f *Framework) GetIngressNGINXPod() *v1.Pod { return f.pod } @@ -279,7 +302,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b return func() (bool, error) { var cmd string if name == "" { - cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf") + cmd = "cat /etc/nginx/nginx.conf" } else { cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf | awk '/## start server %v/,/## end server %v/'", name, name) } @@ -413,13 +436,13 @@ func (f *Framework) WaitForReload(fn func()) { assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress controller reload") } -func getReloadCount(pod *corev1.Pod, namespace string, client kubernetes.Interface) int { +func getReloadCount(pod *v1.Pod, namespace string, client kubernetes.Interface) int { events, err := client.CoreV1().Events(namespace).Search(scheme.Scheme, pod) assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX Pod") reloadCount := 0 for _, e := range events.Items { - if e.Reason == "RELOAD" && e.Type == corev1.EventTypeNormal { + if e.Reason == "RELOAD" && e.Type == v1.EventTypeNormal { reloadCount++ } } @@ -793,7 +816,7 @@ func Sleep(duration ...time.Duration) { time.Sleep(sleepFor) } -func loadConfig() (*restclient.Config, error) { +func loadConfig() (*rest.Config, error) { config, err := rest.InClusterConfig() if err != nil { return nil, err diff --git a/test/e2e/framework/httpexpect/chain.go b/test/e2e/framework/httpexpect/chain.go index 79956fb33..33c4d15b7 100644 --- a/test/e2e/framework/httpexpect/chain.go +++ b/test/e2e/framework/httpexpect/chain.go @@ -36,19 +36,3 @@ func (c *chain) fail(message string, args ...interface{}) { c.failbit = true c.reporter.Errorf(message, args...) 
} - -func (c *chain) reset() { - c.failbit = false -} - -func (c *chain) assertFailed(r Reporter) { - if !c.failbit { - r.Errorf("expected chain is failed, but it's ok") - } -} - -func (c *chain) assertOK(r Reporter) { - if c.failbit { - r.Errorf("expected chain is ok, but it's failed") - } -} diff --git a/test/e2e/run-e2e-suite.sh b/test/e2e/run-e2e-suite.sh index a3bf589cd..b56312afd 100755 --- a/test/e2e/run-e2e-suite.sh +++ b/test/e2e/run-e2e-suite.sh @@ -51,6 +51,7 @@ fi BASEDIR=$(dirname "$0") NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE) +HTTPBUN_IMAGE=$(cat $BASEDIR/HTTPBUN_IMAGE) echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}" kubectl create serviceaccount ingress-nginx-e2e || true @@ -79,6 +80,7 @@ kubectl run --rm \ --env="IS_CHROOT=${IS_CHROOT:-false}"\ --env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \ --env="NGINX_BASE_IMAGE=${NGINX_BASE_IMAGE}" \ + --env="HTTPBUN_IMAGE=${HTTPBUN_IMAGE}" \ --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \ e2e --image=nginx-ingress-controller:e2e diff --git a/test/e2e/security/request_smuggling.go b/test/e2e/security/request_smuggling.go index 786a7a397..58b17c4d8 100644 --- a/test/e2e/security/request_smuggling.go +++ b/test/e2e/security/request_smuggling.go @@ -79,7 +79,9 @@ func smugglingRequest(host, addr string, port int) (string, error) { defer conn.Close() - conn.SetDeadline(time.Now().Add(time.Second * 10)) + if err := conn.SetDeadline(time.Now().Add(time.Second * 10)); err != nil { + return "", err + } _, err = fmt.Fprintf(conn, "GET /echo HTTP/1.1\r\nHost: %v\r\nContent-Length: 56\r\n\r\nGET /_hidden/index.html HTTP/1.1\r\nHost: notlocalhost\r\n\r\n", host) if err != nil { diff --git a/test/e2e/servicebackend/service_externalname.go b/test/e2e/servicebackend/service_externalname.go index 2fd6cd080..89ae77b10 100644 --- a/test/e2e/servicebackend/service_externalname.go +++ b/test/e2e/servicebackend/service_externalname.go @@ -35,7 +35,7 @@ import ( ) var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { - f := framework.NewDefaultFramework("type-externalname") + f := framework.NewDefaultFramework("type-externalname", framework.WithHTTPBunEnabled()) ginkgo.It("works with external name set to incomplete fqdn", func() { f.NewEchoDeployment() @@ -43,7 +43,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: framework.HTTPBunService, + Name: framework.NIPService, Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ @@ -51,10 +51,15 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Type: corev1.ServiceTypeExternalName, }, } - f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, nil) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.NIPService, + 80, + nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -70,10 +75,6 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }) ginkgo.It("should return 200 for service type=ExternalName without a port defined", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" svc := &corev1.Service{ @@ -82,17 +83,23 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ - ExternalName: 
framework.BuildNIPHost(ip), + ExternalName: f.GetNIPHost(), Type: corev1.ServiceTypeExternalName, }, } - f.EnsureService(svc) annotations := map[string]string{ - "nginx.ingress.kubernetes.io/upstream-vhost": framework.BuildNIPHost(ip), + "nginx.ingress.kubernetes.io/upstream-vhost": f.GetNIPHost(), } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, annotations) + + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -108,19 +115,21 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }) ginkgo.It("should return 200 for service type=ExternalName with a port defined", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" - svc := framework.BuildNIPExternalNameService(f, ip, host) + svc := framework.BuildNIPExternalNameService(f, f.HTTPBunIP, host) f.EnsureService(svc) annotations := map[string]string{ - "nginx.ingress.kubernetes.io/upstream-vhost": framework.BuildNIPHost(ip), + "nginx.ingress.kubernetes.io/upstream-vhost": f.GetNIPHost(), } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, annotations) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -140,7 +149,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: framework.HTTPBunService, + Name: framework.NIPService, Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ @@ -148,10 +157,15 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Type: corev1.ServiceTypeExternalName, }, } - f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, nil) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.NIPService, + 80, + nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -167,19 +181,22 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }) ginkgo.It("should return 200 for service type=ExternalName using a port name", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" - svc := framework.BuildNIPExternalNameService(f, ip, host) + svc := framework.BuildNIPExternalNameService(f, f.HTTPBunIP, host) f.EnsureService(svc) annotations := map[string]string{ - "nginx.ingress.kubernetes.io/upstream-vhost": framework.BuildNIPHost(ip), + "nginx.ingress.kubernetes.io/upstream-vhost": f.GetNIPHost(), } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, annotations) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations) + namedBackend := networking.IngressBackend{ Service: &networking.IngressServiceBackend{ Name: framework.NIPService, @@ -188,6 +205,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }, }, } + ing.Spec.Rules[0].HTTP.Paths[0].Backend = namedBackend f.EnsureIngress(ing) @@ -204,10 +222,6 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }) ginkgo.It("should return 200 for service 
type=ExternalName using FQDN with trailing dot", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" svc := &corev1.Service{ @@ -216,14 +230,19 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ - ExternalName: framework.BuildNIPHost(ip), + ExternalName: f.GetNIPHost(), Type: corev1.ServiceTypeExternalName, }, } - f.EnsureService(svc) - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, nil) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -239,20 +258,23 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { }) ginkgo.It("should update the external name after a service update", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" - svc := framework.BuildNIPExternalNameService(f, ip, host) + svc := framework.BuildNIPExternalNameService(f, f.HTTPBunIP, host) f.EnsureService(svc) annotations := map[string]string{ - "nginx.ingress.kubernetes.io/upstream-vhost": framework.BuildNIPHost(ip), + "nginx.ingress.kubernetes.io/upstream-vhost": f.GetNIPHost(), } - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.HTTPBunService, 80, annotations) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.HTTPBunService, + 80, + annotations) + namedBackend := networking.IngressBackend{ Service: &networking.IngressServiceBackend{ Name: framework.NIPService, @@ -279,14 +301,20 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { assert.Contains(ginkgo.GinkgoT(), body, `"X-Forwarded-Host": "echo"`) - svc, err := f.KubeClientSet.CoreV1().Services(f.Namespace).Get(context.TODO(), framework.NIPService, metav1.GetOptions{}) + svc, err := f.KubeClientSet. + CoreV1(). + Services(f.Namespace). + Get(context.TODO(), framework.NIPService, metav1.GetOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining external service") - ip = f.NewHttpbunDeployment(framework.WithDeploymentName("eu-server")) - + //Deploy a new instance to switch routing to + ip := f.NewHttpbunDeployment(framework.WithDeploymentName("eu-server")) svc.Spec.ExternalName = framework.BuildNIPHost(ip) - _, err = f.KubeClientSet.CoreV1().Services(f.Namespace).Update(context.Background(), svc, metav1.UpdateOptions{}) + _, err = f.KubeClientSet. + CoreV1(). + Services(f.Namespace). 
+ Update(context.Background(), svc, metav1.UpdateOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating external service") framework.Sleep() @@ -302,21 +330,31 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { assert.Contains(ginkgo.GinkgoT(), body, `"X-Forwarded-Host": "echo"`) ginkgo.By("checking the service is updated to use new host") - curlCmd := fmt.Sprintf("curl --fail --silent http://localhost:%v/configuration/backends", nginx.StatusPort) + curlCmd := fmt.Sprintf( + "curl --fail --silent http://localhost:%v/configuration/backends", + nginx.StatusPort, + ) + output, err := f.ExecIngressPod(curlCmd) assert.Nil(ginkgo.GinkgoT(), err) - assert.Contains(ginkgo.GinkgoT(), output, fmt.Sprintf("{\"address\":\"%s\"", framework.BuildNIPHost(ip))) + assert.Contains( + ginkgo.GinkgoT(), + output, + fmt.Sprintf("{\"address\":\"%s\"", framework.BuildNIPHost(ip)), + ) }) ginkgo.It("should sync ingress on external name service addition/deletion", func() { - // This is a workaround so we only depend on a self hosted instance of - // httpbun - ip := f.NewHttpbunDeployment() - host := "echo" // Create the Ingress first - ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.NIPService, 80, nil) + ing := framework.NewSingleIngress(host, + "/", + host, + f.Namespace, + framework.NIPService, + 80, + nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -332,7 +370,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Status(http.StatusServiceUnavailable) // Now create the service - svc := framework.BuildNIPExternalNameService(f, ip, host) + svc := framework.BuildNIPExternalNameService(f, f.HTTPBunIP, host) f.EnsureService(svc) framework.Sleep() @@ -345,7 +383,10 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() { Status(http.StatusOK) // And back to 503 after deleting the service - err := f.KubeClientSet.CoreV1().Services(f.Namespace).Delete(context.TODO(), framework.NIPService, metav1.DeleteOptions{}) + err := f.KubeClientSet. + CoreV1(). + Services(f.Namespace). 
+ Delete(context.TODO(), framework.NIPService, metav1.DeleteOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error deleting external service") framework.Sleep() diff --git a/test/e2e/settings/brotli.go b/test/e2e/settings/brotli.go index a13678f66..aacaddec5 100644 --- a/test/e2e/settings/brotli.go +++ b/test/e2e/settings/brotli.go @@ -28,14 +28,13 @@ import ( ) var _ = framework.IngressNginxDescribe("brotli", func() { - f := framework.NewDefaultFramework("brotli") + f := framework.NewDefaultFramework( + "brotli", + framework.WithHTTPBunEnabled(), + ) host := "brotli" - ginkgo.BeforeEach(func() { - f.NewHttpbunDeployment() - }) - ginkgo.It("should only compress responses that meet the `brotli-min-length` condition", func() { brotliMinLength := 24 contentEncoding := "application/octet-stream" diff --git a/test/e2e/settings/disable_service_external_name.go b/test/e2e/settings/disable_service_external_name.go index 7f03e5355..4ecf69e81 100644 --- a/test/e2e/settings/disable_service_external_name.go +++ b/test/e2e/settings/disable_service_external_name.go @@ -33,7 +33,10 @@ import ( ) var _ = framework.IngressNginxDescribe("[Flag] disable-service-external-name", func() { - f := framework.NewDefaultFramework("disabled-service-external-name") + f := framework.NewDefaultFramework( + "disabled-service-external-name", + framework.WithHTTPBunEnabled(), + ) ginkgo.BeforeEach(func() { f.NewEchoDeployment(framework.WithDeploymentReplicas(2)) @@ -54,21 +57,18 @@ var _ = framework.IngressNginxDescribe("[Flag] disable-service-external-name", f externalhost := "echo-external-svc.com" - ip := f.NewHttpbunDeployment() - svc := framework.BuildNIPExternalNameService(f, ip, "echo") - f.EnsureService(svc) + f.EnsureService(framework.BuildNIPExternalNameService(f, f.HTTPBunIP, "echo")) - svcexternal := &corev1.Service{ + f.EnsureService(&corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "external", Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ - ExternalName: framework.BuildNIPHost(ip), + ExternalName: f.GetNIPHost(), Type: corev1.ServiceTypeExternalName, }, - } - f.EnsureService(svcexternal) + }) ingexternal := framework.NewSingleIngress(externalhost, "/", externalhost, f.Namespace, "external", 80, nil) f.EnsureIngress(ingexternal) diff --git a/test/e2e/settings/enable_real_ip.go b/test/e2e/settings/enable_real_ip.go index 9be2e52d9..778011b9f 100644 --- a/test/e2e/settings/enable_real_ip.go +++ b/test/e2e/settings/enable_real_ip.go @@ -64,15 +64,15 @@ var _ = framework.DescribeSetting("enable-real-ip", func() { Body(). 
Raw() - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=myproto")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) + assert.NotContains(ginkgo.GinkgoT(), body, "host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-proto=myproto") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%s", host)) assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=%s", host)) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=http")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=80")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=1.2.3.4")) + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=http") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=80") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-for=1.2.3.4") }) ginkgo.It("should not trust X-Forwarded-For header when setting is false", func() { @@ -101,13 +101,13 @@ var _ = framework.DescribeSetting("enable-real-ip", func() { Raw() assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%s", host)) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=80")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=http")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-original-forwarded-for=1.2.3.4")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=myproto")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=1.2.3.4")) + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=80") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=http") + assert.Contains(ginkgo.GinkgoT(), body, "x-original-forwarded-for=1.2.3.4") + assert.NotContains(ginkgo.GinkgoT(), body, "host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-proto=myproto") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-for=1.2.3.4") }) }) diff --git a/test/e2e/settings/forwarded_headers.go b/test/e2e/settings/forwarded_headers.go index b929e683b..d4ffee545 100644 --- a/test/e2e/settings/forwarded_headers.go +++ b/test/e2e/settings/forwarded_headers.go @@ -17,7 +17,6 @@ limitations under the License. package settings import ( - "fmt" "net/http" "strings" @@ -65,12 +64,12 @@ var _ = framework.DescribeSetting("use-forwarded-headers", func() { Body(). 
Raw() - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=myhost")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=myhost")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=myproto")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-scheme=myproto")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=1.2.3.4")) + assert.Contains(ginkgo.GinkgoT(), body, "host=myhost") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-host=myhost") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=myproto") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-scheme=myproto") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-for=1.2.3.4") ginkgo.By("ensuring that first entry in X-Forwarded-Host is used as the best host") body = f.HTTPTestClient(). @@ -85,8 +84,8 @@ var _ = framework.DescribeSetting("use-forwarded-headers", func() { Body(). Raw() - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=myhost.com")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=myhost.com")) + assert.Contains(ginkgo.GinkgoT(), body, "host=myhost.com") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-host=myhost.com") }) ginkgo.It("should not trust X-Forwarded headers when setting is false", func() { @@ -115,16 +114,16 @@ var _ = framework.DescribeSetting("use-forwarded-headers", func() { Body(). Raw() - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=forwarded-headers")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=80")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=http")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-scheme=http")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-original-forwarded-for=1.2.3.4")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-host=myhost")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=myproto")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-scheme=myproto")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) - assert.NotContains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=1.2.3.4")) + assert.Contains(ginkgo.GinkgoT(), body, "host=forwarded-headers") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=80") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=http") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-scheme=http") + assert.Contains(ginkgo.GinkgoT(), body, "x-original-forwarded-for=1.2.3.4") + assert.NotContains(ginkgo.GinkgoT(), body, "host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-host=myhost") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-proto=myproto") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-scheme=myproto") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") + assert.NotContains(ginkgo.GinkgoT(), body, "x-forwarded-for=1.2.3.4") }) }) diff --git a/test/e2e/settings/global_external_auth.go b/test/e2e/settings/global_external_auth.go index c5964299f..cc98099ae 100644 --- a/test/e2e/settings/global_external_auth.go +++ b/test/e2e/settings/global_external_auth.go @@ -32,7 +32,10 @@ import ( ) var _ = 
framework.DescribeSetting("[Security] global-auth-url", func() { - f := framework.NewDefaultFramework("global-external-auth") + f := framework.NewDefaultFramework( + "global-external-auth", + framework.WithHTTPBunEnabled(), + ) host := "global-external-auth" @@ -50,7 +53,6 @@ var _ = framework.DescribeSetting("[Security] global-auth-url", func() { ginkgo.BeforeEach(func() { f.NewEchoDeployment() - f.NewHttpbunDeployment() }) ginkgo.Context("when global external authentication is configured", func() { @@ -307,9 +309,9 @@ http { assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - httpbunIP := e.Subsets[0].Addresses[0].IP + nginxIP := e.Subsets[0].Addresses[0].IP - f.UpdateNginxConfigMapData(globalExternalAuthURLSetting, fmt.Sprintf("http://%s/cookies/set/alma/armud", httpbunIP)) + f.UpdateNginxConfigMapData(globalExternalAuthURLSetting, fmt.Sprintf("http://%s/cookies/set/alma/armud", nginxIP)) ing1 = framework.NewSingleIngress(host, "/", host, f.Namespace, "http-cookie-with-error", 80, nil) f.EnsureIngress(ing1) diff --git a/test/e2e/settings/keep-alive.go b/test/e2e/settings/keep-alive.go index 510a90125..d139f61c0 100644 --- a/test/e2e/settings/keep-alive.go +++ b/test/e2e/settings/keep-alive.go @@ -17,7 +17,6 @@ limitations under the License. package settings import ( - "fmt" "regexp" "strings" @@ -41,7 +40,7 @@ var _ = framework.DescribeSetting("keep-alive keep-alive-requests", func() { f.UpdateNginxConfigMapData("keep-alive", "140") f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf(`keepalive_timeout 140s;`)) + return strings.Contains(server, `keepalive_timeout 140s;`) }) }) @@ -49,7 +48,7 @@ var _ = framework.DescribeSetting("keep-alive keep-alive-requests", func() { f.UpdateNginxConfigMapData("keep-alive-requests", "200") f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf(`keepalive_requests 200;`)) + return strings.Contains(server, `keepalive_requests 200;`) }) }) diff --git a/test/e2e/settings/listen_nondefault_ports.go b/test/e2e/settings/listen_nondefault_ports.go index 8b5d22f6e..7e3b11b21 100644 --- a/test/e2e/settings/listen_nondefault_ports.go +++ b/test/e2e/settings/listen_nondefault_ports.go @@ -17,14 +17,12 @@ limitations under the License. package settings import ( - "context" "fmt" "net/http" "strings" "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-nginx/test/e2e/framework" ) @@ -33,7 +31,7 @@ var _ = framework.IngressNginxDescribe("[Flag] custom HTTP and HTTPS ports", fun host := "forwarded-headers" - f := framework.NewDefaultFramework("forwarded-port-headers") + f := framework.NewDefaultFramework("forwarded-port-headers", framework.WithHTTPBunEnabled()) ginkgo.BeforeEach(func() { f.NewEchoDeployment() @@ -92,27 +90,14 @@ var _ = framework.IngressNginxDescribe("[Flag] custom HTTP and HTTPS ports", fun Expect(). Status(http.StatusOK). Body(). 
- Contains(fmt.Sprintf("x-forwarded-port=443")) + Contains("x-forwarded-port=443") }) ginkgo.Context("when external authentication is configured", func() { ginkgo.It("should set the X-Forwarded-Port header to 443", func() { - f.NewHttpbunDeployment() - - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, framework.HTTPBunService, f.Namespace, 1) - assert.Nil(ginkgo.GinkgoT(), err) - - e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get(context.TODO(), framework.HTTPBunService, metav1.GetOptions{}) - assert.Nil(ginkgo.GinkgoT(), err) - - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets), 1, "expected at least one endpoint") - assert.GreaterOrEqual(ginkgo.GinkgoT(), len(e.Subsets[0].Addresses), 1, "expected at least one address ready in the endpoint") - - httpbunIP := e.Subsets[0].Addresses[0].IP - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", httpbunIP), + "nginx.ingress.kubernetes.io/auth-url": fmt.Sprintf("http://%s/basic-auth/user/password", f.HTTPBunIP), "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", } @@ -141,7 +126,7 @@ var _ = framework.IngressNginxDescribe("[Flag] custom HTTP and HTTPS ports", fun Expect(). Status(http.StatusOK). Body(). - Contains(fmt.Sprintf("x-forwarded-port=443")) + Contains("x-forwarded-port=443") }) }) }) diff --git a/test/e2e/settings/no_tls_redirect_locations.go b/test/e2e/settings/no_tls_redirect_locations.go index 2fca545ff..332d764d6 100644 --- a/test/e2e/settings/no_tls_redirect_locations.go +++ b/test/e2e/settings/no_tls_redirect_locations.go @@ -17,7 +17,6 @@ limitations under the License. package settings import ( - "fmt" "strings" "github.com/onsi/ginkgo/v2" @@ -34,7 +33,7 @@ var _ = framework.DescribeSetting("Add no tls redirect locations", func() { f.EnsureIngress(ing) f.WaitForNginxConfiguration(func(server string) bool { - return !strings.Contains(server, fmt.Sprintf("force_no_ssl_redirect = true,")) + return !strings.Contains(server, "force_no_ssl_redirect = true,") }) wlKey := "no-tls-redirect-locations" @@ -43,7 +42,7 @@ var _ = framework.DescribeSetting("Add no tls redirect locations", func() { f.UpdateNginxConfigMapData(wlKey, wlValue) f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf("force_no_ssl_redirect = true,")) + return strings.Contains(server, "force_no_ssl_redirect = true,") }) }) diff --git a/test/e2e/settings/proxy_host.go b/test/e2e/settings/proxy_host.go index 8f564414a..efc254e45 100644 --- a/test/e2e/settings/proxy_host.go +++ b/test/e2e/settings/proxy_host.go @@ -66,7 +66,7 @@ var _ = framework.IngressNginxDescribe("Dynamic $proxy_host", func() { f.WaitForNginxConfiguration( func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %v", test)) && - strings.Contains(server, fmt.Sprintf("set $proxy_host $proxy_upstream_name")) + strings.Contains(server, "set $proxy_host $proxy_upstream_name") }) f.HTTPTestClient(). 
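The recurring changes in the hunks above are twofold: fmt.Sprintf calls that contain no formatting verbs are replaced with plain string literals (the call is a no-op there, which gosimple-style linters report), and the per-spec httpbun setup, f.NewHttpbunDeployment() followed by WaitForEndpoints and an Endpoints().Get() lookup to discover the pod IP, is replaced by the framework.WithHTTPBunEnabled() constructor option, with the address exposed as f.HTTPBunIP / f.GetNIPHost(). The option presumably relies on Go's functional-options pattern; the sketch below illustrates that pattern only and is not the framework's actual implementation. Every name except WithHTTPBunEnabled, NewDefaultFramework and HTTPBunIP is an assumption made for the example.

// Illustrative sketch only: the real e2e framework lives in
// test/e2e/framework and is not shown in this patch. FrameworkOption,
// httpBunEnabled and deployHTTPBun are assumed names for this example.
package main

import "fmt"

// Framework holds per-suite state; HTTPBunIP mirrors the field the specs
// above read instead of deploying httpbun themselves.
type Framework struct {
	BaseName       string
	HTTPBunIP      string
	httpBunEnabled bool
}

// FrameworkOption configures a Framework at construction time.
type FrameworkOption func(*Framework)

// WithHTTPBunEnabled marks the suite as needing a shared httpbun deployment.
func WithHTTPBunEnabled() FrameworkOption {
	return func(f *Framework) { f.httpBunEnabled = true }
}

// NewDefaultFramework applies the options; a real implementation would also
// register the Ginkgo BeforeEach that deploys httpbun and records its IP.
func NewDefaultFramework(baseName string, opts ...FrameworkOption) *Framework {
	f := &Framework{BaseName: baseName}
	for _, opt := range opts {
		opt(f)
	}
	if f.httpBunEnabled {
		f.HTTPBunIP = deployHTTPBun() // stand-in for the real deployment step
	}
	return f
}

func deployHTTPBun() string { return "10.0.0.1" } // placeholder address

func main() {
	f := NewDefaultFramework("brotli", WithHTTPBunEnabled())
	fmt.Println(f.BaseName, f.HTTPBunIP)
}

The visible payoff in the diff is that specs no longer carry the endpoint-discovery boilerplate and simply read f.HTTPBunIP where they previously threaded a local ip variable through the test.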
diff --git a/test/e2e/settings/proxy_protocol.go b/test/e2e/settings/proxy_protocol.go index 8b0e56fe4..1567b2267 100644 --- a/test/e2e/settings/proxy_protocol.go +++ b/test/e2e/settings/proxy_protocol.go @@ -63,17 +63,21 @@ var _ = framework.DescribeSetting("use-proxy-protocol", func() { defer conn.Close() header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 1234\r\n" - conn.Write([]byte(header)) - conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")) + if _, err := conn.Write([]byte(header)); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing header") + } + if _, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing request") + } data, err := io.ReadAll(conn) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error reading connection data") body := string(data) assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%v", "proxy-protocol")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=http")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=192.168.0.1")) + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=http") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-for=192.168.0.1") }) ginkgo.It("should respect proto passed by the PROXY Protocol server port", func() { @@ -96,17 +100,21 @@ var _ = framework.DescribeSetting("use-proxy-protocol", func() { defer conn.Close() header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n" - conn.Write([]byte(header)) - conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")) + if _, err := conn.Write([]byte(header)); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing header") + } + if _, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing request") + } data, err := io.ReadAll(conn) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error reading connection data") body := string(data) assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%v", "proxy-protocol")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=443")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=https")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=192.168.0.1")) + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=443") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=https") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-for=192.168.0.1") }) ginkgo.It("should enable PROXY Protocol for HTTPS", func() { @@ -146,10 +154,10 @@ var _ = framework.DescribeSetting("use-proxy-protocol", func() { body := string(data) assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("host=%v", "proxy-protocol")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-port=1234")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-proto=https")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-scheme=https")) - assert.Contains(ginkgo.GinkgoT(), body, fmt.Sprintf("x-forwarded-for=192.168.0.1")) + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-port=1234") + assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-proto=https") + assert.Contains(ginkgo.GinkgoT(), body, "x-scheme=https") + 
assert.Contains(ginkgo.GinkgoT(), body, "x-forwarded-for=192.168.0.1") }) ginkgo.It("should enable PROXY Protocol for TCP", func() { @@ -205,9 +213,12 @@ var _ = framework.DescribeSetting("use-proxy-protocol", func() { defer conn.Close() header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 8080\r\n" - conn.Write([]byte(header)) - conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")) - + if _, err := conn.Write([]byte(header)); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing header") + } + if _, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: proxy-protocol\r\n\r\n")); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "unexpected error writing request") + } _, err = io.ReadAll(conn) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error reading connection data") diff --git a/test/e2e/settings/ssl_passthrough.go b/test/e2e/settings/ssl_passthrough.go index a906a2d11..f0859f878 100644 --- a/test/e2e/settings/ssl_passthrough.go +++ b/test/e2e/settings/ssl_passthrough.go @@ -34,7 +34,7 @@ import ( ) var _ = framework.IngressNginxDescribe("[Flag] enable-ssl-passthrough", func() { - f := framework.NewDefaultFramework("ssl-passthrough") + f := framework.NewDefaultFramework("ssl-passthrough", framework.WithHTTPBunEnabled()) ginkgo.BeforeEach(func() { err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error { @@ -86,7 +86,14 @@ var _ = framework.IngressNginxDescribe("[Flag] enable-ssl-passthrough", func() { "nginx.ingress.kubernetes.io/ssl-passthrough": "true", } - ingressDef := framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, echoName, 80, annotations) + ingressDef := framework.NewSingleIngressWithTLS(host, + "/", + host, + []string{host}, + f.Namespace, + echoName, + 80, + annotations) tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ingressDef.Spec.TLS[0].Hosts, ingressDef.Spec.TLS[0].SecretName, @@ -119,7 +126,17 @@ var _ = framework.IngressNginxDescribe("[Flag] enable-ssl-passthrough", func() { Value: "/certs/tls.key", }, } - f.NewDeploymentWithOpts("echopass", "ghcr.io/sharat87/httpbun:latest", 80, 1, nil, nil, envs, volumeMount, volume, false) + + f.NewDeploymentWithOpts("echopass", + framework.HTTPBunImage, + 80, + 1, + nil, + nil, + envs, + volumeMount, + volume, + false) f.EnsureIngress(ingressDef) @@ -133,7 +150,14 @@ var _ = framework.IngressNginxDescribe("[Flag] enable-ssl-passthrough", func() { /* This one should not receive traffic as it does not contain passthrough annotation */ hostBad := "noannotationnopassthrough.com" - ingBad := f.EnsureIngress(framework.NewSingleIngressWithTLS(hostBad, "/", hostBad, []string{hostBad}, f.Namespace, echoName, 80, nil)) + ingBad := f.EnsureIngress(framework.NewSingleIngressWithTLS(hostBad, + "/", + hostBad, + []string{hostBad}, + f.Namespace, + echoName, + 80, + nil)) tlsConfigBad, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ingBad.Spec.TLS[0].Hosts, ingBad.Spec.TLS[0].SecretName, diff --git a/test/e2e/settings/tls.go b/test/e2e/settings/tls.go index a249f8bad..a820e41dd 100644 --- a/test/e2e/settings/tls.go +++ b/test/e2e/settings/tls.go @@ -112,7 +112,7 @@ var _ = framework.DescribeSetting("[SSL] TLS protocols, ciphers and headers)", f f.UpdateNginxConfigMapData(hstsMaxAge, "86400") f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf(`hsts_max_age = 86400,`)) + return strings.Contains(server, `hsts_max_age = 86400,`) }) f.HTTPTestClientWithTLSConfig(tlsConfig). 
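The proxy_protocol.go hunks above also stop discarding the error returned by net.Conn.Write: each write is now wrapped in an if that passes a non-nil error to assert.Nil, so a failed write surfaces as a spec failure instead of being silently ignored. A minimal, self-contained sketch of that checked-write pattern against a plain TCP endpoint, outside the Ginkgo framework (the address is a placeholder and the target is assumed to have PROXY protocol enabled):

// Minimal sketch of the checked-write pattern introduced in the
// proxy_protocol.go hunks above; errors are returned to the caller via
// prints here instead of Ginkgo assertions.
package main

import (
	"fmt"
	"io"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:80") // placeholder endpoint
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// net.Conn.Write returns (n int, err error); ignoring err hides failed
	// or short writes, which is what the diff now guards against.
	header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 1234\r\n"
	if _, err := conn.Write([]byte(header)); err != nil {
		fmt.Println("write header:", err)
		return
	}

	// "Connection: close" makes the server close the socket so io.ReadAll
	// terminates at EOF.
	request := "GET / HTTP/1.1\r\nHost: proxy-protocol\r\nConnection: close\r\n\r\n"
	if _, err := conn.Write([]byte(request)); err != nil {
		fmt.Println("write request:", err)
		return
	}

	body, err := io.ReadAll(conn)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("%s\n", body)
}

The only behavioural difference from the unchecked version is that write failures are reported before the test goes on to read and assert on the response body.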
@@ -131,7 +131,7 @@ var _ = framework.DescribeSetting("[SSL] TLS protocols, ciphers and headers)", f }) f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf(`hsts_include_subdomains = false,`)) + return strings.Contains(server, `hsts_include_subdomains = false,`) }) f.HTTPTestClientWithTLSConfig(tlsConfig). @@ -151,7 +151,7 @@ var _ = framework.DescribeSetting("[SSL] TLS protocols, ciphers and headers)", f }) f.WaitForNginxConfiguration(func(server string) bool { - return strings.Contains(server, fmt.Sprintf(`hsts_preload = true,`)) + return strings.Contains(server, `hsts_preload = true,`) }) f.HTTPTestClientWithTLSConfig(tlsConfig). diff --git a/test/e2e/ssl/secret_update.go b/test/e2e/ssl/secret_update.go index 77e64c6b2..242b370ee 100644 --- a/test/e2e/ssl/secret_update.go +++ b/test/e2e/ssl/secret_update.go @@ -73,7 +73,9 @@ var _ = framework.IngressNginxDescribe("[SSL] secret update", func() { dummySecret.Data["some-key"] = []byte("some value") - f.KubeClientSet.CoreV1().Secrets(f.Namespace).Update(context.TODO(), dummySecret, metav1.UpdateOptions{}) + if _, err := f.KubeClientSet.CoreV1().Secrets(f.Namespace).Update(context.TODO(), dummySecret, metav1.UpdateOptions{}); err != nil { + assert.Nil(ginkgo.GinkgoT(), err, "updating secret") + } assert.NotContains(ginkgo.GinkgoT(), log, fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace)) assert.NotContains(ginkgo.GinkgoT(), log, fmt.Sprintf("error obtaining PEM from secret %v/dummy", f.Namespace)) diff --git a/test/e2e/status/update.go b/test/e2e/status/update.go index 5c6ea4977..046752d2b 100644 --- a/test/e2e/status/update.go +++ b/test/e2e/status/update.go @@ -71,7 +71,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() { f.NewEchoDeployment() - ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, nil)) f.WaitForNginxConfiguration( func(cfg string) bool { @@ -84,7 +84,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() { err = cmd.Process.Kill() assert.Nil(ginkgo.GinkgoT(), err, "unexpected error terminating kubectl proxy") - ing, err = f.KubeClientSet.NetworkingV1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.NetworkingV1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{}) assert.Nil(ginkgo.GinkgoT(), err, "unexpected error getting %s/%v Ingress", f.Namespace, host) ing.Status.LoadBalancer.Ingress = []v1.IngressLoadBalancerIngress{}