
* Drop v1beta1 from ingress nginx (#7156) * Drop v1beta1 from ingress nginx Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Fix intorstr logic in controller Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * fixing admission Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * more intorstr fixing * correct template rendering Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Fix e2e tests for v1 api Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Fix gofmt errors * This is finally working...almost there... Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Re-add removed validation of AdmissionReview * Prepare for v1.0.0-alpha.1 release Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Update changelog and matrix table for v1.0.0-alpha.1 (#7274) Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * add docs for syslog feature (#7219) * Fix link to e2e-tests.md in developer-guide (#7201) * Use ENV expansion for namespace in args (#7146) Update the DaemonSet namespace references to use the `POD_NAMESPACE` environment variable in the same way that the Deployment does. 
* chart: using Helm builtin capabilities check (#7190) Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com> * Update proper default value for HTTP2MaxConcurrentStreams in Docs (#6944) It should be 128 as documented in https://github.com/kubernetes/ingress-nginx/blob/master/internal/ingress/controller/config/config.go#L780 * Fix MaxWorkerOpenFiles calculation on high cores nodes (#7107) * Fix MaxWorkerOpenFiles calculation on high cores nodes * Add e2e test for rlimit_nofile * Fix doc for max-worker-open-files * ingress/tcp: add additional error logging on failed (#7208) * Add file containing stable release (#7313) * Handle named (non-numeric) ports correctly (#7311) Signed-off-by: Carlos Panato <ctadeu@gmail.com> * Updated v1beta1 to v1 as its deprecated (#7308) * remove mercurial from build (#7031) * Retry to download maxmind DB if it fails (#7242) * Retry to download maxmind DB if it fails. Signed-off-by: Sergey Shakuto <sshakuto@infoblox.com> * Add retries count arg, move retry logic into DownloadGeoLite2DB function Signed-off-by: Sergey Shakuto <sshakuto@infoblox.com> * Reorder parameters in DownloadGeoLite2DB Signed-off-by: Sergey Shakuto <sshakuto@infoblox.com> * Remove hardcoded value Signed-off-by: Sergey Shakuto <sshakuto@infoblox.com> * Release v1.0.0-alpha.1 * Add changelog for v1.0.0-alpha.2 * controller: ignore non-service backends (#7332) * controller: ignore non-service backends Signed-off-by: Carlos Panato <ctadeu@gmail.com> * update per feedback Signed-off-by: Carlos Panato <ctadeu@gmail.com> * fix: allow scope/tcp/udp configmap namespace to altered (#7161) * Lower webhook timeout for digital ocean (#7319) * Lower webhook timeout for digital ocean * Set Digital Ocean value controller.admissionWebhooks.timeoutSeconds to 29 * update OWNERS and aliases files (#7365) (#7366) Signed-off-by: Carlos Panato <ctadeu@gmail.com> * Downgrade Lua modules for s390x (#7355) Downgrade Lua modules to last known working version. 
* Fix IngressClass logic for newer releases (#7341) * Fix IngressClass logic for newer releases Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Change e2e tests for the new IngressClass presence * Fix chart and admission tests Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Fix helm chart test Signed-off-by: Ricardo Pchevuzinske Katz <ricardo.katz@gmail.com> * Fix reviews * Remove ingressclass code from admission * update tag to v1.0.0-beta.1 * update readme and changelog for v1.0.0-beta.1 * Release v1.0.0-beta.1 - helm and manifests (#7422) * Change the order of annotation just to trigger a new helm release (#7425) * [cherry-pick] Add dev-v1 branch into helm releaser (#7428) * Add dev-v1 branch into helm releaser (#7424) * chore: add link for artifacthub.io/prerelease annotations Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com> Co-authored-by: Ricardo Katz <rikatz@users.noreply.github.com> * k8s job ci pipeline for dev-v1 br v1.22.0 (#7453) * k8s job ci pipeline for dev-v1 br v1.22.0 Signed-off-by: Neha Lohia <nehapithadiya444@gmail.com> * k8s job ci pipeline for dev-v1 br v1.21.2 Signed-off-by: Neha Lohia <nehapithadiya444@gmail.com> * remove v1.21.1 version Signed-off-by: Neha Lohia <nehapithadiya444@gmail.com> * Add controller.watchIngressWithoutClass config option (#7459) Signed-off-by: Akshit Grover <akshit.grover2016@gmail.com> * Release new helm chart with certgen fixed (#7478) * Update go version, modules and remove ioutil * Release new helm chart with certgen fixed * changed appversion, chartversion, TAG, image (#7490) * Fix CI conflict * Fix CI conflict * Fix build.sh from rebase process * Fix controller_test post rebase Co-authored-by: Tianhao Guo <rggth09@gmail.com> Co-authored-by: Ray <61553+rctay@users.noreply.github.com> Co-authored-by: Bill Cassidy <cassid4@gmail.com> Co-authored-by: Jintao Zhang <tao12345666333@163.com> Co-authored-by: Sathish Ramani <rsathishx87@gmail.com> Co-authored-by: Mansur 
Marvanov <nanorobocop@gmail.com> Co-authored-by: Matt1360 <568198+Matt1360@users.noreply.github.com> Co-authored-by: Carlos Tadeu Panato Junior <ctadeu@gmail.com> Co-authored-by: Kundan Kumar <kundan.kumar@india.nec.com> Co-authored-by: Tom Hayward <thayward@infoblox.com> Co-authored-by: Sergey Shakuto <sshakuto@infoblox.com> Co-authored-by: Tore <tore.lonoy@gmail.com> Co-authored-by: Bouke Versteegh <info@boukeversteegh.nl> Co-authored-by: Shahid <shahid@us.ibm.com> Co-authored-by: James Strong <strong.james.e@gmail.com> Co-authored-by: Long Wu Yuan <longwuyuan@gmail.com> Co-authored-by: Jintao Zhang <zhangjintao9020@gmail.com> Co-authored-by: Neha Lohia <nehapithadiya444@gmail.com> Co-authored-by: Akshit Grover <akshit.grover2016@gmail.com>
241 lines
7 KiB
Go
241 lines
7 KiB
Go
/*
|
|
Copyright 2018 The Kubernetes Authors.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package lua
|
|
|
|
import (
|
|
"context"
|
|
"crypto/tls"
|
|
"fmt"
|
|
"net/http"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/onsi/ginkgo"
|
|
"github.com/stretchr/testify/assert"
|
|
networking "k8s.io/api/networking/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
"k8s.io/ingress-nginx/test/e2e/framework"
|
|
)
|
|
|
|
const (
	// Log lines emitted by the controller when a change cannot be handled
	// dynamically by Lua and a full nginx configuration reload is performed.
	logRequireBackendReload = "Configuration changes detected, backend reload required"
	logBackendReloadSuccess = "Backend successfully reloaded"

	// How long to wait for the Lua balancer to pick up endpoint changes
	// pushed through the dynamic configuration endpoint.
	waitForLuaSync = 5 * time.Second
)
|
|
|
|
var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
|
|
f := framework.NewDefaultFramework("dynamic-configuration")
|
|
|
|
ginkgo.BeforeEach(func() {
|
|
f.NewEchoDeploymentWithReplicas(1)
|
|
ensureIngress(f, "foo.com", framework.EchoService)
|
|
})
|
|
|
|
ginkgo.It("configures balancer Lua middleware correctly", func() {
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
return strings.Contains(cfg, "balancer.init_worker()") && strings.Contains(cfg, "balancer.balance()")
|
|
})
|
|
|
|
host := "foo.com"
|
|
f.WaitForNginxServer(host, func(server string) bool {
|
|
return strings.Contains(server, "balancer.rewrite()") && strings.Contains(server, "balancer.log()")
|
|
})
|
|
})
|
|
|
|
ginkgo.Context("when only backends change", func() {
|
|
ginkgo.It("handles endpoints only changes", func() {
|
|
var nginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
nginxConfig = cfg
|
|
return true
|
|
})
|
|
|
|
replicas := 2
|
|
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, replicas, nil)
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "foo.com").
|
|
Expect().
|
|
Status(http.StatusOK)
|
|
|
|
var newNginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
newNginxConfig = cfg
|
|
return true
|
|
})
|
|
assert.Equal(ginkgo.GinkgoT(), nginxConfig, newNginxConfig)
|
|
})
|
|
|
|
ginkgo.It("handles endpoints only changes (down scaling of replicas)", func() {
|
|
var nginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
nginxConfig = cfg
|
|
return true
|
|
})
|
|
|
|
replicas := 2
|
|
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, replicas, nil)
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
framework.Sleep(waitForLuaSync)
|
|
|
|
f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "foo.com").
|
|
Expect().
|
|
Status(http.StatusOK)
|
|
|
|
var newNginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
newNginxConfig = cfg
|
|
return true
|
|
})
|
|
assert.Equal(ginkgo.GinkgoT(), nginxConfig, newNginxConfig)
|
|
|
|
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, framework.EchoService, 0, nil)
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
framework.Sleep(waitForLuaSync)
|
|
|
|
f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "foo.com").
|
|
Expect().
|
|
Status(503)
|
|
})
|
|
|
|
ginkgo.It("handles endpoints only changes consistently (down scaling of replicas vs. empty service)", func() {
|
|
deploymentName := "scalingecho"
|
|
f.NewEchoDeploymentWithNameAndReplicas(deploymentName, 0)
|
|
createIngress(f, "scaling.foo.com", deploymentName)
|
|
|
|
resp := f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "scaling.foo.com").
|
|
Expect().Raw()
|
|
|
|
originalResponseCode := resp.StatusCode
|
|
|
|
replicas := 2
|
|
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
framework.Sleep(waitForLuaSync)
|
|
|
|
resp = f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "scaling.foo.com").
|
|
Expect().Raw()
|
|
|
|
expectedSuccessResponseCode := resp.StatusCode
|
|
|
|
replicas = 0
|
|
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
framework.Sleep(waitForLuaSync)
|
|
|
|
resp = f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "scaling.foo.com").
|
|
Expect().Raw()
|
|
|
|
expectedFailureResponseCode := resp.StatusCode
|
|
|
|
assert.Equal(ginkgo.GinkgoT(), originalResponseCode, 503, "Expected empty service to return 503 response")
|
|
assert.Equal(ginkgo.GinkgoT(), expectedFailureResponseCode, 503, "Expected downscaled replicaset to return 503 response")
|
|
assert.Equal(ginkgo.GinkgoT(), expectedSuccessResponseCode, 200, "Expected intermediate scaled replicaset to return a 200 response")
|
|
})
|
|
|
|
ginkgo.It("handles an annotation change", func() {
|
|
var nginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
nginxConfig = cfg
|
|
return true
|
|
})
|
|
|
|
ingress, err := f.KubeClientSet.NetworkingV1().Ingresses(f.Namespace).Get(context.TODO(), "foo.com", metav1.GetOptions{})
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/load-balance"] = "round_robin"
|
|
_, err = f.KubeClientSet.NetworkingV1().Ingresses(f.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
|
|
assert.Nil(ginkgo.GinkgoT(), err)
|
|
|
|
f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", "foo.com").
|
|
Expect().
|
|
Status(http.StatusOK)
|
|
|
|
var newNginxConfig string
|
|
f.WaitForNginxConfiguration(func(cfg string) bool {
|
|
newNginxConfig = cfg
|
|
return true
|
|
})
|
|
|
|
assert.Equal(ginkgo.GinkgoT(), nginxConfig, newNginxConfig)
|
|
})
|
|
})
|
|
})
|
|
|
|
func ensureIngress(f *framework.Framework, host string, deploymentName string) *networking.Ingress {
|
|
ing := createIngress(f, host, deploymentName)
|
|
|
|
f.HTTPTestClient().
|
|
GET("/").
|
|
WithHeader("Host", host).
|
|
Expect().
|
|
Status(http.StatusOK)
|
|
|
|
return ing
|
|
}
|
|
|
|
func createIngress(f *framework.Framework, host string, deploymentName string) *networking.Ingress {
|
|
ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, deploymentName, 80,
|
|
map[string]string{
|
|
"nginx.ingress.kubernetes.io/load-balance": "ewma",
|
|
},
|
|
))
|
|
|
|
f.WaitForNginxServer(host,
|
|
func(server string) bool {
|
|
return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) &&
|
|
strings.Contains(server, "proxy_pass http://upstream_balancer;")
|
|
})
|
|
|
|
return ing
|
|
}
|
|
|
|
func ensureHTTPSRequest(f *framework.Framework, url string, host string, expectedDNSName string) {
|
|
resp := f.HTTPTestClientWithTLSConfig(&tls.Config{
|
|
ServerName: host,
|
|
InsecureSkipVerify: true,
|
|
}).
|
|
GET("/").
|
|
WithURL(url).
|
|
WithHeader("Host", host).
|
|
Expect().
|
|
Raw()
|
|
|
|
assert.Equal(ginkgo.GinkgoT(), resp.StatusCode, http.StatusOK)
|
|
assert.Equal(ginkgo.GinkgoT(), len(resp.TLS.PeerCertificates), 1)
|
|
assert.Equal(ginkgo.GinkgoT(), resp.TLS.PeerCertificates[0].DNSNames[0], expectedDNSName)
|
|
}
|