
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	api "k8s.io/api/core/v1"
	core "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	networking "k8s.io/api/networking/v1"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

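// Note: api, core and v1 above are three aliases for the same package,
// k8s.io/api/core/v1. The duplication appears to be historical; existing
// call sites use different aliases and all of them refer to the same types.
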
// EnsureSecret creates a Secret object and returns it; the test fails if the
// Secret cannot be created.
func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
	err := createSecretWithRetries(f.KubeClientSet, f.Namespace, secret)
	assert.Nil(ginkgo.GinkgoT(), err, "creating secret")

	s, err := f.KubeClientSet.CoreV1().Secrets(secret.Namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "getting secret")
	assert.NotNil(ginkgo.GinkgoT(), s, "getting secret")

	return s
}

// EnsureConfigMap creates a ConfigMap object, or updates it if it already
// exists, and returns the resulting object.
func (f *Framework) EnsureConfigMap(configMap *api.ConfigMap) (*api.ConfigMap, error) {
	cm, err := f.KubeClientSet.CoreV1().ConfigMaps(f.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
	if err != nil {
		if k8sErrors.IsAlreadyExists(err) {
			return f.KubeClientSet.CoreV1().ConfigMaps(f.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
		}
		return nil, err
	}

	return cm, nil
}

// GetIngress returns the Ingress with the given namespace and name; the test
// fails if it does not exist.
func (f *Framework) GetIngress(namespace string, name string) *networking.Ingress {
	ing, err := f.KubeClientSet.NetworkingV1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "getting ingress")
	assert.NotNil(ginkgo.GinkgoT(), ing, "expected an ingress but none returned")
	return ing
}

// EnsureIngress creates an Ingress object and returns it; the test fails if it
// cannot be created (for example, because it already exists).
func (f *Framework) EnsureIngress(ingress *networking.Ingress) *networking.Ingress {
	fn := func() {
		err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
		assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")
	}

	f.WaitForReload(fn)

	ing := f.GetIngress(f.Namespace, ingress.Name)
	if ing.Annotations == nil {
		ing.Annotations = make(map[string]string)
	}

	return ing
}

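// Illustrative sketch (not used by the framework itself): a typical e2e spec
// builds a networking/v1 Ingress -- note that PathType is required in v1 --
// and passes it to EnsureIngress. The host, service name "echo" and port 80
// below are assumptions for the example only:
//
//	pathType := networking.PathTypePrefix
//	ing := &networking.Ingress{
//		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: f.Namespace},
//		Spec: networking.IngressSpec{
//			Rules: []networking.IngressRule{{
//				Host: "foo.com",
//				IngressRuleValue: networking.IngressRuleValue{
//					HTTP: &networking.HTTPIngressRuleValue{
//						Paths: []networking.HTTPIngressPath{{
//							Path:     "/",
//							PathType: &pathType,
//							Backend: networking.IngressBackend{
//								Service: &networking.IngressServiceBackend{
//									Name: "echo",
//									Port: networking.ServiceBackendPort{Number: 80},
//								},
//							},
//						}},
//					},
//				},
//			}},
//		},
//	}
//	f.EnsureIngress(ing)
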
// UpdateIngress updates an Ingress object and returns the updated object.
func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingress {
	err := updateIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress")

	ing := f.GetIngress(f.Namespace, ingress.Name)
	if ing.Annotations == nil {
		ing.Annotations = make(map[string]string)
	}

	// updating an ingress requires a reload.
	Sleep(1 * time.Second)

	return ing
}

// EnsureService creates a Service object and returns it; the test fails if it
// cannot be created (for example, because it already exists).
func (f *Framework) EnsureService(service *core.Service) *core.Service {
	err := createServiceWithRetries(f.KubeClientSet, f.Namespace, service)
	assert.Nil(ginkgo.GinkgoT(), err, "creating service")

	s, err := f.KubeClientSet.CoreV1().Services(f.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "getting service")
	assert.NotNil(ginkgo.GinkgoT(), s, "expected a service but none returned")

	return s
}

// EnsureDeployment creates a Deployment object and returns it; the test fails
// if it cannot be created (for example, because it already exists).
func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Deployment {
	err := createDeploymentWithRetries(f.KubeClientSet, f.Namespace, deployment)
	assert.Nil(ginkgo.GinkgoT(), err, "creating deployment")

	d, err := f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "getting deployment")
	assert.NotNil(ginkgo.GinkgoT(), d, "expected a deployment but none returned")

	return d
}

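// Illustrative sketch (echoDeployment and echoService are hypothetical,
// assumed to describe a two-replica backend named "echo"): a spec typically
// creates the backend and waits for its endpoints to become ready before
// creating an Ingress that points at it:
//
//	f.EnsureDeployment(echoDeployment)
//	f.EnsureService(echoService)
//	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, "echo", f.Namespace, 2)
//	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
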
// waitForPodsReady waits up to the given timeout until the expected number of
// Pods matching the list options are running and ready in the given namespace.
func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
	return wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
		if err != nil {
			return false, nil
		}

		r := 0
		for _, p := range pl.Items {
			if isRunning, _ := podRunningReady(&p); isRunning {
				r++
			}
		}

		if r == expectedReplicas {
			return true, nil
		}

		return false, nil
	})
}

// waitForPodsDeleted waits up to the given timeout until no Pods matching the
// list options remain in the given namespace.
func waitForPodsDeleted(kubeClientSet kubernetes.Interface, timeout time.Duration, namespace string, opts metav1.ListOptions) error {
	return wait.Poll(Poll, timeout, func() (bool, error) {
		pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
		if err != nil {
			return false, nil
		}

		if len(pl.Items) == 0 {
			return true, nil
		}

		return false, nil
	})
}

// WaitForEndpoints waits up to the given timeout until the number of ready
// endpoint addresses for the named Endpoints object equals expectedEndpoints.
func WaitForEndpoints(kubeClientSet kubernetes.Interface, timeout time.Duration, name, ns string, expectedEndpoints int) error {
	if expectedEndpoints == 0 {
		return nil
	}

	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
		endpoint, err := kubeClientSet.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if k8sErrors.IsNotFound(err) {
			return false, nil
		}

		assert.Nil(ginkgo.GinkgoT(), err, "getting endpoints")

		if countReadyEndpoints(endpoint) == expectedEndpoints {
			return true, nil
		}

		return false, nil
	})
}

// countReadyEndpoints returns the number of ready addresses across all
// subsets of the given Endpoints object.
func countReadyEndpoints(e *core.Endpoints) int {
	if e == nil || e.Subsets == nil {
		return 0
	}

	num := 0
	for _, sub := range e.Subsets {
		num += len(sub.Addresses)
	}

	return num
}

// podRunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func podRunningReady(p *core.Pod) (bool, error) {
	// Check the phase is running.
	if p.Status.Phase != core.PodRunning {
		return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
			p.ObjectMeta.Name, p.Spec.NodeName, core.PodRunning, p.Status.Phase)
	}

	// Check the ready condition is true.
	if !isPodReady(p) {
		return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
			p.ObjectMeta.Name, p.Spec.NodeName, core.PodReady, core.ConditionTrue, p.Status.Conditions)
	}
	return true, nil
}

// isPodReady returns true if the Pod's ContainersReady condition is true.
func isPodReady(p *core.Pod) bool {
	for _, condition := range p.Status.Conditions {
		if condition.Type != core.ContainersReady {
			continue
		}

		return condition.Status == core.ConditionTrue
	}

	return false
}

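// Note: isPodReady looks at the ContainersReady condition rather than the
// broader PodReady condition, so readiness here is based purely on the
// containers themselves; additional readiness gates, if any, are not
// taken into account.
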
// getIngressNGINXPod returns a running ingress-nginx controller Pod from the
// given namespace.
func getIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
	var pod *core.Pod
	err := wait.Poll(1*time.Second, DefaultTimeout, func() (bool, error) {
		l, err := kubeClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
			LabelSelector: "app.kubernetes.io/name=ingress-nginx",
		})
		if err != nil {
			return false, nil
		}

		for _, p := range l.Items {
			if strings.HasPrefix(p.GetName(), "nginx-ingress-controller") {
				isRunning, err := podRunningReady(&p)
				if err != nil {
					continue
				}

				if isRunning {
					pod = &p
					return true, nil
				}
			}
		}

		return false, nil
	})
	if err != nil {
		if err == wait.ErrWaitTimeout {
			return nil, fmt.Errorf("timeout waiting for at least one running ingress-nginx pod in namespace %v", ns)
		}

		return nil, err
	}

	return pod, nil
}

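// Note: the lookup above assumes controller Pods carry the
// app.kubernetes.io/name=ingress-nginx label and that their names start with
// "nginx-ingress-controller"; a deployment that renames the controller would
// presumably need this prefix adjusted.
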
func createDeploymentWithRetries(c kubernetes.Interface, namespace string, obj *appsv1.Deployment) error {
	if obj == nil {
		return fmt.Errorf("Object provided to create is empty")
	}
	createFunc := func() (bool, error) {
		_, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
		if err == nil {
			return true, nil
		}
		if k8sErrors.IsAlreadyExists(err) {
			return false, err
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
	}

	return retryWithExponentialBackOff(createFunc)
}

func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *v1.Secret) error {
	if obj == nil {
		return fmt.Errorf("Object provided to create is empty")
	}
	createFunc := func() (bool, error) {
		_, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
		if err == nil {
			return true, nil
		}
		if k8sErrors.IsAlreadyExists(err) {
			return false, err
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
	}
	return retryWithExponentialBackOff(createFunc)
}

func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *v1.Service) error {
	if obj == nil {
		return fmt.Errorf("Object provided to create is empty")
	}
	createFunc := func() (bool, error) {
		_, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
		if err == nil {
			return true, nil
		}
		if k8sErrors.IsAlreadyExists(err) {
			return false, err
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
	}

	return retryWithExponentialBackOff(createFunc)
}

func createIngressWithRetries(c kubernetes.Interface, namespace string, obj *networking.Ingress) error {
	if obj == nil {
		return fmt.Errorf("Object provided to create is empty")
	}
	createFunc := func() (bool, error) {
		_, err := c.NetworkingV1().Ingresses(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
		if err == nil {
			return true, nil
		}
		if k8sErrors.IsAlreadyExists(err) {
			return false, err
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
	}

	return retryWithExponentialBackOff(createFunc)
}

func updateIngressWithRetries(c kubernetes.Interface, namespace string, obj *networking.Ingress) error {
	if obj == nil {
		return fmt.Errorf("Object provided to update is empty")
	}
	updateFunc := func() (bool, error) {
		_, err := c.NetworkingV1().Ingresses(namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
		if err == nil {
			return true, nil
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to update object with non-retriable error: %v", err)
	}

	return retryWithExponentialBackOff(updateFunc)
}

const (
	// Parameters for retrying with exponential backoff.
	retryBackoffInitialDuration = 100 * time.Millisecond
	retryBackoffFactor          = 3
	retryBackoffJitter          = 0
	retryBackoffSteps           = 6
)

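// With these parameters retryWithExponentialBackOff makes up to six attempts,
// sleeping roughly 100ms, 300ms, 900ms, 2.7s and 8.1s between them (each
// delay is the previous one multiplied by the factor of 3, with no jitter),
// so a call that never succeeds gives up after about 12 seconds of waiting.
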
// retryWithExponentialBackOff retries the given function with exponential backoff.
func retryWithExponentialBackOff(fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: retryBackoffInitialDuration,
		Factor:   retryBackoffFactor,
		Jitter:   retryBackoffJitter,
		Steps:    retryBackoffSteps,
	}
	return wait.ExponentialBackoff(backoff, fn)
}

// isRetryableAPIError returns true for transient API errors that the helpers
// above may safely retry.
func isRetryableAPIError(err error) bool {
	// These errors may indicate a transient error that we can retry in tests.
	if k8sErrors.IsInternalError(err) || k8sErrors.IsTimeout(err) || k8sErrors.IsServerTimeout(err) ||
		k8sErrors.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) {
		return true
	}
	// If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
	if _, shouldRetry := k8sErrors.SuggestsClientDelay(err); shouldRetry {
		return true
	}

	return false
}

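// Illustrative sketch: helpers for other resource types would presumably
// follow the same pattern as the create*WithRetries functions above. The
// hypothetical createConfigMapWithRetries below is not part of the framework;
// it only shows how a retry-wrapped create for ConfigMaps could look.
func createConfigMapWithRetries(c kubernetes.Interface, namespace string, obj *v1.ConfigMap) error {
	if obj == nil {
		return fmt.Errorf("Object provided to create is empty")
	}
	createFunc := func() (bool, error) {
		// Succeed as soon as the create call goes through.
		_, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
		if err == nil {
			return true, nil
		}
		// AlreadyExists is a hard failure; transient errors are retried.
		if k8sErrors.IsAlreadyExists(err) {
			return false, err
		}
		if isRetryableAPIError(err) {
			return false, nil
		}
		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
	}

	return retryWithExponentialBackOff(createFunc)
}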