Rework Ginkgo usage (#9522)

* Rework Ginkgo usage

Currently Ginkgo is launched multiple times with different options to
accommodate various use cases. In particular, some specs need to be run
sequentially because they create non-namespaced objects that conflict
with concurrent Helm deployments.
However, Ginkgo handles such cases natively: specs that need to run
sequentially can be marked as Serial specs.

This commit marks the specs that need to run sequentially as Serial
specs and runs the whole test suite from a single Ginkgo invocation. As
a result, a single JUnit report is now generated.

Signed-off-by: Hervé Werner <dud225@hotmail.com>
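
For reference, the Serial behaviour comes from Ginkgo v2's Serial decorator; a minimal sketch of the wrapper this change adds to the e2e framework (package clause, import and comment condensed here for illustration):

    package framework

    import "github.com/onsi/ginkgo/v2"

    // IngressNginxDescribeSerial wraps ginkgo.Describe with the ginkgo.Serial
    // decorator: specs in this container never run in parallel with any other
    // spec, so non-namespaced objects (ClusterRoles, IngressClasses, ...) cannot
    // clash with concurrent Helm deployments.
    func IngressNginxDescribeSerial(text string, body func()) bool {
        return ginkgo.Describe(text, ginkgo.Serial, body)
    }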

* Fix controller error in test

Error getting ConfigMap "$NAMESPACE/tcp-services": no object matching key "$NAMESPACE/tcp-services" in local store

Signed-off-by: Hervé Werner <dud225@hotmail.com>

* Replace "go get" invocations with "go install"

Executing "go get" modifies the go.mod and go.sum files, which is not
the case with "go install".

Signed-off-by: Hervé Werner <dud225@hotmail.com>

* Always clean out the Helm deployment

Signed-off-by: Hervé Werner <dud225@hotmail.com>

* Add an E2E test to verify that changes to one or more ConfigMaps trigger an update

Signed-off-by: Hervé Werner <dud225@hotmail.com>
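
A condensed, self-contained sketch of what the new spec asserts. The framework helpers (GetConfigMap, EnsureConfigMap, WaitForNginxConfiguration, NginxLogs) are the ones touched in this diff; the package, container and namespace names used here are illustrative only, the real spec lives in the tcp-services tests below:

    package tcpreload_test

    import (
        "fmt"
        "strings"

        "github.com/onsi/ginkgo/v2"
        "github.com/stretchr/testify/assert"

        "k8s.io/ingress-nginx/test/e2e/framework"
    )

    var _ = framework.IngressNginxDescribe("[TCP] tcp-services reload sketch", func() {
        f := framework.NewDefaultFramework("tcp-reload-sketch")

        ginkgo.It("reloads NGINX after the tcp-services ConfigMap changes", func() {
            f.NewEchoDeployment(framework.WithDeploymentName("second-service"))

            // Point TCP port 8080 at the new backend by updating the ConfigMap.
            cm := f.GetConfigMap(f.Namespace, "tcp-services")
            cm.Data = map[string]string{
                "8080": fmt.Sprintf("%v/second-service:80", f.Namespace),
            }
            f.EnsureConfigMap(cm)

            // The rendered nginx.conf must reference the new TCP upstream...
            f.WaitForNginxConfiguration(func(cfg string) bool {
                return strings.Contains(cfg, fmt.Sprintf(
                    `ngx.var.proxy_upstream_name="tcp-%v-second-service-80"`, f.Namespace))
            })

            // ...and the controller log must show that NGINX was actually reloaded.
            logs, err := f.NginxLogs()
            assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
            assert.Contains(ginkgo.GinkgoT(), logs, "Backend successfully reloaded")
        })
    })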

---------

Signed-off-by: Hervé Werner <dud225@hotmail.com>
Hervé authored 2023-02-16 15:15:39 +01:00, committed by GitHub
parent 080c905fab
commit d6bba85351
20 changed files with 165 additions and 252 deletions


@@ -31,7 +31,7 @@ TAG ?= $(shell cat TAG)
 # e2e settings
 # Allow limiting the scope of the e2e tests. By default run everything
-FOCUS ?= .*
+FOCUS ?=
 # number of parallel test
 E2E_NODES ?= 7
 # run e2e test suite with tests that check for memory leaks? (default is false)


@@ -701,7 +701,6 @@ func New(
         },
     }
-    // TODO: add e2e test to verify that changes to one or more configmap trigger an update
     changeTriggerUpdate := func(name string) bool {
         return name == configmap || name == tcp || name == udp
     }


@@ -78,7 +78,7 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP
 var (
     // IngressPodDetails hold information about the ingress-nginx pod
     IngressPodDetails *PodInfo
-    // IngressNodeDetails old information about the node running ingress-nginx pod
+    // IngressNodeDetails hold information about the node running ingress-nginx pod
     IngressNodeDetails *NodeInfo
 )


@@ -14,70 +14,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-set -e
+set -eu
 NC='\e[0m'
 BGREEN='\e[32m'
-#SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-"5s"}
-FOCUS=${FOCUS:-.*}
 E2E_NODES=${E2E_NODES:-5}
 E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-""}
+reportFile="report-e2e-test-suite.xml"
 ginkgo_args=(
-  "-randomize-all"
-  "-flake-attempts=2"
-  "-fail-fast"
-  "--show-node-events"
+  "--fail-fast"
+  "--flake-attempts=2"
+  "--junit-report=${reportFile}"
+  "--nodes=${E2E_NODES}"
   "--poll-progress-after=180s"
-  # "-slow-spec-threshold=${SLOW_E2E_THRESHOLD}"
-  "-succinct"
-  "-timeout=75m"
+  "--randomize-all"
+  "--show-node-events"
+  "--succinct"
+  "--timeout=75m"
 )
-# Variable for the prefix of report filenames
-reportFileNamePrefix="report-e2e-test-suite"
-echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="${FOCUS}" \
-  -skip="\[Serial\]|\[MemoryLeak\]|\[TopologyHints\]" \
-  -nodes="${E2E_NODES}" \
-  --junit-report=$reportFileNamePrefix.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-# Must be isolated, there is a collision if multiple helms tries to install same clusterRole at same time
-echo -e "${BGREEN}Running e2e test for topology aware hints...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="\[TopologyHints\]" \
-  -skip="\[Serial\]|\[MemoryLeak\]]" \
-  -nodes="${E2E_NODES}" \
-  --junit-report=$reportFileNamePrefix-topology.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="\[Serial\]" \
-  -skip="\[MemoryLeak\]" \
-  --junit-report=$reportFileNamePrefix-serial.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-if [[ ${E2E_CHECK_LEAKS} != "" ]]; then
-  echo -e "${BGREEN}Running e2e test suite with tests that check for memory leaks...${NC}"
-  ginkgo "${ginkgo_args[@]}" \
-    -focus="\[MemoryLeak\]" \
-    -skip="\[Serial\]" \
-    --junit-report=$reportFileNamePrefix-memleak.xml \
-    /e2e.test
-  # Create configMap out of a compressed report file for extraction later
+if [ -n "${FOCUS}" ]; then
+  ginkgo_args+=("--focus=${FOCUS}")
 fi
-for rFile in `ls $reportFileNamePrefix*`
-do
-  gzip -k $rFile
-  kubectl create cm $rFile.gz --from-file $rFile.gz
-  kubectl label cm $rFile.gz junitreport=true
-done
+if [ -z "${E2E_CHECK_LEAKS}" ]; then
+  ginkgo_args+=("--skip=\[Memory Leak\]")
+fi
+echo -e "${BGREEN}Running e2e test suite...${NC}"
+(set -x; ginkgo "${ginkgo_args[@]}" /e2e.test)
+# Create configMap out of a compressed report file for extraction later
+gzip -k ${reportFile}
+kubectl create cm ${reportFile}.gz --from-file ${reportFile}.gz
+kubectl label cm ${reportFile}.gz junitreport=true


@@ -8,6 +8,7 @@ controller:
   digest:
   digestChroot:
   scope:
+    # Necessary to allow the ingress controller to get the topology information from the nodes
     enabled: false
   config:
     worker-processes: "1"
@@ -19,12 +20,7 @@ controller:
     periodSeconds: 1
   service:
     type: NodePort
-  electionID: ingress-controller-leader
-  ingressClassResource:
-    # We will create and remove each IC/ClusterRole/ClusterRoleBinding per test so there's no conflict
-    enabled: false
   extraArgs:
-    tcp-services-configmap: $NAMESPACE/tcp-services
     # e2e tests do not require information about ingress status
     update-status: "false"
   terminationGracePeriodSeconds: 1
@@ -33,19 +29,6 @@ controller:
   enableTopologyAwareRouting: true
-  # ulimit -c unlimited
-  # mkdir -p /tmp/coredump
-  # chmod a+rwx /tmp/coredump
-  # echo "/tmp/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-  extraVolumeMounts:
-    - name: coredump
-      mountPath: /tmp/coredump
-  extraVolumes:
-    - name: coredump
-      hostPath:
-        path: /tmp/coredump
 rbac:
   create: true
   scope: false


@@ -32,7 +32,7 @@ import (
     "k8s.io/ingress-nginx/test/e2e/framework"
 )
-var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
+var _ = framework.IngressNginxDescribeSerial("[Admission] admission controller", func() {
     f := framework.NewDefaultFramework("admission")
     ginkgo.BeforeEach(func() {
@@ -40,11 +40,6 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
         f.NewSlowEchoDeployment()
     })
-    ginkgo.AfterEach(func() {
-        err := uninstallChart(f)
-        assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
-    })
     ginkgo.It("reject ingress with global-rate-limit annotations when memcached is not configured", func() {
         host := "admission-test"
@@ -216,16 +211,6 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
     })
 })
-func uninstallChart(f *framework.Framework) error {
-    cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
-    _, err := cmd.CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
-    }
-    return nil
-}
 const (
     validV1Ingress = `
 apiVersion: networking.k8s.io/v1


@@ -21,7 +21,6 @@ import (
     "strings"
     "github.com/onsi/ginkgo/v2"
-    "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -81,9 +80,7 @@ var _ = framework.DescribeAnnotation("backend-protocol - FastCGI", func() {
         },
     }
-    cm, err := f.EnsureConfigMap(configuration)
-    assert.Nil(ginkgo.GinkgoT(), err, "creating configmap")
-    assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+    f.EnsureConfigMap(configuration)
     host := "fastcgi-params-configmap"


@@ -21,7 +21,6 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
-    "os/exec"
     "strings"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,7 +32,7 @@ import (
     "k8s.io/ingress-nginx/test/e2e/framework"
 )
-var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing", func() {
+var _ = framework.IngressNginxDescribeSerial("[TopologyHints] topology aware routing", func() {
     f := framework.NewDefaultFramework("topology")
     host := "topology-svc.foo.com"
@@ -41,12 +40,6 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
         f.NewEchoDeployment(framework.WithDeploymentReplicas(2), framework.WithSvcTopologyAnnotations())
     })
-    ginkgo.AfterEach(func() {
-        // we need to uninstall chart because of clusterRole which is not destroyed with namespace
-        err := uninstallChart(f)
-        assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
-    })
     ginkgo.It("should return 200 when service has topology hints", func() {
         annotations := make(map[string]string)
@@ -100,13 +93,3 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
         }
     })
 })
-func uninstallChart(f *framework.Framework) error {
-    cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
-    _, err := cmd.CombinedOutput()
-    if err != nil {
-        return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
-    }
-    return nil
-}


@@ -152,6 +152,16 @@ func (f *Framework) KubectlProxy(port int) (int, *exec.Cmd, error) {
     return -1, cmd, fmt.Errorf("failed to parse port from proxy stdout: %s", output)
 }
+func (f *Framework) UninstallChart() error {
+    cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
+    _, err := cmd.CombinedOutput()
+    if err != nil {
+        return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
+    }
+    return nil
+}
 func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
     stdout, err = cmd.StdoutPipe()
     if err != nil {


@@ -150,7 +150,11 @@ func (f *Framework) AfterEach() {
     defer func(kubeClient kubernetes.Interface, ingressclass string) {
         defer ginkgo.GinkgoRecover()
-        err := deleteIngressClass(kubeClient, ingressclass)
+        err := f.UninstallChart()
+        assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
+
+        err = deleteIngressClass(kubeClient, ingressclass)
         assert.Nil(ginkgo.GinkgoT(), err, "deleting IngressClass")
     }(f.KubeClientSet, f.IngressClass)
@@ -192,6 +196,11 @@ func IngressNginxDescribe(text string, body func()) bool {
     return ginkgo.Describe(text, body)
 }
+// IngressNginxDescribeSerial wrapper function for ginkgo describe. Adds namespacing.
+func IngressNginxDescribeSerial(text string, body func()) bool {
+    return ginkgo.Describe(text, ginkgo.Serial, body)
+}
 // DescribeAnnotation wrapper function for ginkgo describe. Adds namespacing.
 func DescribeAnnotation(text string, body func()) bool {
     return ginkgo.Describe("[Annotations] "+text, body)
@@ -202,11 +211,6 @@ func DescribeSetting(text string, body func()) bool {
     return ginkgo.Describe("[Setting] "+text, body)
 }
-// MemoryLeakIt is wrapper function for ginkgo It. Adds "[MemoryLeak]" tag and makes static analysis easier.
-func MemoryLeakIt(text string, body interface{}) bool {
-    return ginkgo.It(text+" [MemoryLeak]", body)
-}
 // GetNginxIP returns the number of TCP port where NGINX is running
 func (f *Framework) GetNginxIP() string {
     s, err := f.KubeClientSet.
@@ -387,7 +391,7 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 }
 // WaitForReload calls the passed function and
-// asser it has caused at least 1 reload.
+// asserts it has caused at least 1 reload.
 func (f *Framework) WaitForReload(fn func()) {
     initialReloadCount := getReloadCount(f.pod, f.Namespace, f.KubeClientSet)


@@ -68,9 +68,7 @@ func (f *Framework) NewInfluxDBDeployment() {
         },
     }
-    cm, err := f.EnsureConfigMap(configuration)
-    assert.Nil(ginkgo.GinkgoT(), err, "creating an Influxdb deployment")
-    assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+    f.EnsureConfigMap(configuration)
     deployment := &appsv1.Deployment{
         ObjectMeta: metav1.ObjectMeta{
@@ -136,7 +134,7 @@ func (f *Framework) NewInfluxDBDeployment() {
     d := f.EnsureDeployment(deployment)
-    err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+    err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
         LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
     })
     assert.Nil(ginkgo.GinkgoT(), err, "waiting for influxdb pod to become ready")


@@ -25,9 +25,7 @@ import (
     "github.com/onsi/ginkgo/v2"
     "github.com/stretchr/testify/assert"
     appsv1 "k8s.io/api/apps/v1"
-    api "k8s.io/api/core/v1"
     core "k8s.io/api/core/v1"
-    v1 "k8s.io/api/core/v1"
     networking "k8s.io/api/networking/v1"
     k8sErrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,8 +34,8 @@ import (
     "k8s.io/client-go/kubernetes"
 )
-// EnsureSecret creates a Secret object or returns it if it already exists.
-func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
+// EnsureSecret creates a Secret object or returns it.
+func (f *Framework) EnsureSecret(secret *core.Secret) *core.Secret {
     err := createSecretWithRetries(f.KubeClientSet, secret.Namespace, secret)
     assert.Nil(ginkgo.GinkgoT(), err, "creating secret")
@@ -48,17 +46,30 @@ func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
     return s
 }
-// EnsureConfigMap creates a ConfigMap object or returns it if it already exists.
-func (f *Framework) EnsureConfigMap(configMap *api.ConfigMap) (*api.ConfigMap, error) {
-    cm, err := f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
-    if err != nil {
-        if k8sErrors.IsAlreadyExists(err) {
-            return f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
-        }
-        return nil, err
-    }
-    return cm, nil
+// GetConfigMap gets a ConfigMap object from the given namespace, name and returns it, throws error if it does not exist.
+func (f *Framework) GetConfigMap(namespace string, name string) *core.ConfigMap {
+    cm, err := f.KubeClientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+    assert.Nil(ginkgo.GinkgoT(), err, "getting configmap")
+    assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+    return cm
+}
+
+// EnsureConfigMap creates or updates an existing ConfigMap object or returns it.
+func (f *Framework) EnsureConfigMap(configMap *core.ConfigMap) *core.ConfigMap {
+    cm := configMap.DeepCopy()
+    // Clean out ResourceVersion field if present
+    if cm.ObjectMeta.ResourceVersion != "" {
+        cm.ObjectMeta.ResourceVersion = ""
+    }
+
+    res, err := f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
+    if k8sErrors.IsAlreadyExists(err) {
+        res, err = f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
+    }
+    assert.Nil(ginkgo.GinkgoT(), err, "updating configmap")
+    assert.NotNil(ginkgo.GinkgoT(), res, "updating configmap")
+
+    return res
 }
 // GetIngress gets an Ingress object from the given namespace, name and returns it, throws error if it does not exists.
@@ -293,7 +304,7 @@ func createDeploymentWithRetries(c kubernetes.Interface, namespace string, obj *
     return retryWithExponentialBackOff(createFunc)
 }
-func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *v1.Secret) error {
+func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *core.Secret) error {
     if obj == nil {
         return fmt.Errorf("Object provided to create is empty")
     }
@@ -313,7 +324,7 @@ func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *v1.S
     return retryWithExponentialBackOff(createFunc)
 }
-func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *v1.Service) error {
+func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *core.Service) error {
     if obj == nil {
         return fmt.Errorf("Object provided to create is empty")
     }


@@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
         f.NewEchoDeployment()
     })
-    framework.MemoryLeakIt("should not leak memory from ingress SSL certificates or configuration updates", func() {
+    ginkgo.It("should not leak memory from ingress SSL certificates or configuration updates", func() {
         hostCount := 1000
         iterations := 10


@@ -78,7 +78,7 @@ fi
 if [ "${SKIP_IMAGE_CREATION:-false}" = "false" ]; then
   if ! command -v ginkgo &> /dev/null; then
-    go get github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
+    go install github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
   fi
   echo "[dev-env] building image"
   make -C ${DIR}/../../ clean-image build image


@@ -49,15 +49,9 @@ if [ "$missing" = true ]; then
   exit 1
 fi
-E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-}
-FOCUS=${FOCUS:-.*}
 BASEDIR=$(dirname "$0")
 NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE)
-export E2E_CHECK_LEAKS
-export FOCUS
 echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}"
 kubectl create serviceaccount ingress-nginx-e2e || true
 kubectl create clusterrolebinding permissive-binding \
@@ -66,7 +60,6 @@ kubectl create clusterrolebinding permissive-binding \
   --user=kubelet \
   --serviceaccount=default:ingress-nginx-e2e || true
-
 VER=$(kubectl version --client=false -o json |jq '.serverVersion.minor |tonumber')
 if [ $VER -lt 24 ]; then
   echo -e "${BGREEN}Waiting service account...${NC}"; \
@@ -76,7 +69,6 @@ if [ $VER -lt 24 ]; then
   done
 fi
-
 echo -e "Starting the e2e test pod"
 kubectl run --rm \
@@ -90,38 +82,10 @@ kubectl run --rm \
   e2e --image=nginx-ingress-controller:e2e
 # Get the junit-reports stored in the configMaps created during e2etests
-echo "Getting the report files out now.."
+echo "Getting the report file out now.."
 reportsDir="test/junitreports"
-reportFileName="report-e2e-test-suite"
-[ ! -e ${reportsDir} ] && mkdir $reportsDir
+reportFile="report-e2e-test-suite.xml.gz"
+mkdir -p $reportsDir
 cd $reportsDir
-# TODO: Seeking Rikatz help here to extract in a loop. Tried things like below without success
-#for cmName in `k get cm -l junitreport=true -o json | jq '.items[].binaryData | keys[]' | tr '\"' ' '`
-#do
-#
-#
-# kubectl get cm -l junitreport=true -o json | jq -r '[.items[].binaryData | to_entries[] | {"key": .key, "value": .value }] | from_entries'
-#
-# Below lines successfully extract the report but they are one line per report.
-# We only have 3 ginkgo reports so its ok for now
-# But still, ideally this should be a loop as talked about in comments a few lines above
-kubectl get cm $reportFileName.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite\.xml\.gz']}" > $reportFileName.xml.gz.base64
-kubectl get cm $reportFileName-serial.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite-serial\.xml\.gz']}" > $reportFileName-serial.xml.gz.base64
-cat $reportFileName.xml.gz.base64 | base64 -d > $reportFileName.xml.gz
-cat $reportFileName-serial.xml.gz.base64 | base64 -d > $reportFileName-serial.xml.gz
-gzip -d $reportFileName.xml.gz
-gzip -d $reportFileName-serial.xml.gz
-rm *.base64
-cd ../..
-# TODO Temporary: if condition to check if the memleak cm exists and only then try the extract for the memleak report
-#
-#kubectl get cm $reportFileName-serial -o "jsonpath={.data['report-e2e-test-suite-memleak\.xml\.gz']}" > $reportFileName-memleak.base64
-#cat $reportFileName-memleak.base64 | base64 -d > $reportFileName-memleak.xml.gz
-#gzip -d $reportFileName-memleak.xml.gz
-echo "done getting the reports files out.."
+kubectl get cm $reportFile -o "jsonpath={.binaryData['${reportFile//\./\\.}']}" | base64 -d | gunzip > ${reportFile%\.gz}
+echo "done getting the report file out.."


@@ -95,7 +95,7 @@ fi
 if [ "${SKIP_E2E_IMAGE_CREATION}" = "false" ]; then
   if ! command -v ginkgo &> /dev/null; then
-    go get github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
+    go install github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
   fi
   echo "[dev-env] .. done building controller images"


@@ -73,5 +73,9 @@ var _ = framework.DescribeSetting("Configmap change", func() {
             return strings.ContainsAny(cfg, "error_log /var/log/nginx/error.log debug;")
         })
         assert.NotEqual(ginkgo.GinkgoT(), checksum, newChecksum)
+
+        logs, err := f.NginxLogs()
+        assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
+        assert.Contains(ginkgo.GinkgoT(), logs, "Backend successfully reloaded")
     })
 })


@@ -27,7 +27,7 @@ import (
     "k8s.io/ingress-nginx/test/e2e/framework"
 )
-var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func() {
+var _ = framework.IngressNginxDescribeSerial("[Flag] watch namespace selector", func() {
     f := framework.NewDefaultFramework("namespace-selector")
     notMatchedHost, matchedHost := "bar", "foo"
     var notMatchedNs string
@@ -45,7 +45,7 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
     cleanupNamespace := func(ns string) {
         err := framework.DeleteKubeNamespace(f.KubeClientSet, ns)
-        assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily crated namespace")
+        assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily created namespace")
     }
     ginkgo.BeforeEach(func() {
@@ -56,13 +56,6 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
     ginkgo.AfterEach(func() {
         cleanupNamespace(notMatchedNs)
         cleanupNamespace(matchedNs)
-
-        // cleanup clusterrole/clusterrolebinding created by installing chart with controller.scope.enabled=false
-        err := f.KubeClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrole nginx-ingress")
-        err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrolebinging nginx-ingress")
     })
     ginkgo.Context("With specific watch-namespace-selector flags", func() {


@@ -85,7 +85,7 @@ var _ = framework.DescribeSetting("OCSP", func() {
         cfsslDB, err := os.ReadFile("empty.db")
         assert.Nil(ginkgo.GinkgoT(), err)
-        cmap, err := f.EnsureConfigMap(&corev1.ConfigMap{
+        f.EnsureConfigMap(&corev1.ConfigMap{
             ObjectMeta: metav1.ObjectMeta{
                 Name:      "ocspserve",
                 Namespace: f.Namespace,
@@ -95,8 +95,6 @@ var _ = framework.DescribeSetting("OCSP", func() {
                 "db-config.json": []byte(`{"driver":"sqlite3","data_source":"/data/empty.db"}`),
             },
         })
-        assert.Nil(ginkgo.GinkgoT(), err)
-        assert.NotNil(ginkgo.GinkgoT(), cmap)
         d, s := ocspserveDeployment(f.Namespace)
         f.EnsureDeployment(d)


@@ -21,6 +21,7 @@ import (
     "fmt"
     "net"
     "net/http"
+    "regexp"
     "strings"
     "time"
@@ -36,58 +37,39 @@ import (
 var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
     f := framework.NewDefaultFramework("tcp")
+    var ip string
+
+    ginkgo.BeforeEach(func() {
+        ip = f.GetNginxIP()
+    })
 
     ginkgo.It("should expose a TCP service", func() {
         f.NewEchoDeployment()
-        config, err := f.KubeClientSet.
-            CoreV1().
-            ConfigMaps(f.Namespace).
-            Get(context.TODO(), "tcp-services", metav1.GetOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining tcp-services configmap")
-        assert.NotNil(ginkgo.GinkgoT(), config, "expected a configmap but none returned")
-        if config.Data == nil {
-            config.Data = map[string]string{}
-        }
-        config.Data["8080"] = fmt.Sprintf("%v/%v:80", f.Namespace, framework.EchoService)
-        _, err = f.KubeClientSet.
-            CoreV1().
-            ConfigMaps(f.Namespace).
-            Update(context.TODO(), config, metav1.UpdateOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating configmap")
-        svc, err := f.KubeClientSet.
-            CoreV1().
-            Services(f.Namespace).
-            Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining ingress-nginx service")
-        assert.NotNil(ginkgo.GinkgoT(), svc, "expected a service but none returned")
+        cm := f.GetConfigMap(f.Namespace, "tcp-services")
+        cm.Data = map[string]string{
+            "8080": fmt.Sprintf("%v/%v:80", f.Namespace, framework.EchoService),
+        }
+        f.EnsureConfigMap(cm)
+
+        svc := f.GetService(f.Namespace, "nginx-ingress-controller")
         svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{
             Name:       framework.EchoService,
             Port:       8080,
            TargetPort: intstr.FromInt(8080),
         })
-        _, err = f.KubeClientSet.
+        _, err := f.KubeClientSet.
             CoreV1().
             Services(f.Namespace).
             Update(context.TODO(), svc, metav1.UpdateOptions{})
         assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")
-        // wait for update and nginx reload and new endpoint is available
-        framework.Sleep()
         f.WaitForNginxConfiguration(
             func(cfg string) bool {
                 return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-%v-80"`,
                     f.Namespace, framework.EchoService))
             })
-        ip := f.GetNginxIP()
         f.HTTPTestClient().
             GET("/").
             WithURL(fmt.Sprintf("http://%v:8080", ip)).
@@ -122,44 +104,25 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
         }
         f.EnsureService(externalService)
-        // Expose the `external name` port on the `ingress-nginx` service
-        svc, err := f.KubeClientSet.
-            CoreV1().
-            Services(f.Namespace).
-            Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining ingress-nginx service")
-        assert.NotNil(ginkgo.GinkgoT(), svc, "expected a service but none returned")
+        // Expose the `external name` port on the `ingress-nginx-controller` service
+        svc := f.GetService(f.Namespace, "nginx-ingress-controller")
         svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{
             Name:       "dns-svc",
             Port:       5353,
             TargetPort: intstr.FromInt(5353),
         })
-        _, err = f.KubeClientSet.
+        _, err := f.KubeClientSet.
             CoreV1().
             Services(f.Namespace).
             Update(context.TODO(), svc, metav1.UpdateOptions{})
         assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")
         // Update the TCP configmap to link port 5353 to the DNS external name service
-        config, err := f.KubeClientSet.
-            CoreV1().
-            ConfigMaps(f.Namespace).
-            Get(context.TODO(), "tcp-services", metav1.GetOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining tcp-services configmap")
-        assert.NotNil(ginkgo.GinkgoT(), config, "expected a configmap but none returned")
-        if config.Data == nil {
-            config.Data = map[string]string{}
-        }
-        config.Data["5353"] = fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace)
-        _, err = f.KubeClientSet.
-            CoreV1().
-            ConfigMaps(f.Namespace).
-            Update(context.TODO(), config, metav1.UpdateOptions{})
-        assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating configmap")
+        config := f.GetConfigMap(f.Namespace, "tcp-services")
+        config.Data = map[string]string{
+            "5353": fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace),
+        }
+        f.EnsureConfigMap(config)
         // Validate that the generated nginx config contains the expected `proxy_upstream_name` value
         f.WaitForNginxConfiguration(
@@ -168,7 +131,6 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
             })
         // Execute the test. Use the `external name` service to resolve a domain name.
-        ip := f.GetNginxIP()
         resolver := net.Resolver{
             PreferGo: true,
             Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
@@ -203,4 +165,57 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
         assert.Nil(ginkgo.GinkgoT(), err, "unexpected error from DNS resolver")
         assert.Contains(ginkgo.GinkgoT(), ips, "8.8.4.4")
     })
+
+    ginkgo.It("should reload after an update in the configuration", func() {
+        ginkgo.By("setting up a first deployment")
+        f.NewEchoDeployment(framework.WithDeploymentName("first-service"))
+
+        cm := f.GetConfigMap(f.Namespace, "tcp-services")
+        cm.Data = map[string]string{
+            "8080": fmt.Sprintf("%v/first-service:80", f.Namespace),
+        }
+        f.EnsureConfigMap(cm)
+
+        checksumRegex := regexp.MustCompile(`Configuration checksum:\s+(\d+)`)
+        checksum := ""
+
+        f.WaitForNginxConfiguration(
+            func(cfg string) bool {
+                // before returning, extract the current checksum
+                match := checksumRegex.FindStringSubmatch(cfg)
+                if len(match) > 0 {
+                    checksum = match[1]
+                }
+                return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-first-service-80"`,
+                    f.Namespace))
+            })
+        assert.NotEmpty(ginkgo.GinkgoT(), checksum)
+
+        ginkgo.By("updating the tcp service to a second deployment")
+        f.NewEchoDeployment(framework.WithDeploymentName("second-service"))
+
+        cm = f.GetConfigMap(f.Namespace, "tcp-services")
+        cm.Data["8080"] = fmt.Sprintf("%v/second-service:80", f.Namespace)
+        f.EnsureConfigMap(cm)
+
+        newChecksum := ""
+        f.WaitForNginxConfiguration(
+            func(cfg string) bool {
+                match := checksumRegex.FindStringSubmatch(cfg)
+                if len(match) > 0 {
+                    newChecksum = match[1]
+                }
+                return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-second-service-80"`,
+                    f.Namespace))
+            })
+        assert.NotEqual(ginkgo.GinkgoT(), checksum, newChecksum)
+
+        logs, err := f.NginxLogs()
+        assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
+        assert.Contains(ginkgo.GinkgoT(), logs, "Backend successfully reloaded")
+    })
 })