Rework Ginkgo usage (#9522)
* Rework Ginkgo usage

  Currently Ginkgo is launched multiple times with different options to accommodate various use cases. In particular, some specs need to run sequentially because they create non-namespaced objects that conflict with concurrent Helm deployments. Ginkgo handles this natively: specs that must not run in parallel can be marked as Serial specs. This commit marks those specs as Serial and runs the whole test suite from a single Ginkgo invocation. As a result, a single JUnit report is now generated.

* Fix controller error in test

  Error getting ConfigMap "$NAMESPACE/tcp-services": no object matching key "$NAMESPACE/tcp-services" in local store

* Replace "go get" invocations with "go install"

  Executing "go get" modifies the go.mod and go.sum files, which is not the case with "go install".

* Always clean out the Helm deployment

* Add E2E test to verify that changes to one or more configmaps trigger an update

Signed-off-by: Hervé Werner <dud225@hotmail.com>
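For context, Ginkgo v2 expresses the sequential-execution requirement with the Serial decorator: specs marked Serial are deferred until all parallel specs have finished and then run one at a time on a single process, so a separate ginkgo invocation is no longer needed. A minimal, self-contained sketch (illustrative suite and spec names, not taken from this PR):

    package e2e_test

    import (
    	"testing"

    	"github.com/onsi/ginkgo/v2"
    )

    // Ordinary specs may run concurrently across the worker processes
    // started by `ginkgo --nodes=N`.
    var _ = ginkgo.Describe("namespaced objects", func() {
    	ginkgo.It("is safe to run in parallel", func() {})
    })

    // Specs decorated with ginkgo.Serial run after all parallel specs,
    // one at a time, so they can touch cluster-scoped objects safely.
    var _ = ginkgo.Describe("cluster-scoped objects", ginkgo.Serial, func() {
    	ginkgo.It("runs with exclusive access to the cluster", func() {})
    })

    func TestE2E(t *testing.T) {
    	ginkgo.RunSpecs(t, "example suite")
    }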
This commit is contained in: parent 080c905fab, commit d6bba85351
20 changed files with 165 additions and 252 deletions
Makefile (2 changes)
@@ -31,7 +31,7 @@ TAG ?= $(shell cat TAG)
 # e2e settings
 # Allow limiting the scope of the e2e tests. By default run everything
-FOCUS ?= .*
+FOCUS ?=
 # number of parallel test
 E2E_NODES ?= 7
 # run e2e test suite with tests that check for memory leaks? (default is false)
@@ -701,7 +701,6 @@ func New(
 		},
 	}
 
-	// TODO: add e2e test to verify that changes to one or more configmap trigger an update
 	changeTriggerUpdate := func(name string) bool {
 		return name == configmap || name == tcp || name == udp
 	}
@@ -78,7 +78,7 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP
 var (
 	// IngressPodDetails hold information about the ingress-nginx pod
 	IngressPodDetails *PodInfo
-	// IngressNodeDetails old information about the node running ingress-nginx pod
+	// IngressNodeDetails hold information about the node running ingress-nginx pod
 	IngressNodeDetails *NodeInfo
 )
@@ -14,70 +14,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -e
+set -eu
 
 NC='\e[0m'
 BGREEN='\e[32m'
 
-#SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-"5s"}
 FOCUS=${FOCUS:-.*}
 E2E_NODES=${E2E_NODES:-5}
 E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-""}
 
+reportFile="report-e2e-test-suite.xml"
 ginkgo_args=(
-  "-randomize-all"
-  "-flake-attempts=2"
-  "-fail-fast"
-  "--show-node-events"
-  # "-slow-spec-threshold=${SLOW_E2E_THRESHOLD}"
-  "-succinct"
-  "-timeout=75m"
+  "--fail-fast"
+  "--flake-attempts=2"
+  "--junit-report=${reportFile}"
+  "--nodes=${E2E_NODES}"
+  "--poll-progress-after=180s"
+  "--randomize-all"
+  "--show-node-events"
+  "--succinct"
+  "--timeout=75m"
 )
 
-# Variable for the prefix of report filenames
-reportFileNamePrefix="report-e2e-test-suite"
-
-echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="${FOCUS}" \
-  -skip="\[Serial\]|\[MemoryLeak\]|\[TopologyHints\]" \
-  -nodes="${E2E_NODES}" \
-  --junit-report=$reportFileNamePrefix.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-
-# Must be isolated, there is a collision if multiple helms tries to install same clusterRole at same time
-echo -e "${BGREEN}Running e2e test for topology aware hints...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="\[TopologyHints\]" \
-  -skip="\[Serial\]|\[MemoryLeak\]]" \
-  -nodes="${E2E_NODES}" \
-  --junit-report=$reportFileNamePrefix-topology.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-
-echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
-ginkgo "${ginkgo_args[@]}" \
-  -focus="\[Serial\]" \
-  -skip="\[MemoryLeak\]" \
-  --junit-report=$reportFileNamePrefix-serial.xml \
-  /e2e.test
-# Create configMap out of a compressed report file for extraction later
-
-if [[ ${E2E_CHECK_LEAKS} != "" ]]; then
-  echo -e "${BGREEN}Running e2e test suite with tests that check for memory leaks...${NC}"
-  ginkgo "${ginkgo_args[@]}" \
-    -focus="\[MemoryLeak\]" \
-    -skip="\[Serial\]" \
-    --junit-report=$reportFileNamePrefix-memleak.xml \
-    /e2e.test
-  # Create configMap out of a compressed report file for extraction later
+if [ -n "${FOCUS}" ]; then
+  ginkgo_args+=("--focus=${FOCUS}")
 fi
 
-for rFile in `ls $reportFileNamePrefix*`
-do
-  gzip -k $rFile
-  kubectl create cm $rFile.gz --from-file $rFile.gz
-  kubectl label cm $rFile.gz junitreport=true
-done
+if [ -z "${E2E_CHECK_LEAKS}" ]; then
+  ginkgo_args+=("--skip=\[Memory Leak\]")
+fi
+
+echo -e "${BGREEN}Running e2e test suite...${NC}"
+(set -x; ginkgo "${ginkgo_args[@]}" /e2e.test)
+
+# Create configMap out of a compressed report file for extraction later
+gzip -k ${reportFile}
+kubectl create cm ${reportFile}.gz --from-file ${reportFile}.gz
+kubectl label cm ${reportFile}.gz junitreport=true
@@ -8,6 +8,7 @@ controller:
   digest:
   digestChroot:
   scope:
+    # Necessary to allow the ingress controller to get the topology information from the nodes
     enabled: false
   config:
     worker-processes: "1"
@@ -19,12 +20,7 @@ controller:
     periodSeconds: 1
   service:
     type: NodePort
-  electionID: ingress-controller-leader
-  ingressClassResource:
-    # We will create and remove each IC/ClusterRole/ClusterRoleBinding per test so there's no conflict
-    enabled: false
   extraArgs:
+    tcp-services-configmap: $NAMESPACE/tcp-services
     # e2e tests do not require information about ingress status
     update-status: "false"
   terminationGracePeriodSeconds: 1
@@ -33,19 +29,6 @@ controller:
 
   enableTopologyAwareRouting: true
 
-  # ulimit -c unlimited
-  # mkdir -p /tmp/coredump
-  # chmod a+rwx /tmp/coredump
-  # echo "/tmp/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-  extraVolumeMounts:
-    - name: coredump
-      mountPath: /tmp/coredump
-
-  extraVolumes:
-    - name: coredump
-      hostPath:
-        path: /tmp/coredump
 
 rbac:
   create: true
   scope: false
@@ -32,7 +32,7 @@ import (
 	"k8s.io/ingress-nginx/test/e2e/framework"
 )
 
-var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
+var _ = framework.IngressNginxDescribeSerial("[Admission] admission controller", func() {
 	f := framework.NewDefaultFramework("admission")
 
 	ginkgo.BeforeEach(func() {
@@ -40,11 +40,6 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
 		f.NewSlowEchoDeployment()
 	})
 
-	ginkgo.AfterEach(func() {
-		err := uninstallChart(f)
-		assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
-	})
-
 	ginkgo.It("reject ingress with global-rate-limit annotations when memcached is not configured", func() {
 		host := "admission-test"
 
@@ -216,16 +211,6 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
 	})
 })
 
-func uninstallChart(f *framework.Framework) error {
-	cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
-	_, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
-	}
-
-	return nil
-}
-
 const (
 	validV1Ingress = `
 apiVersion: networking.k8s.io/v1
@@ -21,7 +21,6 @@ import (
 	"strings"
 
 	"github.com/onsi/ginkgo/v2"
-	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -81,9 +80,7 @@ var _ = framework.DescribeAnnotation("backend-protocol - FastCGI", func() {
 			},
 		}
 
-		cm, err := f.EnsureConfigMap(configuration)
-		assert.Nil(ginkgo.GinkgoT(), err, "creating configmap")
-		assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+		f.EnsureConfigMap(configuration)
 
 		host := "fastcgi-params-configmap"
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"os/exec"
 	"strings"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,7 +32,7 @@ import (
 	"k8s.io/ingress-nginx/test/e2e/framework"
 )
 
-var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing", func() {
+var _ = framework.IngressNginxDescribeSerial("[TopologyHints] topology aware routing", func() {
 	f := framework.NewDefaultFramework("topology")
 	host := "topology-svc.foo.com"
 
@@ -41,12 +40,6 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
 		f.NewEchoDeployment(framework.WithDeploymentReplicas(2), framework.WithSvcTopologyAnnotations())
 	})
 
-	ginkgo.AfterEach(func() {
-		// we need to uninstall chart because of clusterRole which is not destroyed with namespace
-		err := uninstallChart(f)
-		assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
-	})
-
 	ginkgo.It("should return 200 when service has topology hints", func() {
 
 		annotations := make(map[string]string)
@@ -100,13 +93,3 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
 		}
 	})
 })
-
-func uninstallChart(f *framework.Framework) error {
-	cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
-	_, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
-	}
-
-	return nil
-}
@@ -152,6 +152,16 @@ func (f *Framework) KubectlProxy(port int) (int, *exec.Cmd, error) {
 	return -1, cmd, fmt.Errorf("failed to parse port from proxy stdout: %s", output)
 }
 
+func (f *Framework) UninstallChart() error {
+	cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
+	_, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
+	}
+
+	return nil
+}
+
 func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
 	stdout, err = cmd.StdoutPipe()
 	if err != nil {
@@ -150,7 +150,11 @@ func (f *Framework) AfterEach() {
 
 	defer func(kubeClient kubernetes.Interface, ingressclass string) {
 		defer ginkgo.GinkgoRecover()
-		err := deleteIngressClass(kubeClient, ingressclass)
+
+		err := f.UninstallChart()
+		assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
+
+		err = deleteIngressClass(kubeClient, ingressclass)
 		assert.Nil(ginkgo.GinkgoT(), err, "deleting IngressClass")
 	}(f.KubeClientSet, f.IngressClass)
 
@@ -192,6 +196,11 @@ func IngressNginxDescribe(text string, body func()) bool {
 	return ginkgo.Describe(text, body)
 }
 
+// IngressNginxDescribeSerial wrapper function for ginkgo describe. Adds namespacing.
+func IngressNginxDescribeSerial(text string, body func()) bool {
+	return ginkgo.Describe(text, ginkgo.Serial, body)
+}
+
 // DescribeAnnotation wrapper function for ginkgo describe. Adds namespacing.
 func DescribeAnnotation(text string, body func()) bool {
 	return ginkgo.Describe("[Annotations] "+text, body)
@@ -202,11 +211,6 @@ func DescribeSetting(text string, body func()) bool {
 	return ginkgo.Describe("[Setting] "+text, body)
 }
 
-// MemoryLeakIt is wrapper function for ginkgo It. Adds "[MemoryLeak]" tag and makes static analysis easier.
-func MemoryLeakIt(text string, body interface{}) bool {
-	return ginkgo.It(text+" [MemoryLeak]", body)
-}
-
 // GetNginxIP returns the number of TCP port where NGINX is running
 func (f *Framework) GetNginxIP() string {
 	s, err := f.KubeClientSet.
@@ -387,7 +391,7 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 }
 
 // WaitForReload calls the passed function and
-// asser it has caused at least 1 reload.
+// asserts it has caused at least 1 reload.
 func (f *Framework) WaitForReload(fn func()) {
 	initialReloadCount := getReloadCount(f.pod, f.Namespace, f.KubeClientSet)
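As a rough illustration of how the framework pieces touched above combine, here is a hypothetical spec (not part of this PR; "error-log-level" is just an example configmap key) that uses the new IngressNginxDescribeSerial wrapper together with WaitForReload:

    package settings

    import (
    	"github.com/onsi/ginkgo/v2"

    	"k8s.io/ingress-nginx/test/e2e/framework"
    )

    // Hypothetical spec: IngressNginxDescribeSerial keeps the container out of
    // the parallel pool, and WaitForReload asserts the wrapped mutation caused
    // at least one NGINX reload.
    var _ = framework.IngressNginxDescribeSerial("[Example] reload on change", func() {
    	f := framework.NewDefaultFramework("reload-example")

    	ginkgo.It("reloads after a configmap update", func() {
    		f.WaitForReload(func() {
    			// "error-log-level" is an illustrative key, not part of this PR.
    			f.UpdateNginxConfigMapData("error-log-level", "debug")
    		})
    	})
    })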
@@ -68,9 +68,7 @@ func (f *Framework) NewInfluxDBDeployment() {
 		},
 	}
 
-	cm, err := f.EnsureConfigMap(configuration)
-	assert.Nil(ginkgo.GinkgoT(), err, "creating an Influxdb deployment")
-	assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+	f.EnsureConfigMap(configuration)
 
 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
@@ -136,7 +134,7 @@ func (f *Framework) NewInfluxDBDeployment() {
 
 	d := f.EnsureDeployment(deployment)
 
-	err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
+	err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for influxdb pod to become ready")
@@ -25,9 +25,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
-	api "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
 	networking "k8s.io/api/networking/v1"
 	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,8 +34,8 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-// EnsureSecret creates a Secret object or returns it if it already exists.
-func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
+// EnsureSecret creates a Secret object or returns it.
+func (f *Framework) EnsureSecret(secret *core.Secret) *core.Secret {
 	err := createSecretWithRetries(f.KubeClientSet, secret.Namespace, secret)
 	assert.Nil(ginkgo.GinkgoT(), err, "creating secret")
 
@@ -48,17 +46,30 @@ func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
 	return s
 }
 
-// EnsureConfigMap creates a ConfigMap object or returns it if it already exists.
-func (f *Framework) EnsureConfigMap(configMap *api.ConfigMap) (*api.ConfigMap, error) {
-	cm, err := f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
-	if err != nil {
-		if k8sErrors.IsAlreadyExists(err) {
-			return f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
-		}
-		return nil, err
-	}
-
-	return cm, nil
+// GetConfigMap gets a ConfigMap object from the given namespace, name and returns it, throws error if it does not exist.
+func (f *Framework) GetConfigMap(namespace string, name string) *core.ConfigMap {
+	cm, err := f.KubeClientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+	assert.Nil(ginkgo.GinkgoT(), err, "getting configmap")
+	assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")
+	return cm
+}
+
+// EnsureConfigMap creates or updates an existing ConfigMap object or returns it.
+func (f *Framework) EnsureConfigMap(configMap *core.ConfigMap) *core.ConfigMap {
+	cm := configMap.DeepCopy()
+	// Clean out ResourceVersion field if present
+	if cm.ObjectMeta.ResourceVersion != "" {
+		cm.ObjectMeta.ResourceVersion = ""
+	}
+
+	res, err := f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
+	if k8sErrors.IsAlreadyExists(err) {
+		res, err = f.KubeClientSet.CoreV1().ConfigMaps(configMap.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
+	}
+	assert.Nil(ginkgo.GinkgoT(), err, "updating configmap")
+	assert.NotNil(ginkgo.GinkgoT(), res, "updating configmap")
+
+	return res
 }
 
 // GetIngress gets an Ingress object from the given namespace, name and returns it, throws error if it does not exists.
@@ -293,7 +304,7 @@ func createDeploymentWithRetries(c kubernetes.Interface, namespace string, obj *
 	return retryWithExponentialBackOff(createFunc)
 }
 
-func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *v1.Secret) error {
+func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *core.Secret) error {
 	if obj == nil {
 		return fmt.Errorf("Object provided to create is empty")
 	}
@@ -313,7 +324,7 @@ func createSecretWithRetries(c kubernetes.Interface, namespace string, obj *v1.S
 	return retryWithExponentialBackOff(createFunc)
 }
 
-func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *v1.Service) error {
+func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *core.Service) error {
 	if obj == nil {
 		return fmt.Errorf("Object provided to create is empty")
 	}
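The new helpers make the common get/mutate/write-back dance explicit. A sketch of the intended usage under those assumptions (hypothetical spec; the tcp-services update mirrors the tcp.go changes further down):

    package tcpudp

    import (
    	"fmt"

    	"github.com/onsi/ginkgo/v2"

    	"k8s.io/ingress-nginx/test/e2e/framework"
    )

    // Hypothetical spec showing the get/mutate/ensure pattern: GetConfigMap
    // asserts the object exists, and EnsureConfigMap strips ResourceVersion and
    // does create-or-update, so the fetched object can be written straight back.
    var _ = framework.IngressNginxDescribe("[Example] configmap helpers", func() {
    	f := framework.NewDefaultFramework("cm-example")

    	ginkgo.It("updates a tcp-services entry", func() {
    		cm := f.GetConfigMap(f.Namespace, "tcp-services")
    		if cm.Data == nil {
    			cm.Data = map[string]string{}
    		}
    		cm.Data["8080"] = fmt.Sprintf("%v/%v:80", f.Namespace, framework.EchoService)
    		f.EnsureConfigMap(cm)
    	})
    })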
@@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
 		f.NewEchoDeployment()
 	})
 
-	framework.MemoryLeakIt("should not leak memory from ingress SSL certificates or configuration updates", func() {
+	ginkgo.It("should not leak memory from ingress SSL certificates or configuration updates", func() {
 		hostCount := 1000
 		iterations := 10
@@ -78,7 +78,7 @@ fi
 
 if [ "${SKIP_IMAGE_CREATION:-false}" = "false" ]; then
   if ! command -v ginkgo &> /dev/null; then
-    go get github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
+    go install github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
   fi
   echo "[dev-env] building image"
   make -C ${DIR}/../../ clean-image build image
@@ -49,15 +49,9 @@ if [ "$missing" = true ]; then
   exit 1
 fi
 
-E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-}
-FOCUS=${FOCUS:-.*}
-
 BASEDIR=$(dirname "$0")
 NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE)
 
-export E2E_CHECK_LEAKS
-export FOCUS
-
 echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}"
 kubectl create serviceaccount ingress-nginx-e2e || true
 kubectl create clusterrolebinding permissive-binding \
@@ -66,7 +60,6 @@ kubectl create clusterrolebinding permissive-binding \
   --user=kubelet \
   --serviceaccount=default:ingress-nginx-e2e || true
 
-
 VER=$(kubectl version --client=false -o json |jq '.serverVersion.minor |tonumber')
 if [ $VER -lt 24 ]; then
   echo -e "${BGREEN}Waiting service account...${NC}"; \
@@ -76,7 +69,6 @@ if [ $VER -lt 24 ]; then
   done
 fi
 
-
 echo -e "Starting the e2e test pod"
 
 kubectl run --rm \
@@ -90,38 +82,10 @@ kubectl run --rm \
   e2e --image=nginx-ingress-controller:e2e
 
 # Get the junit-reports stored in the configMaps created during e2etests
-echo "Getting the report files out now.."
+echo "Getting the report file out now.."
 reportsDir="test/junitreports"
-reportFileName="report-e2e-test-suite"
-[ ! -e ${reportsDir} ] && mkdir $reportsDir
+reportFile="report-e2e-test-suite.xml.gz"
+mkdir -p $reportsDir
 cd $reportsDir
 
-# TODO: Seeking Rikatz help here to extract in a loop. Tried things like below without success
-#for cmName in `k get cm -l junitreport=true -o json | jq '.items[].binaryData | keys[]' | tr '\"' ' '`
-#do
-#
-#
-# kubectl get cm -l junitreport=true -o json | jq -r '[.items[].binaryData | to_entries[] | {"key": .key, "value": .value }] | from_entries'
-#
-
-# Below lines successfully extract the report but they are one line per report.
-# We only have 3 ginkgo reports so its ok for now
-# But still, ideally this should be a loop as talked about in comments a few lines above
-kubectl get cm $reportFileName.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite\.xml\.gz']}" > $reportFileName.xml.gz.base64
-kubectl get cm $reportFileName-serial.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite-serial\.xml\.gz']}" > $reportFileName-serial.xml.gz.base64
-
-cat $reportFileName.xml.gz.base64 | base64 -d > $reportFileName.xml.gz
-cat $reportFileName-serial.xml.gz.base64 | base64 -d > $reportFileName-serial.xml.gz
-
-gzip -d $reportFileName.xml.gz
-gzip -d $reportFileName-serial.xml.gz
-
-rm *.base64
+kubectl get cm $reportFile -o "jsonpath={.binaryData['${reportFile//\./\\.}']}" | base64 -d | gunzip > ${reportFile%\.gz}
+echo "done getting the report file out.."
 cd ../..
-
-# TODO Temporary: if condition to check if the memleak cm exists and only then try the extract for the memleak report
-#
-#kubectl get cm $reportFileName-serial -o "jsonpath={.data['report-e2e-test-suite-memleak\.xml\.gz']}" > $reportFileName-memleak.base64
-#cat $reportFileName-memleak.base64 | base64 -d > $reportFileName-memleak.xml.gz
-#gzip -d $reportFileName-memleak.xml.gz
-echo "done getting the reports files out.."
@@ -95,7 +95,7 @@ fi
 
 if [ "${SKIP_E2E_IMAGE_CREATION}" = "false" ]; then
   if ! command -v ginkgo &> /dev/null; then
-    go get github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
+    go install github.com/onsi/ginkgo/v2/ginkgo@v2.6.1
   fi
 
 echo "[dev-env] .. done building controller images"
@@ -73,5 +73,9 @@ var _ = framework.DescribeSetting("Configmap change", func() {
 				return strings.ContainsAny(cfg, "error_log /var/log/nginx/error.log debug;")
 			})
 		assert.NotEqual(ginkgo.GinkgoT(), checksum, newChecksum)
+
+		logs, err := f.NginxLogs()
+		assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
+		assert.Contains(ginkgo.GinkgoT(), logs, "Backend successfully reloaded")
 	})
 })
@@ -27,7 +27,7 @@ import (
 	"k8s.io/ingress-nginx/test/e2e/framework"
 )
 
-var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func() {
+var _ = framework.IngressNginxDescribeSerial("[Flag] watch namespace selector", func() {
 	f := framework.NewDefaultFramework("namespace-selector")
 	notMatchedHost, matchedHost := "bar", "foo"
 	var notMatchedNs string
@@ -45,7 +45,7 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
 
 	cleanupNamespace := func(ns string) {
 		err := framework.DeleteKubeNamespace(f.KubeClientSet, ns)
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily crated namespace")
+		assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily created namespace")
 	}
 
 	ginkgo.BeforeEach(func() {
@@ -56,13 +56,6 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
 	ginkgo.AfterEach(func() {
 		cleanupNamespace(notMatchedNs)
 		cleanupNamespace(matchedNs)
-
-		// cleanup clusterrole/clusterrolebinding created by installing chart with controller.scope.enabled=false
-		err := f.KubeClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrole nginx-ingress")
-
-		err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrolebinging nginx-ingress")
 	})
 
 	ginkgo.Context("With specific watch-namespace-selector flags", func() {
@@ -85,7 +85,7 @@ var _ = framework.DescribeSetting("OCSP", func() {
 		cfsslDB, err := os.ReadFile("empty.db")
 		assert.Nil(ginkgo.GinkgoT(), err)
 
-		cmap, err := f.EnsureConfigMap(&corev1.ConfigMap{
+		f.EnsureConfigMap(&corev1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "ocspserve",
 				Namespace: f.Namespace,
@@ -95,8 +95,6 @@ var _ = framework.DescribeSetting("OCSP", func() {
 				"db-config.json": []byte(`{"driver":"sqlite3","data_source":"/data/empty.db"}`),
 			},
 		})
-		assert.Nil(ginkgo.GinkgoT(), err)
-		assert.NotNil(ginkgo.GinkgoT(), cmap)
 
 		d, s := ocspserveDeployment(f.Namespace)
 		f.EnsureDeployment(d)
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"regexp"
 	"strings"
 	"time"
 
@@ -36,58 +37,39 @@ import (
 
 var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
 	f := framework.NewDefaultFramework("tcp")
+	var ip string
+
+	ginkgo.BeforeEach(func() {
+		ip = f.GetNginxIP()
+	})
 
 	ginkgo.It("should expose a TCP service", func() {
 		f.NewEchoDeployment()
 
-		config, err := f.KubeClientSet.
-			CoreV1().
-			ConfigMaps(f.Namespace).
-			Get(context.TODO(), "tcp-services", metav1.GetOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining tcp-services configmap")
-		assert.NotNil(ginkgo.GinkgoT(), config, "expected a configmap but none returned")
-
-		if config.Data == nil {
-			config.Data = map[string]string{}
-		}
-
-		config.Data["8080"] = fmt.Sprintf("%v/%v:80", f.Namespace, framework.EchoService)
-
-		_, err = f.KubeClientSet.
-			CoreV1().
-			ConfigMaps(f.Namespace).
-			Update(context.TODO(), config, metav1.UpdateOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating configmap")
+		cm := f.GetConfigMap(f.Namespace, "tcp-services")
+		cm.Data = map[string]string{
+			"8080": fmt.Sprintf("%v/%v:80", f.Namespace, framework.EchoService),
+		}
+		f.EnsureConfigMap(cm)
 
-		svc, err := f.KubeClientSet.
-			CoreV1().
-			Services(f.Namespace).
-			Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining ingress-nginx service")
-		assert.NotNil(ginkgo.GinkgoT(), svc, "expected a service but none returned")
-
+		svc := f.GetService(f.Namespace, "nginx-ingress-controller")
 		svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{
 			Name:       framework.EchoService,
 			Port:       8080,
 			TargetPort: intstr.FromInt(8080),
 		})
-		_, err = f.KubeClientSet.
+		_, err := f.KubeClientSet.
 			CoreV1().
 			Services(f.Namespace).
 			Update(context.TODO(), svc, metav1.UpdateOptions{})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")
 
 		// wait for update and nginx reload and new endpoint is available
 		framework.Sleep()
 
 		f.WaitForNginxConfiguration(
 			func(cfg string) bool {
 				return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-%v-80"`,
 					f.Namespace, framework.EchoService))
 			})
 
-		ip := f.GetNginxIP()
-
 		f.HTTPTestClient().
 			GET("/").
 			WithURL(fmt.Sprintf("http://%v:8080", ip)).
@@ -122,44 +104,25 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
 		}
 		f.EnsureService(externalService)
 
-		// Expose the `external name` port on the `ingress-nginx` service
-		svc, err := f.KubeClientSet.
-			CoreV1().
-			Services(f.Namespace).
-			Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining ingress-nginx service")
-		assert.NotNil(ginkgo.GinkgoT(), svc, "expected a service but none returned")
-
+		// Expose the `external name` port on the `ingress-nginx-controller` service
+		svc := f.GetService(f.Namespace, "nginx-ingress-controller")
 		svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{
 			Name:       "dns-svc",
 			Port:       5353,
 			TargetPort: intstr.FromInt(5353),
 		})
-		_, err = f.KubeClientSet.
+		_, err := f.KubeClientSet.
 			CoreV1().
 			Services(f.Namespace).
 			Update(context.TODO(), svc, metav1.UpdateOptions{})
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")
 
 		// Update the TCP configmap to link port 5353 to the DNS external name service
-		config, err := f.KubeClientSet.
-			CoreV1().
-			ConfigMaps(f.Namespace).
-			Get(context.TODO(), "tcp-services", metav1.GetOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error obtaining tcp-services configmap")
-		assert.NotNil(ginkgo.GinkgoT(), config, "expected a configmap but none returned")
-
-		if config.Data == nil {
-			config.Data = map[string]string{}
-		}
-
-		config.Data["5353"] = fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace)
-
-		_, err = f.KubeClientSet.
-			CoreV1().
-			ConfigMaps(f.Namespace).
-			Update(context.TODO(), config, metav1.UpdateOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating configmap")
+		config := f.GetConfigMap(f.Namespace, "tcp-services")
+		config.Data = map[string]string{
+			"5353": fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace),
+		}
+		f.EnsureConfigMap(config)
 
 		// Validate that the generated nginx config contains the expected `proxy_upstream_name` value
 		f.WaitForNginxConfiguration(
@@ -168,7 +131,6 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
 			})
 
 		// Execute the test. Use the `external name` service to resolve a domain name.
-		ip := f.GetNginxIP()
 		resolver := net.Resolver{
 			PreferGo: true,
 			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
@@ -203,4 +165,57 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error from DNS resolver")
 		assert.Contains(ginkgo.GinkgoT(), ips, "8.8.4.4")
 	})
+
+	ginkgo.It("should reload after an update in the configuration", func() {
+
+		ginkgo.By("setting up a first deployment")
+		f.NewEchoDeployment(framework.WithDeploymentName("first-service"))
+
+		cm := f.GetConfigMap(f.Namespace, "tcp-services")
+		cm.Data = map[string]string{
+			"8080": fmt.Sprintf("%v/first-service:80", f.Namespace),
+		}
+		f.EnsureConfigMap(cm)
+
+		checksumRegex := regexp.MustCompile(`Configuration checksum:\s+(\d+)`)
+		checksum := ""
+
+		f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				// before returning, extract the current checksum
+				match := checksumRegex.FindStringSubmatch(cfg)
+				if len(match) > 0 {
+					checksum = match[1]
+				}
+
+				return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-first-service-80"`,
+					f.Namespace))
+			})
+		assert.NotEmpty(ginkgo.GinkgoT(), checksum)
+
+		ginkgo.By("updating the tcp service to a second deployment")
+		f.NewEchoDeployment(framework.WithDeploymentName("second-service"))
+
+		cm = f.GetConfigMap(f.Namespace, "tcp-services")
+		cm.Data["8080"] = fmt.Sprintf("%v/second-service:80", f.Namespace)
+		f.EnsureConfigMap(cm)
+
+		newChecksum := ""
+		f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				match := checksumRegex.FindStringSubmatch(cfg)
+				if len(match) > 0 {
+					newChecksum = match[1]
+				}
+
+				return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-second-service-80"`,
+					f.Namespace))
+			})
+		assert.NotEqual(ginkgo.GinkgoT(), checksum, newChecksum)
+
+		logs, err := f.NginxLogs()
+		assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
+		assert.Contains(ginkgo.GinkgoT(), logs, "Backend successfully reloaded")
+	})
+
+})