Rework Ginkgo usage
Currently Ginkgo is launched multiple times with different options to accommodate various use-cases. In particular, some specs need to be run sequentially because non-namespaced objects are created that conflict with concurrent Helm deployments. However Ginkgo is able to handle such cases natively; in particular, specs that need to be run sequentially are supported (Serial specs). This commit marks the specs that need to be run sequentially as Serial specs and runs the whole test suite from a single Ginkgo invocation. As a result, a single JUnit report is now generated. Signed-off-by: Hervé Werner <dud225@hotmail.com>
This commit is contained in:
parent
1bf5317969
commit
4fc93d7f3f
11 changed files with 57 additions and 137 deletions
2
Makefile
2
Makefile
|
@ -31,7 +31,7 @@ TAG ?= $(shell cat TAG)
|
|||
|
||||
# e2e settings
|
||||
# Allow limiting the scope of the e2e tests. By default run everything
|
||||
FOCUS ?= .*
|
||||
FOCUS ?=
|
||||
# number of parallel test
|
||||
E2E_NODES ?= 7
|
||||
# run e2e test suite with tests that check for memory leaks? (default is false)
|
||||
|
|
|
@ -78,7 +78,7 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP
|
|||
var (
|
||||
// IngressPodDetails hold information about the ingress-nginx pod
|
||||
IngressPodDetails *PodInfo
|
||||
// IngressNodeDetails old information about the node running ingress-nginx pod
|
||||
// IngressNodeDetails hold information about the node running ingress-nginx pod
|
||||
IngressNodeDetails *NodeInfo
|
||||
)
|
||||
|
||||
|
|
|
@ -14,70 +14,39 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -e
|
||||
set -eu
|
||||
|
||||
NC='\e[0m'
|
||||
BGREEN='\e[32m'
|
||||
|
||||
#SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-"5s"}
|
||||
FOCUS=${FOCUS:-.*}
|
||||
E2E_NODES=${E2E_NODES:-5}
|
||||
E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-""}
|
||||
|
||||
reportFile="report-e2e-test-suite.xml"
|
||||
ginkgo_args=(
|
||||
"-randomize-all"
|
||||
"-flake-attempts=2"
|
||||
"-fail-fast"
|
||||
"--show-node-events"
|
||||
"--fail-fast"
|
||||
"--flake-attempts=2"
|
||||
"--junit-report=${reportFile}"
|
||||
"--nodes=${E2E_NODES}"
|
||||
"--poll-progress-after=180s"
|
||||
# "-slow-spec-threshold=${SLOW_E2E_THRESHOLD}"
|
||||
"-succinct"
|
||||
"-timeout=75m"
|
||||
"--randomize-all"
|
||||
"--show-node-events"
|
||||
"--succinct"
|
||||
"--timeout=75m"
|
||||
)
|
||||
|
||||
# Variable for the prefix of report filenames
|
||||
reportFileNamePrefix="report-e2e-test-suite"
|
||||
|
||||
echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
|
||||
ginkgo "${ginkgo_args[@]}" \
|
||||
-focus="${FOCUS}" \
|
||||
-skip="\[Serial\]|\[MemoryLeak\]|\[TopologyHints\]" \
|
||||
-nodes="${E2E_NODES}" \
|
||||
--junit-report=$reportFileNamePrefix.xml \
|
||||
/e2e.test
|
||||
# Create configMap out of a compressed report file for extraction later
|
||||
|
||||
# Must be isolated, there is a collision if multiple helms tries to install same clusterRole at same time
|
||||
echo -e "${BGREEN}Running e2e test for topology aware hints...${NC}"
|
||||
ginkgo "${ginkgo_args[@]}" \
|
||||
-focus="\[TopologyHints\]" \
|
||||
-skip="\[Serial\]|\[MemoryLeak\]]" \
|
||||
-nodes="${E2E_NODES}" \
|
||||
--junit-report=$reportFileNamePrefix-topology.xml \
|
||||
/e2e.test
|
||||
# Create configMap out of a compressed report file for extraction later
|
||||
|
||||
echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
|
||||
ginkgo "${ginkgo_args[@]}" \
|
||||
-focus="\[Serial\]" \
|
||||
-skip="\[MemoryLeak\]" \
|
||||
--junit-report=$reportFileNamePrefix-serial.xml \
|
||||
/e2e.test
|
||||
# Create configMap out of a compressed report file for extraction later
|
||||
|
||||
if [[ ${E2E_CHECK_LEAKS} != "" ]]; then
|
||||
echo -e "${BGREEN}Running e2e test suite with tests that check for memory leaks...${NC}"
|
||||
ginkgo "${ginkgo_args[@]}" \
|
||||
-focus="\[MemoryLeak\]" \
|
||||
-skip="\[Serial\]" \
|
||||
--junit-report=$reportFileNamePrefix-memleak.xml \
|
||||
/e2e.test
|
||||
# Create configMap out of a compressed report file for extraction later
|
||||
if [ -n "${FOCUS}" ]; then
|
||||
ginkgo_args+=("--focus=${FOCUS}")
|
||||
fi
|
||||
|
||||
for rFile in `ls $reportFileNamePrefix*`
|
||||
do
|
||||
gzip -k $rFile
|
||||
kubectl create cm $rFile.gz --from-file $rFile.gz
|
||||
kubectl label cm $rFile.gz junitreport=true
|
||||
done
|
||||
if [ -z "${E2E_CHECK_LEAKS}" ]; then
|
||||
ginkgo_args+=("--skip=\[Memory Leak\]")
|
||||
fi
|
||||
|
||||
echo -e "${BGREEN}Running e2e test suite...${NC}"
|
||||
(set -x; ginkgo "${ginkgo_args[@]}" /e2e.test)
|
||||
|
||||
# Create configMap out of a compressed report file for extraction later
|
||||
gzip -k ${reportFile}
|
||||
kubectl create cm ${reportFile}.gz --from-file ${reportFile}.gz
|
||||
kubectl label cm ${reportFile}.gz junitreport=true
|
||||
|
|
|
@ -8,6 +8,7 @@ controller:
|
|||
digest:
|
||||
digestChroot:
|
||||
scope:
|
||||
# Necessary to allow the ingress controller to get the topology information from the nodes
|
||||
enabled: false
|
||||
config:
|
||||
worker-processes: "1"
|
||||
|
|
|
@ -40,7 +40,7 @@ var (
|
|||
pathImplSpecific = networkingv1.PathTypeImplementationSpecific
|
||||
)
|
||||
|
||||
var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
|
||||
var _ = framework.IngressNginxDescribeSerial("[Admission] admission controller", func() {
|
||||
f := framework.NewDefaultFramework("admission")
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
|
@ -49,7 +49,7 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
|
|||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
err := uninstallChart(f)
|
||||
err := f.UninstallChart()
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
|
||||
})
|
||||
|
||||
|
@ -252,16 +252,6 @@ var _ = framework.IngressNginxDescribe("[Serial] admission controller", func() {
|
|||
})
|
||||
})
|
||||
|
||||
func uninstallChart(f *framework.Framework) error {
|
||||
cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
|
||||
_, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
validV1Ingress = `
|
||||
apiVersion: networking.k8s.io/v1
|
||||
|
|
|
@ -21,7 +21,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -33,7 +32,7 @@ import (
|
|||
"k8s.io/ingress-nginx/test/e2e/framework"
|
||||
)
|
||||
|
||||
var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing", func() {
|
||||
var _ = framework.IngressNginxDescribeSerial("[TopologyHints] topology aware routing", func() {
|
||||
f := framework.NewDefaultFramework("topology")
|
||||
host := "topology-svc.foo.com"
|
||||
|
||||
|
@ -42,8 +41,8 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
|
|||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
// we need to uninstall chart because of clusterRole which is not destroyed with namespace
|
||||
err := uninstallChart(f)
|
||||
// we need to uninstall chart because of clusterRole (controller.scope.enabled=false) which is not destroyed with namespace
|
||||
err := f.UninstallChart()
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
|
||||
})
|
||||
|
||||
|
@ -100,13 +99,3 @@ var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing",
|
|||
}
|
||||
})
|
||||
})
|
||||
|
||||
func uninstallChart(f *framework.Framework) error {
|
||||
cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
|
||||
_, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -152,6 +152,16 @@ func (f *Framework) KubectlProxy(port int) (int, *exec.Cmd, error) {
|
|||
return -1, cmd, fmt.Errorf("failed to parse port from proxy stdout: %s", output)
|
||||
}
|
||||
|
||||
func (f *Framework) UninstallChart() error {
|
||||
cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
|
||||
_, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
|
||||
stdout, err = cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
|
|
|
@ -192,6 +192,11 @@ func IngressNginxDescribe(text string, body func()) bool {
|
|||
return ginkgo.Describe(text, body)
|
||||
}
|
||||
|
||||
// IngressNginxDescribeSerial wrapper function for ginkgo describe. Adds namespacing.
|
||||
func IngressNginxDescribeSerial(text string, body func()) bool {
|
||||
return ginkgo.Describe(text, ginkgo.Serial, body)
|
||||
}
|
||||
|
||||
// DescribeAnnotation wrapper function for ginkgo describe. Adds namespacing.
|
||||
func DescribeAnnotation(text string, body func()) bool {
|
||||
return ginkgo.Describe("[Annotations] "+text, body)
|
||||
|
@ -202,11 +207,6 @@ func DescribeSetting(text string, body func()) bool {
|
|||
return ginkgo.Describe("[Setting] "+text, body)
|
||||
}
|
||||
|
||||
// MemoryLeakIt is wrapper function for ginkgo It. Adds "[MemoryLeak]" tag and makes static analysis easier.
|
||||
func MemoryLeakIt(text string, body interface{}) bool {
|
||||
return ginkgo.It(text+" [MemoryLeak]", body)
|
||||
}
|
||||
|
||||
// GetNginxIP returns the number of TCP port where NGINX is running
|
||||
func (f *Framework) GetNginxIP() string {
|
||||
s, err := f.KubeClientSet.
|
||||
|
|
|
@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("[Memory Leak] Dynamic Certificates", fun
|
|||
f.NewEchoDeployment()
|
||||
})
|
||||
|
||||
framework.MemoryLeakIt("should not leak memory from ingress SSL certificates or configuration updates", func() {
|
||||
ginkgo.It("should not leak memory from ingress SSL certificates or configuration updates", func() {
|
||||
hostCount := 1000
|
||||
iterations := 10
|
||||
|
||||
|
|
|
@ -49,15 +49,9 @@ if [ "$missing" = true ]; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-}
|
||||
FOCUS=${FOCUS:-.*}
|
||||
|
||||
BASEDIR=$(dirname "$0")
|
||||
NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE)
|
||||
|
||||
export E2E_CHECK_LEAKS
|
||||
export FOCUS
|
||||
|
||||
echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}"
|
||||
kubectl create serviceaccount ingress-nginx-e2e || true
|
||||
kubectl create clusterrolebinding permissive-binding \
|
||||
|
@ -66,7 +60,6 @@ kubectl create clusterrolebinding permissive-binding \
|
|||
--user=kubelet \
|
||||
--serviceaccount=default:ingress-nginx-e2e || true
|
||||
|
||||
|
||||
VER=$(kubectl version --client=false -o json |jq '.serverVersion.minor |tonumber')
|
||||
if [ $VER -lt 24 ]; then
|
||||
echo -e "${BGREEN}Waiting service account...${NC}"; \
|
||||
|
@ -76,7 +69,6 @@ if [ $VER -lt 24 ]; then
|
|||
done
|
||||
fi
|
||||
|
||||
|
||||
echo -e "Starting the e2e test pod"
|
||||
|
||||
kubectl run --rm \
|
||||
|
@ -90,38 +82,10 @@ kubectl run --rm \
|
|||
e2e --image=nginx-ingress-controller:e2e
|
||||
|
||||
# Get the junit-reports stored in the configMaps created during e2etests
|
||||
echo "Getting the report files out now.."
|
||||
echo "Getting the report file out now.."
|
||||
reportsDir="test/junitreports"
|
||||
reportFileName="report-e2e-test-suite"
|
||||
[ ! -e ${reportsDir} ] && mkdir $reportsDir
|
||||
reportFile="report-e2e-test-suite.xml.gz"
|
||||
mkdir -p $reportsDir
|
||||
cd $reportsDir
|
||||
|
||||
# TODO: Seeking Rikatz help here to extract in a loop. Tried things like below without success
|
||||
#for cmName in `k get cm -l junitreport=true -o json | jq '.items[].binaryData | keys[]' | tr '\"' ' '`
|
||||
#do
|
||||
#
|
||||
#
|
||||
# kubectl get cm -l junitreport=true -o json | jq -r '[.items[].binaryData | to_entries[] | {"key": .key, "value": .value }] | from_entries'
|
||||
#
|
||||
|
||||
# Below lines successfully extract the report but they are one line per report.
|
||||
# We only have 3 ginkgo reports so its ok for now
|
||||
# But still, ideally this should be a loop as talked about in comments a few lines above
|
||||
kubectl get cm $reportFileName.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite\.xml\.gz']}" > $reportFileName.xml.gz.base64
|
||||
kubectl get cm $reportFileName-serial.xml.gz -o "jsonpath={.binaryData['report-e2e-test-suite-serial\.xml\.gz']}" > $reportFileName-serial.xml.gz.base64
|
||||
|
||||
cat $reportFileName.xml.gz.base64 | base64 -d > $reportFileName.xml.gz
|
||||
cat $reportFileName-serial.xml.gz.base64 | base64 -d > $reportFileName-serial.xml.gz
|
||||
|
||||
gzip -d $reportFileName.xml.gz
|
||||
gzip -d $reportFileName-serial.xml.gz
|
||||
|
||||
rm *.base64
|
||||
cd ../..
|
||||
|
||||
# TODO Temporary: if condition to check if the memleak cm exists and only then try the extract for the memleak report
|
||||
#
|
||||
#kubectl get cm $reportFileName-serial -o "jsonpath={.data['report-e2e-test-suite-memleak\.xml\.gz']}" > $reportFileName-memleak.base64
|
||||
#cat $reportFileName-memleak.base64 | base64 -d > $reportFileName-memleak.xml.gz
|
||||
#gzip -d $reportFileName-memleak.xml.gz
|
||||
echo "done getting the reports files out.."
|
||||
kubectl get cm $reportFile -o "jsonpath={.binaryData['${reportFile//\./\\.}']}" | base64 -d | gunzip > ${reportFile%\.gz}
|
||||
echo "done getting the report file out.."
|
||||
|
|
|
@ -27,7 +27,7 @@ import (
|
|||
"k8s.io/ingress-nginx/test/e2e/framework"
|
||||
)
|
||||
|
||||
var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func() {
|
||||
var _ = framework.IngressNginxDescribeSerial("[Flag] watch namespace selector", func() {
|
||||
f := framework.NewDefaultFramework("namespace-selector")
|
||||
notMatchedHost, matchedHost := "bar", "foo"
|
||||
var notMatchedNs string
|
||||
|
@ -45,7 +45,7 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
|
|||
|
||||
cleanupNamespace := func(ns string) {
|
||||
err := framework.DeleteKubeNamespace(f.KubeClientSet, ns)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily crated namespace")
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting temporarily created namespace")
|
||||
}
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
|
@ -57,12 +57,9 @@ var _ = framework.IngressNginxDescribe("[Flag] watch namespace selector", func()
|
|||
cleanupNamespace(notMatchedNs)
|
||||
cleanupNamespace(matchedNs)
|
||||
|
||||
// cleanup clusterrole/clusterrolebinding created by installing chart with controller.scope.enabled=false
|
||||
err := f.KubeClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrole nginx-ingress")
|
||||
|
||||
err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nginx-ingress", metav1.DeleteOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting clusterrolebinging nginx-ingress")
|
||||
// we need to uninstall chart because of clusterRole (controller.scope.enabled=false) which is not destroyed with namespace
|
||||
err := f.UninstallChart()
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
|
||||
})
|
||||
|
||||
ginkgo.Context("With specific watch-namespace-selector flags", func() {
|
||||
|
|
Loading…
Reference in a new issue