Replace gomega with testify
parent 9cf4154f4f
commit 046e2d959d
9 changed files with 158 additions and 147 deletions
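The substitution is mechanical across the framework: gomega matchers that reported through the registered fail handler become testify assertions bound to ginkgo's testing.T adapter. A minimal sketch of the before/after pattern, illustrative only and not copied from any single hunk below:

import (
    "github.com/onsi/ginkgo"
    "github.com/stretchr/testify/assert"
)

// Before: Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
// After:  the same check expressed with testify against ginkgo's *testing.T adapter.
func assertPatternExample(err error) {
    // assert.Nil fails the current spec via ginkgo.GinkgoT() when err is non-nil
    // and attaches the message, mirroring the old gomega annotation.
    assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
}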
@@ -53,6 +53,7 @@ func RunE2ETests(t *testing.T) {
 	defer logs.FlushLogs()

 	gomega.RegisterFailHandler(ginkgo.Fail)
+
 	// Disable skipped tests unless they are explicitly requested.
 	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
 		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
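gomega stays registered as ginkgo's fail handler at the suite entry point, so ginkgo.Fail (and any matcher that is still around) keeps aborting specs as before. For context, an entry point of this shape typically ends by handing the suite to the Ginkgo runner; the RunSpecs call and the suite description below are assumptions, not part of the hunk above:

func RunE2ETests(t *testing.T) {
    defer logs.FlushLogs()

    gomega.RegisterFailHandler(ginkgo.Fail)

    // Disable skipped tests unless they are explicitly requested (as above),
    // then run the suite under the Ginkgo runner.
    ginkgo.RunSpecs(t, "nginx-ingress e2e suite") // placeholder description
}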
@@ -19,8 +19,8 @@ package framework
 import (
 	"time"

-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,7 +84,7 @@ func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas i
 	f.EnsureService(service)

 	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, replicas)
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
 }

 // NewSlowEchoDeployment creates a new deployment of the slow echo server image in a particular namespace.
@@ -117,7 +117,7 @@ server {
 		},
 		Data: data,
 	})
-	Expect(err).NotTo(HaveOccurred(), "failed to create a deployment")
+	assert.Nil(ginkgo.GinkgoT(), err, "creating configmap")

 	deployment := newDeployment(SlowEchoService, f.Namespace, "openresty/openresty:1.15.8.2-alpine", 80, 1,
 		nil,
@@ -167,7 +167,7 @@ server {
 	f.EnsureService(service)

 	err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, SlowEchoService, f.Namespace, 1)
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
 }

 // NewGRPCBinDeployment creates a new deployment of the
@@ -176,8 +176,8 @@ func (f *Framework) NewGRPCBinDeployment() {
 	name := "grpcbin"

 	probe := &corev1.Probe{
-		InitialDelaySeconds: 5,
-		PeriodSeconds: 10,
+		InitialDelaySeconds: 1,
+		PeriodSeconds: 1,
 		SuccessThreshold: 1,
 		TimeoutSeconds: 1,
 		Handler: corev1.Handler{
@@ -260,14 +260,14 @@ func (f *Framework) NewGRPCBinDeployment() {
 	f.EnsureService(service)

 	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, 1)
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
 }

 func newDeployment(name, namespace, image string, port int32, replicas int32, command []string,
 	volumeMounts []corev1.VolumeMount, volumes []corev1.Volume) *appsv1.Deployment {
 	probe := &corev1.Probe{
 		InitialDelaySeconds: 1,
-		PeriodSeconds: 10,
+		PeriodSeconds: 1,
 		SuccessThreshold: 1,
 		TimeoutSeconds: 1,
 		Handler: corev1.Handler{
@@ -361,15 +361,15 @@ func (f *Framework) NewDeployment(name, image string, port int32, replicas int32
 	f.EnsureService(service)

 	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, int(replicas))
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
 }

 // DeleteDeployment deletes a deployment with a particular name and waits for the pods to be deleted
 func (f *Framework) DeleteDeployment(name string) error {
 	d, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Get(name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred(), "failed to get a deployment")
+	assert.Nil(ginkgo.GinkgoT(), err, "getting deployment")
 	err = f.KubeClientSet.AppsV1().Deployments(f.Namespace).Delete(name, &metav1.DeleteOptions{})
-	Expect(err).NotTo(HaveOccurred(), "failed to delete a deployment")
+	assert.Nil(ginkgo.GinkgoT(), err, "deleting deployment")
 	return WaitForPodsDeleted(f.KubeClientSet, time.Second*60, f.Namespace, metav1.ListOptions{
 		LabelSelector: labelSelectorToString(d.Spec.Selector.MatchLabels),
 	})
@@ -378,15 +378,15 @@ func (f *Framework) DeleteDeployment(name string) error {
 // ScaleDeploymentToZero scales a deployment with a particular name and waits for the pods to be deleted
 func (f *Framework) ScaleDeploymentToZero(name string) {
 	d, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Get(name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred(), "failed to get a deployment")
-	Expect(d).NotTo(BeNil(), "expected a deployment but none returned")
+	assert.Nil(ginkgo.GinkgoT(), err, "getting deployment")
+	assert.Nil(ginkgo.GinkgoT(), d, "expected a deployment but none returned")

 	d.Spec.Replicas = NewInt32(0)

 	d, err = f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(d)
-	Expect(err).NotTo(HaveOccurred(), "failed to get a deployment")
-	Expect(d).NotTo(BeNil(), "expected a deployment but none returned")
+	assert.Nil(ginkgo.GinkgoT(), err, "getting deployment")
+	assert.Nil(ginkgo.GinkgoT(), d, "expected a deployment but none returned")

 	err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, 0)
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for no endpoints")
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for no endpoints")
 }
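The probe literals in the hunks above are cut off at their Handler field. For reference, a probe of this shape usually completes along the following lines; the HTTP handler and path are assumptions (the gRPC deployment more likely probes a TCP socket), not the file's exact code:

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

// exampleProbe mirrors the tightened 1-second delay/period used above.
func exampleProbe(port int32) *corev1.Probe {
    return &corev1.Probe{
        InitialDelaySeconds: 1,
        PeriodSeconds:       1,
        SuccessThreshold:    1,
        TimeoutSeconds:      1,
        Handler: corev1.Handler{
            HTTPGet: &corev1.HTTPGetAction{ // assumed handler type
                Path: "/",
                Port: intstr.FromInt(int(port)),
            },
        },
    }
}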
@@ -17,8 +17,8 @@ limitations under the License.
 package framework

 import (
-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -78,7 +78,7 @@ func (f *Framework) NewNewFastCGIHelloServerDeploymentWithReplicas(replicas int3
 	err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")

 	service := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -14,11 +14,15 @@ limitations under the License.
 package framework

 import (
+	"crypto/tls"
 	"fmt"
+	"net/http"
 	"strings"
 	"time"

 	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"gopkg.in/gavv/httpexpect.v2"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	networking "k8s.io/api/networking/v1beta1"
@@ -32,7 +36,6 @@ import (
 	"k8s.io/klog"

 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )

 // RequestScheme define a scheme used in a test request.
@@ -64,8 +67,21 @@ type Framework struct {
 // NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
 // you (you can write additional before/after each functions).
 func NewDefaultFramework(baseName string) *Framework {
+	defer ginkgo.GinkgoRecover()
+
+	kubeConfig, err := restclient.InClusterConfig()
+	if err != nil {
+		panic(err.Error())
+	}
+	assert.Nil(ginkgo.GinkgoT(), err, "creting kubernetes API client configuration")
+
+	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
+	assert.Nil(ginkgo.GinkgoT(), err, "creating Kubernetes API client")
+
 	f := &Framework{
-		BaseName: baseName,
+		BaseName:      baseName,
+		KubeConfig:    kubeConfig,
+		KubeClientSet: kubeClient,
 	}

 	ginkgo.BeforeEach(f.BeforeEach)
@@ -76,29 +92,18 @@ func NewDefaultFramework(baseName string) *Framework {

 // BeforeEach gets a client and makes a namespace.
 func (f *Framework) BeforeEach() {
-	kubeConfig, err := restclient.InClusterConfig()
-	if err != nil {
-		panic(err.Error())
-	}
-
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-	f.KubeConfig = kubeConfig
-	f.KubeClientSet, err = kubernetes.NewForConfig(kubeConfig)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
 	ingressNamespace, err := CreateKubeNamespace(f.BaseName, f.KubeClientSet)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")

 	f.Namespace = ingressNamespace

 	err = f.newIngressController(f.Namespace, f.BaseName)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")

 	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
 		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
 	})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
 }

 // AfterEach deletes the namespace, after reading its events.
@@ -140,7 +145,7 @@ func (f *Framework) AfterEach() {
 	}

 	err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error deleting namespace %v", f.Namespace)
+	assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
 }

 // IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
@@ -169,7 +174,7 @@ func (f *Framework) GetNginxIP() string {
 		CoreV1().
 		Services(f.Namespace).
 		Get("nginx-ingress-controller", metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error obtaining NGINX IP address")
+	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX IP address")
 	return s.Spec.ClusterIP
 }

@@ -179,7 +184,7 @@ func (f *Framework) GetNginxPodIP() []string {
 		CoreV1().
 		Endpoints(f.Namespace).
 		Get("nginx-ingress-controller", metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error obtaining NGINX IP address")
+	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX IP address")
 	eips := make([]string, 0)
 	for _, s := range e.Subsets {
 		for _, a := range s.Addresses {
@@ -199,15 +204,14 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
 // WaitForNginxServer waits until the nginx configuration contains a particular server section
 func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
 	err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for nginx server condition/s")
-	time.Sleep(5 * time.Second)
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
+	time.Sleep(1 * time.Second)
 }

 // WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
 func (f *Framework) WaitForNginxConfiguration(matcher func(cfg string) bool) {
 	err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions("", matcher))
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for nginx server condition/s")
-	time.Sleep(5 * time.Second)
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
 }

 func nginxLogs(client kubernetes.Interface, namespace string) (string, error) {
@@ -247,24 +251,13 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
 		return false, nil
 	}

-	var match bool
-	errs := gomega.InterceptGomegaFailures(func() {
-		if klog.V(10) && len(o) > 0 {
-			klog.Infof("nginx.conf:\n%v", o)
-		}
-
-		// passes the nginx config to the passed function
-		if matcher(strings.Join(strings.Fields(o), " ")) {
-			match = true
-		}
-	})
-
-	if match {
-		return true, nil
+	if klog.V(10) && len(o) > 0 {
+		klog.Infof("nginx.conf:\n%v", o)
 	}

-	if len(errs) > 0 {
-		klog.V(2).Infof("Errors waiting for conditions: %v", errs)
+	// passes the nginx config to the passed function
+	if matcher(strings.Join(strings.Fields(o), " ")) {
+		return true, nil
 	}

 	return false, nil
@@ -294,8 +287,8 @@ func (f *Framework) getConfigMap(name string) (*v1.ConfigMap, error) {
 // SetNginxConfigMapData sets ingress-nginx's nginx-ingress-controller configMap data
 func (f *Framework) SetNginxConfigMapData(cmData map[string]string) {
 	cfgMap, err := f.getConfigMap("nginx-ingress-controller")
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	gomega.Expect(cfgMap).NotTo(gomega.BeNil(), "expected a configmap but none returned")
+	assert.Nil(ginkgo.GinkgoT(), err)
+	assert.NotNil(ginkgo.GinkgoT(), cfgMap, "expected a configmap but none returned")

 	cfgMap.Data = cmData

@@ -303,9 +296,9 @@ func (f *Framework) SetNginxConfigMapData(cmData map[string]string) {
 		CoreV1().
 		ConfigMaps(f.Namespace).
 		Update(cfgMap)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error updating configuration configmap")
+	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")

-	time.Sleep(5 * time.Second)
+	time.Sleep(1 * time.Second)
 }

 func (f *Framework) CreateConfigMap(name string, data map[string]string) {
@@ -316,14 +309,14 @@ func (f *Framework) CreateConfigMap(name string, data map[string]string) {
 		},
 		Data: data,
 	})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configMap")
+	assert.Nil(ginkgo.GinkgoT(), err, "failed to create configMap")
 }

 // UpdateNginxConfigMapData updates single field in ingress-nginx's nginx-ingress-controller map data
 func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 	config, err := f.getConfigMap("nginx-ingress-controller")
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	gomega.Expect(config).NotTo(gomega.BeNil(), "expected a configmap but none returned")
+	assert.Nil(ginkgo.GinkgoT(), err)
+	assert.NotNil(ginkgo.GinkgoT(), config, "expected a configmap but none returned")

 	config.Data[key] = value

@@ -331,8 +324,9 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 		CoreV1().
 		ConfigMaps(f.Namespace).
 		Update(config)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error updating configuration configmap")
-	time.Sleep(5 * time.Second)
+	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
+
+	time.Sleep(1 * time.Second)
 }

 // DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up.
@@ -340,10 +334,10 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 func (f *Framework) DeleteNGINXPod(grace int64) {
 	ns := f.Namespace
 	pod, err := getIngressNGINXPod(ns, f.KubeClientSet)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "expected ingress nginx pod to be running")
+	assert.Nil(ginkgo.GinkgoT(), err, "expected ingress nginx pod to be running")

 	err = f.KubeClientSet.CoreV1().Pods(ns).Delete(pod.GetName(), metav1.NewDeleteOptions(grace))
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error deleting ingress nginx pod")
+	assert.Nil(ginkgo.GinkgoT(), err, "deleting ingress nginx pod")

 	err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
 		pod, err := getIngressNGINXPod(ns, f.KubeClientSet)
@@ -352,7 +346,42 @@ func (f *Framework) DeleteNGINXPod(grace int64) {
 		}
 		return pod.GetName() != "", nil
 	})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error while waiting for ingress nginx pod to come up again")
+	assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress nginx pod to come up again")
 }

+func (f *Framework) HTTPTestClient() *httpexpect.Expect {
+	return f.newTestClient(nil)
+}
+
+func (f *Framework) HTTPTestClientWithTLSConfig(config *tls.Config) *httpexpect.Expect {
+	return f.newTestClient(config)
+}
+
+func (f *Framework) newTestClient(config *tls.Config) *httpexpect.Expect {
+	if config == nil {
+		config = &tls.Config{
+			InsecureSkipVerify: true,
+		}
+	}
+
+	return httpexpect.WithConfig(httpexpect.Config{
+		BaseURL: f.GetURL(HTTP),
+		Client: &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: config,
+			},
+			CheckRedirect: func(req *http.Request, via []*http.Request) error {
+				return http.ErrUseLastResponse
+			},
+		},
+		Reporter: httpexpect.NewAssertReporter(
+			httpexpect.NewAssertReporter(ginkgo.GinkgoT()),
+		),
+		Printers: []httpexpect.Printer{
+			// TODO: enable conditionally?
+			// httpexpect.NewDebugPrinter(ginkgo.GinkgoT(), false),
+		},
+	})
+}
+
 // UpdateDeployment runs the given updateFunc on the deployment and waits for it to be updated
@@ -368,20 +397,6 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
 		}
 	}

-	if *deployment.Spec.Replicas != int32(replicas) {
-		klog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas)
-		deployment, err := kubeClientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-
-		deployment.Spec.Replicas = NewInt32(int32(replicas))
-		_, err = kubeClientSet.AppsV1().Deployments(namespace).Update(deployment)
-		if err != nil {
-			return errors.Wrapf(err, "scaling the number of replicas to %v", replicas)
-		}
-	}
-
 	err = WaitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(deployment.Spec.Template.ObjectMeta.Labels)).String(),
 	})
@@ -444,7 +459,6 @@ func NewSingleIngressWithMultiplePaths(name string, paths []string, host, ns, se
 }

 func newSingleIngressWithRules(name, path, host, ns, service string, port int, annotations map[string]string, tlsHosts []string) *networking.Ingress {
-
 	spec := networking.IngressSpec{
 		Rules: []networking.IngressRule{
 			{
@@ -524,10 +538,6 @@ func NewSingleCatchAllIngress(name, ns, service string, port int, annotations ma
 }

 func newSingleIngress(name, ns string, annotations map[string]string, spec networking.IngressSpec) *networking.Ingress {
-	if annotations == nil {
-		annotations = make(map[string]string)
-	}
-
 	ing := &networking.Ingress{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -535,6 +545,11 @@ func newSingleIngress(name, ns string, annotations map[string]string, spec netwo
 		},
 		Spec: spec,
 	}

+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+
 	ing.SetAnnotations(annotations)
+
 	return ing
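The HTTPTestClient/newTestClient helpers added above wrap gopkg.in/gavv/httpexpect.v2 around the controller URL, with redirects disabled and certificate verification skipped unless a tls.Config is supplied. A hedged sketch of how a spec might consume it; the path and Host header are made-up values and the fluent call chain is httpexpect's standard API, not code from this commit:

import "net/http"

func exampleHTTPRequest(f *Framework) {
    f.HTTPTestClient().
        GET("/healthz").                   // hypothetical path
        WithHeader("Host", "foo.bar.com"). // hypothetical Host header
        Expect().
        Status(http.StatusOK)
}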
@@ -17,8 +17,8 @@ limitations under the License.
 package framework

 import (
-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -78,7 +78,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32
 	err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for to become ready")
+	assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")

 	service := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -17,8 +17,8 @@ limitations under the License.
 package framework

 import (
-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,9 +69,8 @@ func (f *Framework) NewInfluxDBDeployment() {
 	}

 	cm, err := f.EnsureConfigMap(configuration)
-	Expect(err).NotTo(HaveOccurred(), "failed to create an Influxdb deployment")
-
-	Expect(cm).NotTo(BeNil(), "expected a configmap but none returned")
+	assert.Nil(ginkgo.GinkgoT(), err, "creating an Influxdb deployment")
+	assert.NotNil(ginkgo.GinkgoT(), cm, "expected a configmap but none returned")

 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
@@ -140,5 +139,5 @@ func (f *Framework) NewInfluxDBDeployment() {
 	err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
 		LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
 	})
-	Expect(err).NotTo(HaveOccurred(), "failed to wait for influxdb to become ready")
+	assert.NotNil(ginkgo.GinkgoT(), err, "failed to wait for influxdb to become ready")
 }
@@ -21,8 +21,8 @@ import (
 	"strings"
 	"time"

-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
 	api "k8s.io/api/core/v1"
 	core "k8s.io/api/core/v1"
@@ -33,18 +33,17 @@ import (
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/util/retry"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 )

 // EnsureSecret creates a Secret object or returns it if it already exists.
 func (f *Framework) EnsureSecret(secret *api.Secret) *api.Secret {
 	err := createSecretWithRetries(f.KubeClientSet, f.Namespace, secret)
-	Expect(err).To(BeNil(), "unexpected error creating secret")
+	assert.Nil(ginkgo.GinkgoT(), err, "creating secret")

 	s, err := f.KubeClientSet.CoreV1().Secrets(secret.Namespace).Get(secret.Name, metav1.GetOptions{})
-	Expect(s).NotTo(BeNil())
-	Expect(s.ObjectMeta).NotTo(BeNil())
+	assert.Nil(ginkgo.GinkgoT(), err, "getting secret")
+	assert.NotNil(ginkgo.GinkgoT(), s, "getting secret")

 	return s
 }
@@ -64,30 +63,19 @@ func (f *Framework) EnsureConfigMap(configMap *api.ConfigMap) (*api.ConfigMap, e

 // EnsureIngress creates an Ingress object or returns it if it already exists.
 func (f *Framework) EnsureIngress(ingress *networking.Ingress) *networking.Ingress {
-	ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Create(ingress)
-	if err != nil {
-		if k8sErrors.IsAlreadyExists(err) {
-			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-				var err error
-				ing, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(ingress)
-				if err != nil {
-					return err
-				}
+	err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
+	assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")

-				return nil
-			})
-
-			Expect(err).NotTo(HaveOccurred())
-		}
-	}
-
-	Expect(ing).NotTo(BeNil(), "expected an ingress but none returned")
+	ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(ingress.Name, metav1.GetOptions{})
+	assert.Nil(ginkgo.GinkgoT(), err, "getting ingress")
+	assert.NotNil(ginkgo.GinkgoT(), ing, "expected an ingress but none returned")

 	if ing.Annotations == nil {
 		ing.Annotations = make(map[string]string)
 	}

-	time.Sleep(5 * time.Second)
+	// creating an ingress requires a reload.
+	time.Sleep(4 * time.Second)

 	return ing
 }
@@ -95,12 +83,11 @@ func (f *Framework) EnsureIngress(ingress *networking.Ingress) *networking.Ingre
 // EnsureService creates a Service object or returns it if it already exists.
 func (f *Framework) EnsureService(service *core.Service) *core.Service {
 	err := createServiceWithRetries(f.KubeClientSet, f.Namespace, service)
-	Expect(err).To(BeNil(), "unexpected error creating service")
+	assert.Nil(ginkgo.GinkgoT(), err, "creating service")

 	s, err := f.KubeClientSet.CoreV1().Services(f.Namespace).Get(service.Name, metav1.GetOptions{})
-	Expect(err).To(BeNil(), "unexpected error searching service")
-	Expect(s).NotTo(BeNil())
-	Expect(s.ObjectMeta).NotTo(BeNil())
+	assert.Nil(ginkgo.GinkgoT(), err, "getting service")
+	assert.NotNil(ginkgo.GinkgoT(), s, "expected a service but none returned")

 	return s
 }
@@ -108,14 +95,13 @@ func (f *Framework) EnsureService(service *core.Service) *core.Service {
 // EnsureDeployment creates a Deployment object or returns it if it already exists.
 func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Deployment {
 	err := createDeploymentWithRetries(f.KubeClientSet, f.Namespace, deployment)
-	Expect(err).To(BeNil(), "unexpected error creating deployment")
+	assert.Nil(ginkgo.GinkgoT(), err, "creating deployment")

-	s, err := f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{})
-	Expect(err).To(BeNil(), "unexpected error searching deployment")
-	Expect(s).NotTo(BeNil())
-	Expect(s.ObjectMeta).NotTo(BeNil())
+	d, err := f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{})
+	assert.Nil(ginkgo.GinkgoT(), err, "getting deployment")
+	assert.NotNil(ginkgo.GinkgoT(), d, "expected a deployment but none returned")

-	return s
+	return d
 }

 // WaitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
@@ -168,7 +154,7 @@ func WaitForEndpoints(kubeClientSet kubernetes.Interface, timeout time.Duration,
 			return false, nil
 		}

-		Expect(err).NotTo(HaveOccurred())
+		assert.Nil(ginkgo.GinkgoT(), err, "getting endpoints")

 		if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
 			return false, nil
@@ -286,6 +272,24 @@ func createServiceWithRetries(c kubernetes.Interface, namespace string, obj *v1.
 	return retryWithExponentialBackOff(createFunc)
 }

+func createIngressWithRetries(c kubernetes.Interface, namespace string, obj *networking.Ingress) error {
+	if obj == nil {
+		return fmt.Errorf("Object provided to create is empty")
+	}
+	createFunc := func() (bool, error) {
+		_, err := c.NetworkingV1beta1().Ingresses(namespace).Create(obj)
+		if err == nil || k8sErrors.IsAlreadyExists(err) {
+			return true, nil
+		}
+		if isRetryableAPIError(err) {
+			return false, nil
+		}
+		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
+	}
+
+	return retryWithExponentialBackOff(createFunc)
+}
+
 const (
 	// Parameters for retrying with exponential backoff.
 	retryBackoffInitialDuration = 100 * time.Millisecond
@@ -315,5 +319,6 @@ func isRetryableAPIError(err error) bool {
 	if _, shouldRetry := k8sErrors.SuggestsClientDelay(err); shouldRetry {
 		return true
 	}
+
 	return false
 }
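createIngressWithRetries follows the same shape as the existing create*WithRetries helpers and funnels into retryWithExponentialBackOff, which is outside this diff. Based on the retryBackoff constants visible above, a plausible sketch of that wrapper; the factor, jitter, and step count are assumptions, only the 100ms initial duration appears in the hunk:

import "k8s.io/apimachinery/pkg/util/wait"

const (
    retryBackoffFactor = 3.0 // assumed
    retryBackoffJitter = 0.0 // assumed
    retryBackoffSteps  = 6   // assumed
)

func retryWithExponentialBackOff(fn wait.ConditionFunc) error {
    backoff := wait.Backoff{
        Duration: retryBackoffInitialDuration,
        Factor:   retryBackoffFactor,
        Jitter:   retryBackoffJitter,
        Steps:    retryBackoffSteps,
    }
    // Re-run fn with exponentially growing pauses until it returns true or an error.
    return wait.ExponentialBackoff(backoff, fn)
}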
@@ -32,8 +32,8 @@ import (
 	"strings"
 	"time"

-	. "github.com/onsi/gomega"
-
+	"github.com/onsi/ginkgo"
+	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -144,7 +144,7 @@ func CreateIngressMASecret(client kubernetes.Interface, host string, secretName,
 // WaitForTLS waits until the TLS handshake with a given server completes successfully.
 func WaitForTLS(url string, tlsConfig *tls.Config) {
 	err := wait.Poll(Poll, DefaultTimeout, matchTLSServerName(url, tlsConfig))
-	Expect(err).NotTo(HaveOccurred(), "timeout waiting for TLS configuration in URL %s", url)
+	assert.Nil(ginkgo.GinkgoT(), err, "waiting for TLS configuration in URL %s", url)
 }

 // generateRSACert generates a basic self signed certificate using a key length
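WaitForTLS polls matchTLSServerName, which is not part of this diff. A hypothetical stand-in, only to show the general shape of such a poll condition (dial with the supplied tls.Config and retry until the handshake succeeds); the name, the :443 default, and the retry policy are assumptions:

import (
    "crypto/tls"
    "net/url"

    "k8s.io/apimachinery/pkg/util/wait"
)

func matchTLSServerNameSketch(rawURL string, tlsConfig *tls.Config) wait.ConditionFunc {
    return func() (bool, error) {
        u, err := url.Parse(rawURL)
        if err != nil {
            return false, err // a malformed URL is not retryable
        }
        addr := u.Host
        if u.Port() == "" {
            addr += ":443" // assume the HTTPS default when the URL has no explicit port
        }
        conn, err := tls.Dial("tcp", addr, tlsConfig)
        if err != nil {
            return false, nil // handshake not ready yet; poll again
        }
        defer conn.Close()
        return true, nil
    }
}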
@@ -22,7 +22,6 @@ import (
 	"time"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"

 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -39,7 +38,7 @@ const (
 	Poll = 2 * time.Second

 	// DefaultTimeout time to wait for operations to complete
-	DefaultTimeout = 2 * time.Minute
+	DefaultTimeout = 90 * time.Second
 )

 func nowStamp() string {
@@ -125,14 +124,6 @@ func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
 	})
 }

-// ExpectNoError tests whether an error occurred.
-func ExpectNoError(err error, explain ...interface{}) {
-	if err != nil {
-		Logf("Unexpected error occurred: %v", err)
-	}
-	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
-}
-
 // WaitForKubeNamespaceNotExist waits until a namespaces is not present in the cluster
 func WaitForKubeNamespaceNotExist(c kubernetes.Interface, namespace string) error {
 	return wait.PollImmediate(Poll, DefaultTimeout, namespaceNotExist(c, namespace))