Add events for NGINX reloads
parent c7b041fb9e, commit 29ea30a4e8
26 changed files with 319 additions and 326 deletions
@@ -135,6 +135,11 @@ func main() {
 	conf.Client = kubeClient
 
+	err = k8s.GetIngressPod(kubeClient)
+	if err != nil {
+		klog.Fatalf("Unexpected error obtaining ingress-nginx pod: %v", err)
+	}
+
 	reg := prometheus.NewRegistry()
 
 	reg.MustRegister(prometheus.NewGoCollector())
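GetIngressPod resolves the controller's own Pod object from the POD_NAME and POD_NAMESPACE environment variables (see internal/k8s further down in this diff). Those variables are conventionally injected through the Kubernetes Downward API; a minimal sketch of that wiring using client-go types, not part of this commit:

	// import corev1 "k8s.io/api/core/v1"
	env := []corev1.EnvVar{
		{
			// Resolves to the pod's own name at runtime.
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		},
		{
			// Resolves to the namespace the pod runs in.
			Name: "POD_NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
			},
		},
	}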
@@ -26,11 +26,13 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 
 	"k8s.io/ingress-nginx/internal/ingress/controller"
 	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/ingress-nginx/internal/nginx"
 )

@@ -56,6 +58,16 @@ func TestHandleSigterm(t *testing.T) {
 		namespace = "test"
 	)
 
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      podName,
+			Namespace: namespace,
+			Labels: map[string]string{
+				"pod-template-hash": "1234",
+			},
+		},
+	}
+
 	clientSet := fake.NewSimpleClientset()
 
 	createConfigMap(clientSet, namespace, t)

@@ -43,6 +43,7 @@ import (
 	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/ingress-nginx/internal/nginx"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/apis/core"
 )
 
 const (

@@ -154,12 +155,15 @@ func (n *NGINXController) syncIngress(interface{}) error {
 		n.metricCollector.IncReloadErrorCount()
 		n.metricCollector.ConfigSuccess(hash, false)
 		klog.Errorf("Unexpected failure reloading the backend:\n%v", err)
+		n.recorder.Eventf(k8s.IngressNGINXPod, core.EventTypeWarning, "RELOAD", fmt.Sprintf("Error reloading NGINX: %v", err))
 		return err
 	}
 
 	klog.Infof("Backend successfully reloaded.")
 	n.metricCollector.ConfigSuccess(hash, true)
 	n.metricCollector.IncReloadCount()
+
+	n.recorder.Eventf(k8s.IngressNGINXPod, core.EventTypeNormal, "RELOAD", "NGINX reload triggered due to a change in configuration")
 	}
 
 	isFirstSync := n.runningConfig.Equal(&ingress.Configuration{})
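This is the heart of the commit: every NGINX reload, successful or failed, is now surfaced as a Kubernetes event on the controller pod, so it shows up in kubectl describe pod and kubectl get events alongside the existing metrics and log lines. n.recorder is a client-go record.EventRecorder; a minimal sketch of how such a recorder is typically wired up (the function and component names here are illustrative, not taken from this commit):

	import (
		corev1 "k8s.io/api/core/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/kubernetes/scheme"
		typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
		"k8s.io/client-go/tools/record"
	)

	func newRecorder(client kubernetes.Interface) record.EventRecorder {
		// Fan events out to the API server so kubectl can display them.
		broadcaster := record.NewBroadcaster()
		broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
			Interface: client.CoreV1().Events(""),
		})
		return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{
			Component: "nginx-ingress-controller",
		})
	}

Note that Eventf already formats its message from the trailing arguments, so the fmt.Sprintf in the warning branch above is redundant but harmless.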
@@ -1662,13 +1662,6 @@ func testConfigMap(ns string) *v1.ConfigMap {
 
 func newNGINXController(t *testing.T) *NGINXController {
 	ns := v1.NamespaceDefault
-	pod := &k8s.PodInfo{
-		Name:      "testpod",
-		Namespace: ns,
-		Labels: map[string]string{
-			"pod-template-hash": "1234",
-		},
-	}
 
 	clientSet := fake.NewSimpleClientset()
 

@@ -1684,6 +1677,16 @@ func newNGINXController(t *testing.T) *NGINXController {
 		t.Fatalf("error creating the configuration map: %v", err)
 	}
 
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: ns,
+			Labels: map[string]string{
+				"pod-template-hash": "1234",
+			},
+		},
+	}
+
 	storer := store.New(
 		ns,
 		fmt.Sprintf("%v/config", ns),

@@ -1693,7 +1696,6 @@ func newNGINXController(t *testing.T) *NGINXController {
 		10*time.Minute,
 		clientSet,
 		channels.NewRingChannel(10),
-		pod,
 		false)
 
 	sslCert := ssl.GetFakeSSLCert()

@@ -1724,13 +1726,6 @@ func fakeX509Cert(dnsNames []string) *x509.Certificate {
 
 func newDynamicNginxController(t *testing.T, setConfigMap func(string) *v1.ConfigMap) *NGINXController {
 	ns := v1.NamespaceDefault
-	pod := &k8s.PodInfo{
-		Name:      "testpod",
-		Namespace: ns,
-		Labels: map[string]string{
-			"pod-template-hash": "1234",
-		},
-	}
 
 	clientSet := fake.NewSimpleClientset()
 	configMap := setConfigMap(ns)

@@ -1740,6 +1735,16 @@ func newDynamicNginxController(t *testing.T, setConfigMap func(string) *v1.Confi
 		t.Fatalf("error creating the configuration map: %v", err)
 	}
 
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: ns,
+			Labels: map[string]string{
+				"pod-template-hash": "1234",
+			},
+		},
+	}
+
 	storer := store.New(
 		ns,
 		fmt.Sprintf("%v/config", ns),

@@ -1749,7 +1754,6 @@ func newDynamicNginxController(t *testing.T, setConfigMap func(string) *v1.Confi
 		10*time.Minute,
 		clientSet,
 		channels.NewRingChannel(10),
-		pod,
 		false)
 
 	sslCert := ssl.GetFakeSSLCert()
@@ -56,7 +56,6 @@ import (
 	ngx_template "k8s.io/ingress-nginx/internal/ingress/controller/template"
 	"k8s.io/ingress-nginx/internal/ingress/metric"
 	"k8s.io/ingress-nginx/internal/ingress/status"
-	"k8s.io/ingress-nginx/internal/k8s"
 	ing_net "k8s.io/ingress-nginx/internal/net"
 	"k8s.io/ingress-nginx/internal/net/dns"
 	"k8s.io/ingress-nginx/internal/net/ssl"

@@ -118,12 +117,6 @@ func NewNGINXController(config *Configuration, mc metric.Collector) *NGINXContro
 		}
 	}
 
-	pod, err := k8s.GetPodDetails(config.Client)
-	if err != nil {
-		klog.Fatalf("unexpected error obtaining pod information: %v", err)
-	}
-	n.podInfo = pod
-
 	n.store = store.New(
 		config.Namespace,
 		config.ConfigMapName,

@@ -133,13 +126,12 @@ func NewNGINXController(config *Configuration, mc metric.Collector) *NGINXContro
 		config.ResyncPeriod,
 		config.Client,
 		n.updateCh,
-		pod,
 		config.DisableCatchAll)
 
 	n.syncQueue = task.NewTaskQueue(n.syncIngress)
 
 	if config.UpdateStatus {
-		n.syncStatus = status.NewStatusSyncer(pod, status.Config{
+		n.syncStatus = status.NewStatusSyncer(status.Config{
 			Client:                 config.Client,
 			PublishService:         config.PublishService,
 			PublishStatusAddress:   config.PublishStatusAddress,

@@ -213,8 +205,6 @@ Error loading new template: %v
 
 // NGINXController describes a NGINX Ingress controller.
 type NGINXController struct {
-	podInfo *k8s.PodInfo
-
 	cfg *Configuration
 
 	recorder record.EventRecorder

@@ -287,8 +277,6 @@ func (n *NGINXController) Start() {
 			OnStoppedLeading: func() {
 				n.metricCollector.OnStoppedLeading(electionID)
 			},
-			PodName:      n.podInfo.Name,
-			PodNamespace: n.podInfo.Namespace,
 		})
 
 	cmd := n.command.ExecCommand()
@@ -21,6 +21,7 @@ import (
 	"os"
 	"time"
 
+	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/klog/v2"
 
 	apiv1 "k8s.io/api/core/v1"

@@ -33,9 +34,6 @@ import (
 )
 
 type leaderElectionConfig struct {
-	PodName      string
-	PodNamespace string
-
 	Client clientset.Interface
 
 	ElectionID string

@@ -95,17 +93,21 @@ func setupLeaderElection(config *leaderElectionConfig) {
 		Host: hostname,
 	})
 
+	ingressPod, err := k8s.GetPodDetails()
+	if err != nil {
+		klog.Fatalf("unexpected error starting leader election: %v", err)
+	}
+
 	lock := resourcelock.ConfigMapLock{
-		ConfigMapMeta: metav1.ObjectMeta{Namespace: config.PodNamespace, Name: config.ElectionID},
+		ConfigMapMeta: metav1.ObjectMeta{Namespace: ingressPod.Namespace, Name: config.ElectionID},
 		Client:        config.Client.CoreV1(),
 		LockConfig: resourcelock.ResourceLockConfig{
-			Identity:      config.PodName,
+			Identity:      ingressPod.Name,
 			EventRecorder: recorder,
 		},
 	}
 
 	ttl := 30 * time.Second
-	var err error
 
 	elector, err = leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
 		Lock: &lock,
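With PodName and PodNamespace gone from leaderElectionConfig, the lock's namespace and identity now come from the cached pod details. For context, a self-contained sketch of the client-go leader-election pattern this function follows (the lock name, durations, and callbacks are illustrative, not taken from this commit):

	package main

	import (
		"context"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/leaderelection"
		"k8s.io/client-go/tools/leaderelection/resourcelock"
	)

	func runElection(ctx context.Context, client kubernetes.Interface, ns, id string) error {
		// A single ConfigMap acts as the lock; the holder is identified by pod name.
		lock := &resourcelock.ConfigMapLock{
			ConfigMapMeta: metav1.ObjectMeta{Namespace: ns, Name: "ingress-controller-leader"},
			Client:        client.CoreV1(),
			LockConfig:    resourcelock.ResourceLockConfig{Identity: id},
		}

		elector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
			Lock:          lock,
			LeaseDuration: 30 * time.Second,
			RenewDeadline: 15 * time.Second,
			RetryPeriod:   5 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: func(ctx context.Context) { /* become the active controller */ },
				OnStoppedLeading: func() { /* step down */ },
			},
		})
		if err != nil {
			return err
		}

		elector.Run(ctx) // blocks until ctx is cancelled or leadership is lost
		return nil
	}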
@@ -220,8 +220,6 @@ type k8sStore struct {
 	backendConfigMu *sync.RWMutex
 
 	defaultSSLCertificate string
-
-	pod *k8s.PodInfo
 }
 
 // New creates a new object store to be used in the ingress controller

@@ -230,7 +228,6 @@ func New(
 	resyncPeriod time.Duration,
 	client clientset.Interface,
 	updateCh *channels.RingChannel,
-	pod *k8s.PodInfo,
 	disableCatchAll bool) Storer {
 
 	store := &k8sStore{

@@ -243,7 +240,6 @@ func New(
 		backendConfigMu:       &sync.RWMutex{},
 		secretIngressMap:      NewObjectRefMap(),
 		defaultSSLCertificate: defaultSSLCertificate,
-		pod:                   pod,
 	}
 
 	eventBroadcaster := record.NewBroadcaster()

@@ -294,16 +290,17 @@ func New(
 	store.informers.Service = infFactory.Core().V1().Services().Informer()
 	store.listers.Service.Store = store.informers.Service.GetStore()
 
-	labelSelector := labels.SelectorFromSet(store.pod.Labels)
+	ingressPodInfo, _ := k8s.GetPodDetails()
+	labelSelector := labels.SelectorFromSet(ingressPodInfo.Labels)
 	store.informers.Pod = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (k8sruntime.Object, error) {
 				options.LabelSelector = labelSelector.String()
-				return client.CoreV1().Pods(store.pod.Namespace).List(context.TODO(), options)
+				return client.CoreV1().Pods(ingressPodInfo.Namespace).List(context.TODO(), options)
 			},
 			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				options.LabelSelector = labelSelector.String()
-				return client.CoreV1().Pods(store.pod.Namespace).Watch(context.TODO(), options)
+				return client.CoreV1().Pods(ingressPodInfo.Namespace).Watch(context.TODO(), options)
 			},
 		},
 		&corev1.Pod{},
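The pod informer is scoped with a selector built from the controller pod's own labels, so each replica watches exactly its sibling pods. A quick, runnable illustration of what labels.SelectorFromSet produces for the label used throughout the tests in this diff:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/labels"
	)

	func main() {
		// Pods of the same ReplicaSet share pod-template-hash, so this selector
		// matches every replica of the controller.
		sel := labels.SelectorFromSet(labels.Set{"pod-template-hash": "1234"})
		fmt.Println(sel.String()) // pod-template-hash=1234
	}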
@@ -47,12 +47,14 @@ import (
 func TestStore(t *testing.T) {
 	k8s.IsNetworkingIngressAvailable = true
 
-	pod := &k8s.PodInfo{
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "testpod",
 			Namespace: v1.NamespaceDefault,
 			Labels: map[string]string{
 				"pod-template-hash": "1234",
 			},
+		},
 	}
 
 	//TODO: move env definition to docker image?

@@ -94,7 +96,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -172,7 +173,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -320,7 +320,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -424,7 +423,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -511,7 +509,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -620,7 +617,6 @@ func TestStore(t *testing.T) {
 		10*time.Minute,
 		clientSet,
 		updateCh,
-		pod,
 		false)
 
 	storer.Run(stopCh)

@@ -777,12 +773,14 @@ func deleteIngress(ingress *networking.Ingress, clientSet kubernetes.Interface,
 // newStore creates a new mock object store for tests which do not require the
 // use of Informers.
 func newStore(t *testing.T) *k8sStore {
-	pod := &k8s.PodInfo{
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "ingress-1",
 			Namespace: v1.NamespaceDefault,
 			Labels: map[string]string{
 				"pod-template-hash": "1234",
 			},
+		},
 	}
 
 	return &k8sStore{

@@ -797,7 +795,6 @@ func newStore(t *testing.T) *k8sStore {
 		syncSecretMu:     new(sync.Mutex),
 		backendConfigMu:  new(sync.RWMutex),
 		secretIngressMap: NewObjectRefMap(),
-		pod:              pod,
 	}
 }
 

@@ -1011,15 +1008,18 @@ func TestGetRunningControllerPodsCount(t *testing.T) {
 	os.Setenv("POD_NAMESPACE", "testns")
 	os.Setenv("POD_NAME", "ingress-1")
 
-	s := newStore(t)
-	s.pod = &k8s.PodInfo{
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "ingress-1",
 			Namespace: "testns",
 			Labels: map[string]string{
 				"pod-template-hash": "1234",
 			},
+		},
 	}
 
+	s := newStore(t)
+
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "ingress-1",
@@ -82,9 +82,6 @@ type Config struct {
 type statusSync struct {
 	Config
 
-	// pod contains runtime information about this pod
-	pod *k8s.PodInfo
-
 	// workqueue used to keep in sync the status IP/s
 	// in the Ingress rules
 	syncQueue *task.Queue

@@ -158,10 +155,8 @@ func (s statusSync) keyfunc(input interface{}) (interface{}, error) {
 }
 
 // NewStatusSyncer returns a new Syncer instance
-func NewStatusSyncer(podInfo *k8s.PodInfo, config Config) Syncer {
+func NewStatusSyncer(config Config) Syncer {
 	st := statusSync{
-		pod: podInfo,
-
 		Config: config,
 	}
 	st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc)

@@ -180,9 +175,14 @@ func (s *statusSync) runningAddresses() ([]string, error) {
 		return statusAddressFromService(s.PublishService, s.Client)
 	}
 
+	ingressPod, err := k8s.GetPodDetails()
+	if err != nil {
+		return nil, err
+	}
+
 	// get information about all the pods running the ingress controller
-	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
+	pods, err := s.Client.CoreV1().Pods(ingressPod.Namespace).List(context.TODO(), metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(ingressPod.Labels).String(),
 	})
 	if err != nil {
 		return nil, err

@@ -205,8 +205,13 @@ func (s *statusSync) runningAddresses() ([]string, error) {
 }
 
 func (s *statusSync) isRunningMultiplePods() bool {
-	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(context.TODO(), metav1.ListOptions{
-		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
+	ingressPod, err := k8s.GetPodDetails()
+	if err != nil {
+		return false
+	}
+
+	pods, err := s.Client.CoreV1().Pods(ingressPod.Namespace).List(context.TODO(), metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(ingressPod.Labels).String(),
 	})
 	if err != nil {
 		return false
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	apiv1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	networking "k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	testclient "k8s.io/client-go/kubernetes/fake"

@@ -264,13 +265,6 @@ func buildIngressLister() ingressLister {
 
 func buildStatusSync() statusSync {
 	return statusSync{
-		pod: &k8s.PodInfo{
-			Name:      "foo_base_pod",
-			Namespace: apiv1.NamespaceDefault,
-			Labels: map[string]string{
-				"lable_sig": "foo_pod",
-			},
-		},
 		syncQueue: task.NewTaskQueue(fakeSynFn),
 		Config: Config{
 			Client: buildSimpleClientSet(),

@@ -291,14 +285,18 @@ func TestStatusActions(t *testing.T) {
 		UpdateStatusOnShutdown: true,
 	}
 
-	// create object
-	fkSync := NewStatusSyncer(&k8s.PodInfo{
+	k8s.IngressNGINXPod = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "foo_base_pod",
 			Namespace: apiv1.NamespaceDefault,
 			Labels: map[string]string{
 				"lable_sig": "foo_pod",
 			},
-	}, c)
+		},
+	}
+
+	// create object
+	fkSync := NewStatusSyncer(c)
 	if fkSync == nil {
 		t.Fatalf("expected a valid Sync")
 	}
@@ -75,6 +75,9 @@ func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP
 	return defaultOrInternalIP
 }
 
+// IngressNGINXPod holds information about the ingress-nginx pod
+var IngressNGINXPod *apiv1.Pod
+
 // PodInfo contains runtime information about the pod running the Ingress controller
 type PodInfo struct {
 	Name string

@@ -84,25 +87,34 @@ type PodInfo struct {
 	Labels map[string]string
 }
 
-// GetPodDetails returns runtime information about the pod:
-// name, namespace and IP of the node where it is running
-func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) {
+// GetIngressPod loads the ingress-nginx pod
+func GetIngressPod(kubeClient clientset.Interface) error {
 	podName := os.Getenv("POD_NAME")
 	podNs := os.Getenv("POD_NAMESPACE")
 
 	if podName == "" || podNs == "" {
-		return nil, fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable")
+		return fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable)")
 	}
 
-	pod, _ := kubeClient.CoreV1().Pods(podNs).Get(context.TODO(), podName, metav1.GetOptions{})
-	if pod == nil {
-		return nil, fmt.Errorf("unable to get POD information")
+	IngressNGINXPod, _ = kubeClient.CoreV1().Pods(podNs).Get(context.TODO(), podName, metav1.GetOptions{})
+	if IngressNGINXPod == nil {
+		return fmt.Errorf("unable to get POD information")
 	}
 
+	return nil
+}
+
+// GetPodDetails returns runtime information about the pod:
+// name, namespace and IP of the node where it is running
+func GetPodDetails() (*PodInfo, error) {
+	if IngressNGINXPod == nil {
+		return nil, fmt.Errorf("no ingress-nginx pod details available")
+	}
+
 	return &PodInfo{
-		Name:      podName,
-		Namespace: podNs,
-		Labels:    pod.GetLabels(),
+		Name:      IngressNGINXPod.Name,
+		Namespace: IngressNGINXPod.Namespace,
+		Labels:    IngressNGINXPod.GetLabels(),
 	}, nil
 }
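The lookup is now a two-step API: GetIngressPod fetches the pod once and caches it in the package-level IngressNGINXPod, and the zero-argument GetPodDetails serves later callers from that cache. A minimal sketch of the call sequence against the fake clientset already used by the tests in this diff (the pod object itself is illustrative):

	package main

	import (
		"fmt"
		"os"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes/fake"

		"k8s.io/ingress-nginx/internal/k8s"
	)

	func main() {
		// GetIngressPod resolves the pod from these two variables.
		os.Setenv("POD_NAME", "testpod")
		os.Setenv("POD_NAMESPACE", corev1.NamespaceDefault)

		clientSet := fake.NewSimpleClientset(&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "testpod", Namespace: corev1.NamespaceDefault},
		})

		// Fetch once at startup and cache in k8s.IngressNGINXPod.
		if err := k8s.GetIngressPod(clientSet); err != nil {
			panic(err)
		}

		// Later callers read the cached details; no client needed.
		info, _ := k8s.GetPodDetails()
		fmt.Println(info.Name, info.Namespace)
	}

The package-level variable trades explicit plumbing for global state, which is why the tests throughout this diff now assign k8s.IngressNGINXPod directly instead of passing a *k8s.PodInfo around.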
@@ -218,7 +218,7 @@ func TestGetPodDetails(t *testing.T) {
 	// POD_NAME & POD_NAMESPACE not exist
 	os.Setenv("POD_NAME", "")
 	os.Setenv("POD_NAMESPACE", "")
-	_, err1 := GetPodDetails(testclient.NewSimpleClientset())
+	err1 := GetIngressPod(testclient.NewSimpleClientset())
 	if err1 == nil {
 		t.Errorf("expected an error but returned nil")
 	}

@@ -226,7 +226,7 @@ func TestGetPodDetails(t *testing.T) {
 	// POD_NAME not exist
 	os.Setenv("POD_NAME", "")
 	os.Setenv("POD_NAMESPACE", apiv1.NamespaceDefault)
-	_, err2 := GetPodDetails(testclient.NewSimpleClientset())
+	err2 := GetIngressPod(testclient.NewSimpleClientset())
 	if err2 == nil {
 		t.Errorf("expected an error but returned nil")
 	}

@@ -234,7 +234,7 @@ func TestGetPodDetails(t *testing.T) {
 	// POD_NAMESPACE not exist
 	os.Setenv("POD_NAME", "testpod")
 	os.Setenv("POD_NAMESPACE", "")
-	_, err3 := GetPodDetails(testclient.NewSimpleClientset())
+	err3 := GetIngressPod(testclient.NewSimpleClientset())
 	if err3 == nil {
 		t.Errorf("expected an error but returned nil")
 	}

@@ -242,7 +242,7 @@ func TestGetPodDetails(t *testing.T) {
 	// POD not exist
 	os.Setenv("POD_NAME", "testpod")
 	os.Setenv("POD_NAMESPACE", apiv1.NamespaceDefault)
-	_, err4 := GetPodDetails(testclient.NewSimpleClientset())
+	err4 := GetIngressPod(testclient.NewSimpleClientset())
 	if err4 == nil {
 		t.Errorf("expected an error but returned nil")
 	}

@@ -273,13 +273,9 @@ func TestGetPodDetails(t *testing.T) {
 		},
 	}}})
 
-	epi, err5 := GetPodDetails(fkClient)
+	err5 := GetIngressPod(fkClient)
 	if err5 != nil {
 		t.Errorf("expected a PodInfo but returned error")
 		return
 	}
-
-	if epi == nil {
-		t.Errorf("expected a PodInfo but returned nil")
-	}
 }
@@ -36,8 +36,7 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
 	ginkgo.It("uses custom default backend that returns 200 as status code", func() {
 		f.NewEchoDeployment()
 
-		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-			func(deployment *appsv1.Deployment) error {
+		err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 			args := deployment.Spec.Template.Spec.Containers[0].Args
 			args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
 			deployment.Spec.Template.Spec.Containers[0].Args = args

@@ -409,3 +409,13 @@ func (f *Framework) ScaleDeploymentToZero(name string) {
 	err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, 0)
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for no endpoints")
 }
+
+// UpdateIngressControllerDeployment updates the ingress-nginx deployment
+func (f *Framework) UpdateIngressControllerDeployment(fn func(deployment *appsv1.Deployment) error) error {
+	err := UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, fn)
+	if err != nil {
+		return err
+	}
+
+	return f.updateIngressNGINXPod()
+}
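Every e2e test that mutates the controller deployment now goes through this helper: a rolling update replaces the pod cached in f.pod, and re-resolving it right after the update keeps later ExecCommand and log calls pointed at a live pod. Note the helper pins the deployment to one replica, which also changes the [Lua] dynamic configuration test below from three replicas to one. Typical usage, taken from the test changes further down in this diff:

	err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
		args := deployment.Spec.Template.Spec.Containers[0].Args
		args = append(args, "--v=2")
		deployment.Spec.Template.Spec.Containers[0].Args = args
		_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
		return err
	})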
@@ -55,12 +55,7 @@ func (f *Framework) GetLbAlgorithm(serviceName string, servicePort int) (string,
 
 // ExecIngressPod executes a command inside the first container in ingress controller running pod
 func (f *Framework) ExecIngressPod(command string) (string, error) {
-	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-	if err != nil {
-		return "", err
-	}
-
-	return f.ExecCommand(pod, command)
+	return f.ExecCommand(f.pod, command)
 }
 
 // ExecCommand executes a command inside a the first container in a running pod

@@ -30,6 +30,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
 	networking "k8s.io/api/networking/v1beta1"
 	apiextcs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"

@@ -39,9 +40,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/apis/core"
 	kubeframework "k8s.io/kubernetes/test/e2e/framework"
 )

@@ -71,6 +74,8 @@ type Framework struct {
 	APIExtensionsClientSet apiextcs.Interface
 
 	Namespace string
+
+	pod *corev1.Pod
 }
 
 // NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for

@@ -108,6 +113,9 @@ func (f *Framework) BeforeEach() {
 	err = f.newIngressController(f.Namespace, f.BaseName)
 	assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")
 
+	err = f.updateIngressNGINXPod()
+	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller pod information")
+
 	f.WaitForNginxListening(80)
 }

@@ -125,14 +133,8 @@ func (f *Framework) AfterEach() {
 		return
 	}
 
-	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-	if err != nil {
-		Logf("Unexpected error searching for ingress controller pod: %v", err)
-		return
-	}
-
 	cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf")
-	o, err := f.ExecCommand(pod, cmd)
+	o, err := f.ExecCommand(f.pod, cmd)
 	if err != nil {
 		Logf("Unexpected error obtaining nginx.conf file: %v", err)
 		return

@@ -192,9 +194,7 @@ func (f *Framework) GetNginxIP() string {
 
 // GetNginxPodIP returns the IP addresses of the running pods
 func (f *Framework) GetNginxPodIP() string {
-	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX Pod")
-	return pod.Status.PodIP
+	return f.pod.Status.PodIP
 }
 
 // GetURL returns the URL should be used to make a request to NGINX
@@ -203,6 +203,18 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
 	return fmt.Sprintf("%v://%v", scheme, ip)
 }
 
+// GetIngressNGINXPod returns the ingress controller running pod
+func (f *Framework) GetIngressNGINXPod() *corev1.Pod {
+	return f.pod
+}
+
+// updateIngressNGINXPod searches for and updates the ingress controller running pod
+func (f *Framework) updateIngressNGINXPod() error {
+	var err error
+	f.pod, err = getIngressNGINXPod(f.Namespace, f.KubeClientSet)
+	return err
+}
+
 // WaitForNginxServer waits until the nginx configuration contains a particular server section
 func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
 	err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
@@ -223,31 +235,17 @@ func (f *Framework) WaitForNginxCustomConfiguration(from string, to string, matc
 	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
 }
 
-func nginxLogs(client kubernetes.Interface, namespace string) (string, error) {
-	pod, err := GetIngressNGINXPod(namespace, client)
-	if err != nil {
-		return "", err
-	}
-
-	if isRunning, err := podRunningReady(pod); err == nil && isRunning {
-		return Logs(pod)
+// NginxLogs returns the logs of the nginx ingress controller pod running
+func (f *Framework) NginxLogs() (string, error) {
+	if isRunning, err := podRunningReady(f.pod); err == nil && isRunning {
+		return Logs(f.KubeClientSet, f.Namespace, f.pod.Name)
 	}
 
 	return "", fmt.Errorf("no nginx ingress controller pod is running (logs)")
 }
 
-// NginxLogs returns the logs of the nginx ingress controller pod running
-func (f *Framework) NginxLogs() (string, error) {
-	return nginxLogs(f.KubeClientSet, f.Namespace)
-}
-
 func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) bool) wait.ConditionFunc {
 	return func() (bool, error) {
-		pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-		if err != nil {
-			return false, nil
-		}
-
 		var cmd string
 		if name == "" {
 			cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf")

@@ -255,7 +253,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
 			cmd = fmt.Sprintf("cat /etc/nginx/nginx.conf | awk '/## start server %v/,/## end server %v/'", name, name)
 		}
 
-		o, err := f.ExecCommand(pod, cmd)
+		o, err := f.ExecCommand(f.pod, cmd)
 		if err != nil {
 			return false, nil
 		}

@@ -275,14 +273,9 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
 
 func (f *Framework) matchNginxCustomConditions(from string, to string, matcher func(cfg string) bool) wait.ConditionFunc {
 	return func() (bool, error) {
-		pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-		if err != nil {
-			return false, nil
-		}
-
 		cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf| awk '/%v/,/%v/'", from, to)
 
-		o, err := f.ExecCommand(pod, cmd)
+		o, err := f.ExecCommand(f.pod, cmd)
 		if err != nil {
 			return false, nil
 		}
@@ -371,39 +364,37 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
 }
 
 func (f *Framework) waitForReload(fn func()) {
-	reloadCount := f.getReloadCount()
+	reloadCount := getReloadCount(f.pod, f.Namespace, f.KubeClientSet)
 
 	fn()
 
-	var count int
-	err := wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
+	count := 0
+	err := wait.Poll(2*time.Second, DefaultTimeout, func() (bool, error) {
 		// most of the cases reload the ingress controller
 		// in cases where the value is not modified we could wait forever
-		if count > 10 {
+		if count > 3 {
 			return true, nil
 		}
 
 		count++
 
-		return (f.getReloadCount() > reloadCount), nil
+		return (getReloadCount(f.pod, f.Namespace, f.KubeClientSet) > reloadCount), nil
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress controller reload")
 }
 
-func (f *Framework) getReloadCount() int {
-	ip := f.GetNginxPodIP()
-
-	mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
-	if err != nil {
-		return 0
-	}
-
-	assert.NotNil(ginkgo.GinkgoT(), mf)
-
-	rc0, err := extractReloadCount(mf)
-	assert.Nil(ginkgo.GinkgoT(), err)
-
-	return int(rc0)
+func getReloadCount(pod *corev1.Pod, namespace string, client kubernetes.Interface) int {
+	evnts, err := client.CoreV1().Events(namespace).Search(scheme.Scheme, pod)
+	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX Pod")
+
+	reloadCount := 0
+	for _, e := range evnts.Items {
+		if e.Reason == "RELOAD" && e.Type == core.EventTypeNormal {
+			reloadCount++
+		}
+	}
+
+	return reloadCount
}
 
 func extractReloadCount(mf *dto.MetricFamily) (float64, error) {
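The reload counter now comes from the new RELOAD events rather than from the nginx_ingress_controller_success metric, so the e2e suite exercises the same event path the commit adds to the controller. Events(namespace).Search builds a field selector for the pod's involvedObject under the hood; the same counting logic can be sketched with a plain List, which is easy to run against the fake clientset (this is an illustrative test, not part of the commit):

	package framework_test

	import (
		"context"
		"testing"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes/fake"
	)

	func TestCountReloadEvents(t *testing.T) {
		// Seed the fake API with one normal RELOAD event, as emitted by syncIngress above.
		clientSet := fake.NewSimpleClientset(&corev1.Event{
			ObjectMeta:     metav1.ObjectMeta{Name: "reload-1", Namespace: "test"},
			InvolvedObject: corev1.ObjectReference{Kind: "Pod", Name: "ingress-1", Namespace: "test"},
			Reason:         "RELOAD",
			Type:           corev1.EventTypeNormal,
		})

		evnts, err := clientSet.CoreV1().Events("test").List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			t.Fatal(err)
		}

		reloadCount := 0
		for _, e := range evnts.Items {
			if e.Reason == "RELOAD" && e.Type == corev1.EventTypeNormal {
				reloadCount++
			}
		}

		if reloadCount != 1 {
			t.Fatalf("expected 1 RELOAD event, got %d", reloadCount)
		}
	}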
@@ -422,18 +413,16 @@ func extractReloadCount(mf *dto.MetricFamily) (float64, error) {
 // Grace period to wait for pod shutdown is in seconds.
 func (f *Framework) DeleteNGINXPod(grace int64) {
 	ns := f.Namespace
-	pod, err := GetIngressNGINXPod(ns, f.KubeClientSet)
-	assert.Nil(ginkgo.GinkgoT(), err, "expected ingress nginx pod to be running")
-
-	err = f.KubeClientSet.CoreV1().Pods(ns).Delete(context.TODO(), pod.GetName(), *metav1.NewDeleteOptions(grace))
+	err := f.KubeClientSet.CoreV1().Pods(ns).Delete(context.TODO(), f.pod.GetName(), *metav1.NewDeleteOptions(grace))
 	assert.Nil(ginkgo.GinkgoT(), err, "deleting ingress nginx pod")
 
 	err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
-		pod, err := GetIngressNGINXPod(ns, f.KubeClientSet)
-		if err != nil || pod == nil {
+		err := f.updateIngressNGINXPod()
+		if err != nil || f.pod == nil {
 			return false, nil
 		}
-		return pod.GetName() != "", nil
+		return f.pod.GetName() != "", nil
 	})
 	assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress nginx pod to come up again")
 }

@@ -217,10 +217,10 @@ func podRunningReady(p *core.Pod) (bool, error) {
 	return true, nil
 }
 
-// GetIngressNGINXPod returns the ingress controller running pod
-func GetIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
+// getIngressNGINXPod returns the ingress controller running pod
+func getIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
 	var pod *core.Pod
-	err := wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
+	err := wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) {
 		l, err := kubeClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
 			LabelSelector: "app.kubernetes.io/name=ingress-nginx",
 		})
@@ -17,36 +17,23 @@ limitations under the License.
 package framework
 
 import (
-	"bytes"
+	"context"
 	"fmt"
-	"os/exec"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
 )
 
 // Logs returns the log entries of a given Pod.
-func Logs(pod *corev1.Pod) (string, error) {
-	var (
-		execOut bytes.Buffer
-		execErr bytes.Buffer
-	)
-
-	if len(pod.Spec.Containers) != 1 {
-		return "", fmt.Errorf("could not determine which container to use")
-	}
-
-	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v logs --namespace %s %s", KubectlPath, pod.Namespace, pod.Name))
-	cmd.Stdout = &execOut
-	cmd.Stderr = &execErr
-
-	err := cmd.Run()
+func Logs(client kubernetes.Interface, namespace, podName string) (string, error) {
+	logs, err := client.CoreV1().RESTClient().Get().
+		Resource("pods").
+		Namespace(namespace).
+		Name(podName).SubResource("log").
+		Param("container", "controller").
+		Do(context.TODO()).
+		Raw()
 	if err != nil {
-		return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err)
+		return "", err
 	}
 
-	if execErr.Len() > 0 {
-		return "", fmt.Errorf("stderr: %v", execErr.String())
-	}
-
-	return execOut.String(), nil
+	return string(logs), nil
 }
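Fetching logs through the API server's log subresource removes the framework's dependency on a local kubectl binary (the old implementation shelled out to KubectlPath). client-go also exposes a typed helper for the same request; a sketch of the equivalent call, reusing the imports above and assuming the controller container is named "controller" as in the raw version:

	// logsViaTypedClient mirrors Logs using the typed PodInterface helper.
	func logsViaTypedClient(client kubernetes.Interface, namespace, podName string) (string, error) {
		raw, err := client.CoreV1().Pods(namespace).
			GetLogs(podName, &corev1.PodLogOptions{Container: "controller"}).
			DoRaw(context.TODO())
		if err != nil {
			return "", err
		}
		return string(raw), nil
	}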
@@ -64,8 +64,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
 	ginkgo.It("should shutdown after waiting 60 seconds for pending connections to be closed", func(done ginkgo.Done) {
 		defer close(done)
 
-		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-			func(deployment *appsv1.Deployment) error {
+		err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 			grace := int64(3600)
 			deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &grace
 			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})

@@ -110,8 +109,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
 	ginkgo.It("should shutdown after waiting 150 seconds for pending connections to be closed", func(done ginkgo.Done) {
 		defer close(done)
 
-		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-			func(deployment *appsv1.Deployment) error {
+		err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 			grace := int64(3600)
 			deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &grace
 			_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})

@@ -207,7 +207,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
 	assert.Nil(ginkgo.GinkgoT(), err)
 	assert.Equal(ginkgo.GinkgoT(), output, `{"controllerPodsCount":1}`)
 
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil)
+	err = f.UpdateIngressControllerDeployment(nil)
 	assert.Nil(ginkgo.GinkgoT(), err)
 
 	output, err = f.ExecIngressPod(curlCmd)

@@ -47,8 +47,7 @@ var _ = framework.IngressNginxDescribe("[SSL] [Flag] default-ssl-certificate", f
 		f.Namespace)
 	assert.Nil(ginkgo.GinkgoT(), err)
 
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-		func(deployment *appsv1.Deployment) error {
+	err = f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 		args := deployment.Spec.Template.Spec.Containers[0].Args
 		args = append(args, "--default-ssl-certificate=$(POD_NAMESPACE)/"+secretName)
 		deployment.Spec.Template.Spec.Containers[0].Args = args

@@ -37,8 +37,7 @@ var _ = framework.IngressNginxDescribe("[Flag] disable-catch-all", func() {
 	ginkgo.BeforeEach(func() {
 		f.NewEchoDeploymentWithReplicas(1)
 
-		err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-			func(deployment *appsv1.Deployment) error {
+		err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 			args := deployment.Spec.Template.Spec.Containers[0].Args
 			args = append(args, "--disable-catch-all=true")
 			deployment.Spec.Template.Spec.Containers[0].Args = args

@@ -100,8 +100,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
 
 	ginkgo.Context("With a specific ingress-class", func() {
 		ginkgo.BeforeEach(func() {
-			err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-				func(deployment *appsv1.Deployment) error {
+			err := f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 				args := []string{}
 				for _, v := range deployment.Spec.Template.Spec.Containers[0].Args {
 					if strings.Contains(v, "--ingress-class") {

@@ -214,8 +213,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
 		assert.Nil(ginkgo.GinkgoT(), err, "creating IngressClass")
 	}
 
-	pod, err := framework.GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
-	assert.Nil(ginkgo.GinkgoT(), err, "searching ingress controller pod")
+	pod := f.GetIngressNGINXPod()
 	serviceAccount := pod.Spec.ServiceAccountName
 
 	crb, err := f.KubeClientSet.RbacV1().ClusterRoleBindings().Get(context.Background(), "ingress-nginx-class", metav1.GetOptions{})

@@ -232,8 +230,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
 	_, err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Update(context.Background(), crb, metav1.UpdateOptions{})
 	assert.Nil(ginkgo.GinkgoT(), err, "searching cluster role binding")
 
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-		func(deployment *appsv1.Deployment) error {
+	err = f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 		args := []string{}
 		for _, v := range deployment.Spec.Template.Spec.Containers[0].Args {
 			if strings.Contains(v, "--ingress-class") {

@@ -62,8 +62,7 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies", func(
 	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller cluster role to use a pod security policy")
 
 	// update the deployment just to trigger a rolling update and the use of the security policy
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-		func(deployment *appsv1.Deployment) error {
+	err = f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 		args := deployment.Spec.Template.Spec.Containers[0].Args
 		args = append(args, "--v=2")
 		deployment.Spec.Template.Spec.Containers[0].Args = args

@@ -57,8 +57,7 @@ var _ = framework.IngressNginxDescribe("[Security] Pod Security Policies with vo
 	_, err = f.KubeClientSet.RbacV1().Roles(f.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{})
 	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress controller cluster role to use a pod security policy")
 
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-		func(deployment *appsv1.Deployment) error {
+	err = f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 		args := deployment.Spec.Template.Spec.Containers[0].Args
 		args = append(args, "--v=2")
 		deployment.Spec.Template.Spec.Containers[0].Args = args

@@ -44,8 +44,7 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 	port, cmd, err := f.KubectlProxy(0)
 	assert.Nil(ginkgo.GinkgoT(), err, "unexpected error starting kubectl proxy")
 
-	err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
-		func(deployment *appsv1.Deployment) error {
+	err = f.UpdateIngressControllerDeployment(func(deployment *appsv1.Deployment) error {
 		args := []string{}
 		// flags --publish-service and --publish-status-address are mutually exclusive