commit 55693ce10c
Timofey Titovets, 2025-02-17 09:50:35 -08:00, committed by GitHub
(no known key found for this signature in database; GPG key ID B5690EEEBB952194)
2 changed files with 88 additions and 14 deletions

File 1 of 2 — status sync logic:

@@ -173,6 +173,30 @@ func nameOrIPToLoadBalancerIngress(nameOrIP string) v1.IngressLoadBalancerIngress
     return v1.IngressLoadBalancerIngress{Hostname: nameOrIP}
 }
 
+func filterLabels(toFilter map[string]string) map[string]string {
+    // By convention, app.kubernetes.io labels are "reserved well-known" labels.
+    // In our case, we add those labels as identifiers of the Ingress deployment
+    // in this namespace, so we can select its pods as a set of Ingress instances.
+    // Since those labels are also generated as part of a Helm deployment, we can
+    // be reasonably confident they cover 95% of the cases.
+    podLabels := make(map[string]string)
+    for k, v := range toFilter {
+        switch k {
+        case "pod-template-hash":
+        case "controller-revision-hash":
+        case "pod-template-generation":
+        // The next two change on every Helm upgrade, so they are dropped too:
+        case "app.kubernetes.io/version":
+        case "helm.sh/chart":
+        default:
+            podLabels[k] = v
+        }
+    }
+    return podLabels
+}
+
 // runningAddresses returns a list of IP addresses and/or FQDN where the
 // ingress controller is currently running
 func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error) {
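
To make the filtering concrete, here is a minimal standalone sketch of the helper above with a made-up Helm label set (the label names and values are illustrative, not from the commit):

    package main

    import "fmt"

    // Standalone copy of the helper, with the drop-cases folded together.
    func filterLabels(toFilter map[string]string) map[string]string {
        podLabels := make(map[string]string)
        for k, v := range toFilter {
            switch k {
            case "pod-template-hash", "controller-revision-hash", "pod-template-generation",
                "app.kubernetes.io/version", "helm.sh/chart":
                // revision- and chart-specific labels: dropped
            default:
                podLabels[k] = v
            }
        }
        return podLabels
    }

    func main() {
        // Hypothetical labels as a Helm chart might render them.
        fmt.Println(filterLabels(map[string]string{
            "app.kubernetes.io/name":    "ingress-nginx",
            "app.kubernetes.io/version": "1.12.0",
            "helm.sh/chart":             "ingress-nginx-4.12.0",
            "pod-template-hash":         "5c9d4bd88",
        }))
        // Output: map[app.kubernetes.io/name:ingress-nginx]
    }

Only the stable identity labels survive, so a selector built from them keeps matching pods across a rolling update or a Helm upgrade.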
@@ -190,9 +214,11 @@ func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error)
         return statusAddressFromService(s.PublishService, s.Client)
     }
 
+    podLabels := filterLabels(k8s.IngressPodDetails.Labels)
+
     // get information about all the pods running the ingress controller
     pods, err := s.Client.CoreV1().Pods(k8s.IngressPodDetails.Namespace).List(context.TODO(), metav1.ListOptions{
-        LabelSelector: labels.SelectorFromSet(k8s.IngressPodDetails.Labels).String(),
+        LabelSelector: labels.SelectorFromSet(podLabels).String(),
     })
     if err != nil {
         return nil, err
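
For reference, SelectorFromSet (from k8s.io/apimachinery/pkg/labels) renders a label map as the comma-separated selector string handed to ListOptions; a tiny sketch with hypothetical labels:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/labels"
    )

    func main() {
        // Hypothetical post-filter label set.
        sel := labels.SelectorFromSet(labels.Set{
            "app.kubernetes.io/name":     "ingress-nginx",
            "app.kubernetes.io/instance": "ingress-nginx",
        })
        // Keys are sorted, so the output is deterministic:
        // app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
        fmt.Println(sel.String())
    }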
@@ -220,6 +246,11 @@ func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error)
             continue
         }
 
+        if pod.GetDeletionTimestamp() != nil {
+            klog.InfoS("POD is terminating", "pod", klog.KObj(&pod), "node", pod.Spec.NodeName)
+            continue
+        }
+
         name := k8s.GetNodeIPOrName(s.Client, pod.Spec.NodeName, s.UseNodeInternalIP)
         if !stringInIngresses(name, addrs) {
             addrs = append(addrs, nameOrIPToLoadBalancerIngress(name))
@@ -230,26 +261,27 @@ func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error)
 }
 
 func (s *statusSync) isRunningMultiplePods() bool {
-    // As a standard, app.kubernetes.io are "reserved well-known" labels.
-    // In our case, we add those labels as identifiers of the Ingress
-    // deployment in this namespace, so we can select it as a set of Ingress instances.
-    // As those labels are also generated as part of a HELM deployment, we can be "safe" they
-    // cover 95% of the cases
-    podLabel := make(map[string]string)
-    for k, v := range k8s.IngressPodDetails.Labels {
-        if k != "pod-template-hash" && k != "controller-revision-hash" && k != "pod-template-generation" {
-            podLabel[k] = v
-        }
-    }
+    podLabels := filterLabels(k8s.IngressPodDetails.Labels)
+
     pods, err := s.Client.CoreV1().Pods(k8s.IngressPodDetails.Namespace).List(context.TODO(), metav1.ListOptions{
-        LabelSelector: labels.SelectorFromSet(podLabel).String(),
+        LabelSelector: labels.SelectorFromSet(podLabels).String(),
     })
     if err != nil {
         return false
     }
 
-    return len(pods.Items) > 1
+    runningPods := make([]apiv1.Pod, 0)
+    for i := range pods.Items {
+        pod := pods.Items[i]
+        if pod.GetDeletionTimestamp() != nil {
+            klog.InfoS("POD is terminating", "pod", klog.KObj(&pod), "node", pod.Spec.NodeName)
+            continue
+        }
+        runningPods = append(runningPods, pod)
+    }
+
+    return len(runningPods) > 1
 }
 
 // standardizeLoadBalancerIngresses sorts the list of loadbalancer by
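
Both hunks add the same guard: a pod whose DeletionTimestamp is non-nil has already been asked to shut down, so it should neither contribute a status address nor count toward the replica total. A small sketch of the pattern in isolation (the helper name is ours, not the commit's):

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // runningOnly keeps pods that are not terminating.
    func runningOnly(pods []apiv1.Pod) []apiv1.Pod {
        out := make([]apiv1.Pod, 0, len(pods))
        for i := range pods {
            if pods[i].GetDeletionTimestamp() != nil {
                continue // terminating: kubelet is tearing it down
            }
            out = append(out, pods[i])
        }
        return out
    }

    func main() {
        now := metav1.Now()
        pods := []apiv1.Pod{
            {ObjectMeta: metav1.ObjectMeta{Name: "alive"}},
            {ObjectMeta: metav1.ObjectMeta{Name: "terminating", DeletionTimestamp: &now}},
        }
        fmt.Println(len(runningOnly(pods))) // prints 1
    }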

File 2 of 2 — status sync tests:

@@ -101,6 +101,31 @@ func buildSimpleClientSet() *testclient.Clientset {
                 },
             },
         },
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "foo_terminating",
+                Namespace: metav1.NamespaceDefault,
+                Labels: map[string]string{
+                    "label_sig":                 "foo_pod",
+                    "app.kubernetes.io/version": "x.x.x",
+                    "pod-template-hash":         "hash-value",
+                    "controller-revision-hash":  "deadbeef",
+                },
+                DeletionTimestamp: &metav1.Time{},
+            },
+            Spec: apiv1.PodSpec{
+                NodeName: "foo_node_3",
+            },
+            Status: apiv1.PodStatus{
+                Phase: apiv1.PodRunning,
+                Conditions: []apiv1.PodCondition{
+                    {
+                        Type:   apiv1.PodReady,
+                        Status: apiv1.ConditionTrue,
+                    },
+                },
+            },
+        },
         {
             ObjectMeta: metav1.ObjectMeta{
                 Name: "foo3",
@@ -168,6 +193,23 @@ func buildSimpleClientSet() *testclient.Clientset {
                 },
             },
         },
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "foo_node_3",
+            },
+            Status: apiv1.NodeStatus{
+                Addresses: []apiv1.NodeAddress{
+                    {
+                        Type:    apiv1.NodeInternalIP,
+                        Address: "12.0.0.1",
+                    },
+                    {
+                        Type:    apiv1.NodeExternalIP,
+                        Address: "12.0.0.2",
+                    },
+                },
+            },
+        },
     }},
     &apiv1.EndpointsList{Items: []apiv1.Endpoints{
         {
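
Taken together, the two fixtures exercise the new guard: foo_terminating is deliberately Running and Ready yet carries a set DeletionTimestamp, and it appears to be the only pod placed on foo_node_3, so a test walking runningAddresses can assert that neither 12.0.0.1 nor 12.0.0.2 leaks into the published ingress status.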