fix: status.go - use filtered labels for pod discovery
Co-authored-by: Marco Ebert <marco_ebert@icloud.com>
Signed-off-by: Timofey Titovets <nefelim4ag@gmail.com>
This commit is contained in:
parent
a26733760d
commit
c4766f7660
1 changed file with 29 additions and 13 deletions
|
@ -173,6 +173,30 @@ func nameOrIPToLoadBalancerIngress(nameOrIP string) v1.IngressLoadBalancerIngres
|
|||
return v1.IngressLoadBalancerIngress{Hostname: nameOrIP}
|
||||
}
|
||||
|
||||
// filterLabels returns a copy of toFilter with volatile, revision-specific
// labels removed, leaving a stable label set suitable for selecting every pod
// of the same Ingress controller deployment.
//
// As a standard, app.kubernetes.io are "reserved well-known" labels.
// In our case, we add those labels as identifiers of the Ingress
// deployment in this namespace, so we can select it as a set of Ingress instances.
// As those labels are also generated as part of a HELM deployment, we can be "safe" they
// cover 95% of the cases.
func filterLabels(toFilter map[string]string) map[string]string {
	// Pre-size: at most len(toFilter) entries survive filtering.
	podLabels := make(map[string]string, len(toFilter))
	for k, v := range toFilter {
		switch k {
		case "pod-template-hash",
			"controller-revision-hash",
			"pod-template-generation",
			// Handle helm upgrade: chart/version labels change between releases.
			"app.kubernetes.io/version",
			"helm.sh/chart":
			// Drop labels that differ between revisions of the same deployment.
		default:
			podLabels[k] = v
		}
	}

	return podLabels
}
|
||||
|
||||
// runningAddresses returns a list of IP addresses and/or FQDN where the
|
||||
// ingress controller is currently running
|
||||
func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error) {
|
||||
|
@ -190,9 +214,11 @@ func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error)
|
|||
return statusAddressFromService(s.PublishService, s.Client)
|
||||
}
|
||||
|
||||
podLabels := filterLabels(k8s.IngressPodDetails.Labels)
|
||||
|
||||
// get information about all the pods running the ingress controller
|
||||
pods, err := s.Client.CoreV1().Pods(k8s.IngressPodDetails.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(k8s.IngressPodDetails.Labels).String(),
|
||||
LabelSelector: labels.SelectorFromSet(podLabels).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -235,20 +261,10 @@ func (s *statusSync) runningAddresses() ([]v1.IngressLoadBalancerIngress, error)
|
|||
}
|
||||
|
||||
func (s *statusSync) isRunningMultiplePods() bool {
|
||||
// As a standard, app.kubernetes.io are "reserved well-known" labels.
|
||||
// In our case, we add those labels as identifiers of the Ingress
|
||||
// deployment in this namespace, so we can select it as a set of Ingress instances.
|
||||
// As those labels are also generated as part of a HELM deployment, we can be "safe" they
|
||||
// cover 95% of the cases
|
||||
podLabel := make(map[string]string)
|
||||
for k, v := range k8s.IngressPodDetails.Labels {
|
||||
if k != "pod-template-hash" && k != "controller-revision-hash" && k != "pod-template-generation" {
|
||||
podLabel[k] = v
|
||||
}
|
||||
}
|
||||
podLabels := filterLabels(k8s.IngressPodDetails.Labels)
|
||||
|
||||
pods, err := s.Client.CoreV1().Pods(k8s.IngressPodDetails.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(podLabel).String(),
|
||||
LabelSelector: labels.SelectorFromSet(podLabels).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
|
|
Loading…
Reference in a new issue