/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"reflect"
"sync"
"time"

"github.com/golang/glog"

apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
informerv1 "k8s.io/client-go/informers/core/v1"
informerv1beta1 "k8s.io/client-go/informers/extensions/v1beta1"
"k8s.io/client-go/kubernetes"
scheme "k8s.io/client-go/kubernetes/scheme"
unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"

"k8s.io/ingress/controllers/gce/loadbalancers"
)
var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc

// DefaultClusterUID is the uid to use for cluster resources created by an
// L7 controller created without specifying the --cluster-uid flag.
DefaultClusterUID = ""

// DefaultFirewallName is the name to use for firewall rules created
// by an L7 controller when the --firewall-rule flag is not used.
DefaultFirewallName = ""

// Frequency to poll on local stores to sync.
storeSyncPollPeriod = 5 * time.Second
)
// ControllerContext holds the informers and the stop channel shared by the
// controllers.
type ControllerContext struct {
IngressInformer cache.SharedIndexInformer
ServiceInformer cache.SharedIndexInformer
PodInformer cache.SharedIndexInformer
NodeInformer cache.SharedIndexInformer
// StopCh is the stop channel shared among the controllers.
StopCh chan struct{}
}
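// NewControllerContext returns a ControllerContext whose informers watch the
// given namespace and relist from the apiserver every resyncPeriod.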
func NewControllerContext(kubeClient kubernetes.Interface, namespace string, resyncPeriod time.Duration) *ControllerContext {
return &ControllerContext{
IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
ServiceInformer: informerv1.NewServiceInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
PodInformer: informerv1.NewPodInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
NodeInformer: informerv1.NewNodeInformer(kubeClient, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
StopCh: make(chan struct{}),
}
}
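// Start runs all of the context's informers; they stop when StopCh is closed.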
func (ctx *ControllerContext) Start() {
go ctx.IngressInformer.Run(ctx.StopCh)
go ctx.ServiceInformer.Run(ctx.StopCh)
go ctx.PodInformer.Run(ctx.StopCh)
go ctx.NodeInformer.Run(ctx.StopCh)
}
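// A minimal wiring sketch for the types above (hypothetical caller; assumes a
// configured kubernetes.Interface and *ClusterManager are already in hand,
// and the 30s resync period is illustrative):
//
//	ctx := NewControllerContext(kubeClient, apiv1.NamespaceAll, 30*time.Second)
//	lbc, err := NewLoadBalancerController(kubeClient, ctx, clusterManager)
//	if err != nil {
//		glog.Fatalf("Failed to create loadbalancer controller: %v", err)
//	}
//	ctx.Start()
//	lbc.Run()
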
// LoadBalancerController watches the kubernetes api and adds/removes services
// from the loadbalancer, via loadBalancerConfig.
type LoadBalancerController struct {
client kubernetes.Interface
ingressSynced cache.InformerSynced
serviceSynced cache.InformerSynced
podSynced cache.InformerSynced
nodeSynced cache.InformerSynced
ingLister StoreToIngressLister
nodeLister StoreToNodeLister
svcLister StoreToServiceLister
// Health checks are the readiness probes of containers on pods.
podLister StoreToPodLister
// TODO: Watch secrets
CloudClusterManager *ClusterManager
recorder record.EventRecorder
nodeQueue *taskQueue
ingQueue *taskQueue
tr *GCETranslator
stopCh chan struct{}
// stopLock is used to enforce that only a single call to Stop is active.
// Needed because we allow stopping through an http endpoint, and allowing
// concurrent stoppers leads to stack traces.
stopLock sync.Mutex
shutdown bool
// tlsLoader loads secrets from the Kubernetes apiserver for Ingresses.
tlsLoader tlsLoader
// hasSynced returns true if all associated sub-controllers have synced.
// Abstracted into a func for testing.
hasSynced func() bool
}
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
// required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient kubernetes.Interface, ctx *ControllerContext, clusterManager *ClusterManager) (*LoadBalancerController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
Interface: kubeClient.Core().Events(""),
})
lbc := LoadBalancerController{
client: kubeClient,
CloudClusterManager: clusterManager,
stopCh: ctx.StopCh,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme,
apiv1.EventSource{Component: "loadbalancer-controller"}),
}
lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
lbc.ingQueue = NewTaskQueue(lbc.sync)
lbc.hasSynced = lbc.storesSynced
lbc.ingressSynced = ctx.IngressInformer.HasSynced
lbc.serviceSynced = ctx.ServiceInformer.HasSynced
lbc.podSynced = ctx.PodInformer.HasSynced
lbc.nodeSynced = ctx.NodeInformer.HasSynced
lbc.ingLister.Store = ctx.IngressInformer.GetStore()
lbc.svcLister.Indexer = ctx.ServiceInformer.GetIndexer()
lbc.podLister.Indexer = ctx.PodInformer.GetIndexer()
lbc.nodeLister.Indexer = ctx.NodeInformer.GetIndexer()
// ingress event handler
ctx.IngressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
addIng := obj.(*extensions.Ingress)
if !isGCEIngress(addIng) && !isGCEMultiClusterIngress(addIng) {
glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey)
return
}
lbc.recorder.Eventf(addIng, apiv1.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
lbc.ingQueue.enqueue(obj)
},
DeleteFunc: func(obj interface{}) {
delIng := obj.(*extensions.Ingress)
if !isGCEIngress(delIng) && !isGCEMultiClusterIngress(delIng) {
glog.Infof("Ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
return
}
glog.Infof("Delete notification received for Ingress %v/%v", delIng.Namespace, delIng.Name)
lbc.ingQueue.enqueue(obj)
},
UpdateFunc: func(old, cur interface{}) {
curIng := cur.(*extensions.Ingress)
if !isGCEIngress(curIng) && !isGCEMultiClusterIngress(curIng) {
return
}
if !reflect.DeepEqual(old, cur) {
glog.V(3).Infof("Ingress %v changed, syncing", curIng.Name)
}
lbc.ingQueue.enqueue(cur)
},
})

// service event handler
ctx.ServiceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: lbc.enqueueIngressForService,
UpdateFunc: func(old, cur interface{}) {
if !reflect.DeepEqual(old, cur) {
lbc.enqueueIngressForService(cur)
}
},
// Ingress deletes matter, service deletes don't.
})

// node event handler
ctx.NodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: lbc.nodeQueue.enqueue,
DeleteFunc: lbc.nodeQueue.enqueue,
// Nodes are updated every 10s and we don't care, so no update handler.
})

lbc.tr = &GCETranslator{&lbc}
lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}
glog.V(3).Infof("Created new loadbalancer controller")
return &lbc, nil
}
// enqueueIngressForService enqueues all the Ingresses for a Service.
func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
svc := obj.(*apiv1.Service)
ings, err := lbc.ingLister.GetServiceIngress(svc)
if err != nil {
glog.V(5).Infof("ignoring service %v: %v", svc.Name, err)
return
}
for _, ing := range ings {
if !isGCEIngress(&ing) {
continue
}
lbc.ingQueue.enqueue(&ing)
}
}
// Run starts the loadbalancer controller.
func (lbc *LoadBalancerController) Run() {
glog.Infof("Starting loadbalancer controller")
go lbc.ingQueue.run(time.Second, lbc.stopCh)
go lbc.nodeQueue.run(time.Second, lbc.stopCh)
<-lbc.stopCh
glog.Infof("Shutting down Loadbalancer Controller")
}
// Stop stops the loadbalancer controller. It also deletes cluster resources
// if deleteAll is true.
func (lbc *LoadBalancerController) Stop(deleteAll bool) error {
// Stop is invoked from the http endpoint.
lbc.stopLock.Lock()
defer lbc.stopLock.Unlock()
// Only try draining the workqueue if we haven't already.
if !lbc.shutdown {
close(lbc.stopCh)
glog.Infof("Shutting down controller queues.")
lbc.ingQueue.shutdown()
lbc.nodeQueue.shutdown()
lbc.shutdown = true
}
// Deleting shared cluster resources is idempotent.
if deleteAll {
glog.Infof("Shutting down cluster manager.")
return lbc.CloudClusterManager.shutdown()
}
return nil
}
// storesSynced returns true if all the sub-controllers have finished their
// first sync with apiserver.
func (lbc *LoadBalancerController) storesSynced() bool {
return (
// wait for pods to sync so we don't allocate a default health check when
// an endpoint has a readiness probe.
lbc.podSynced() &&
// wait for services so we don't thrash on backend creation.
lbc.serviceSynced() &&
// wait for nodes so we don't disconnect a backend from an instance
// group just because we don't realize there are nodes in that zone.
lbc.nodeSynced() &&
// Wait for ingresses as a safety measure. We don't really need this.
lbc.ingressSynced())
}
// sync manages Ingress creates/updates/deletes.
func (lbc *LoadBalancerController) sync(key string) (err error) {
if !lbc.hasSynced() {
time.Sleep(storeSyncPollPeriod)
return fmt.Errorf("waiting for stores to sync")
}
glog.V(3).Infof("Syncing %v", key)
ingresses, err := lbc.ingLister.List()
if err != nil {
return err
}
nodePorts := lbc.tr.toNodePorts(&ingresses)
lbNames := lbc.ingLister.Store.ListKeys()
lbs, err := lbc.ListRuntimeInfo()
if err != nil {
return err
}
nodeNames, err := lbc.getReadyNodeNames()
if err != nil {
return err
}
obj, ingExists, err := lbc.ingLister.Store.GetByKey(key)
if err != nil {
return err
}
// This performs a 2 phase checkpoint with the cloud:
// * Phase 1 creates/verifies resources are as expected. At the end of a
// successful checkpoint we know that existing L7s are WAI, and the L7
// for the Ingress associated with "key" is ready for a UrlMap update.
// If this encounters an error, eg for quota reasons, we want to invoke
// Phase 2 right away and retry checkpointing.
// * Phase 2 performs GC by refcounting shared resources. This needs to
// happen periodically whether or not stage 1 fails. At the end of a
// successful GC we know that there are no dangling cloud resources that
// don't have an associated Kubernetes Ingress/Service/Endpoint.
var syncError error
defer func() {
if deferErr := lbc.CloudClusterManager.GC(lbNames, nodePorts); deferErr != nil {
err = fmt.Errorf("error during sync %v, error during GC %v", syncError, deferErr)
}
glog.V(3).Infof("Finished syncing %v", key)
}()
if ingExists {
ing := obj.(*extensions.Ingress)
if isGCEMultiClusterIngress(ing) {
return lbc.syncMultiClusterIngress(ing, nodeNames)
}
}
// Record any errors during sync and throw a single error at the end. This
// allows us to free up associated cloud resources ASAP.
if err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, nodePorts); err != nil {
// TODO: Implement proper backoff for the queue.
eventMsg := "GCE"
if ingExists {
lbc.recorder.Eventf(obj.(*extensions.Ingress), apiv1.EventTypeWarning, eventMsg, err.Error())
} else {
err = fmt.Errorf("%v, error: %v", eventMsg, err)
}
syncError = err
}
if !ingExists {
return syncError
}
// Update the UrlMap of the single loadbalancer that came through the watch.
l7, err := lbc.CloudClusterManager.l7Pool.Get(key)
if err != nil {
syncError = fmt.Errorf("%v, unable to get loadbalancer: %v", syncError, err)
return syncError
}
ing := *obj.(*extensions.Ingress)
if urlMap, err := lbc.tr.toURLMap(&ing); err != nil {
syncError = fmt.Errorf("%v, convert to url map error %v", syncError, err)
} else if err := l7.UpdateUrlMap(urlMap); err != nil {
lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "UrlMap", err.Error())
syncError = fmt.Errorf("%v, update url map error: %v", syncError, err)
} else if err := lbc.updateIngressStatus(l7, ing); err != nil {
lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "Status", err.Error())
syncError = fmt.Errorf("%v, update ingress error: %v", syncError, err)
}
return syncError
}
func (lbc *LoadBalancerController) syncMultiClusterIngress(ing *extensions.Ingress, nodeNames []string) error {
// For multi cluster ingress, we only need to manage the instance groups and named ports on those instance groups.
// Ensure that all the required instance groups exist with the required node ports.
nodePorts := lbc.tr.ingressToNodePorts(ing)
// Add the default backend node port.
nodePorts = append(nodePorts, lbc.CloudClusterManager.defaultBackendNodePort)
igs, err := lbc.CloudClusterManager.CreateInstanceGroups(nodePorts)
if err != nil {
return err
}
// Ensure that instance groups have the right nodes.
// This is also done whenever a node is added or removed from the cluster.
// We need it here as well since instance groups are not created until the first ingress is observed.
if err := lbc.CloudClusterManager.SyncNodesInInstanceGroups(nodeNames); err != nil {
return err
}
// Add instance group names as annotation on the ingress.
if ing.Annotations == nil {
ing.Annotations = map[string]string{}
}
err = setInstanceGroupsAnnotation(ing.Annotations, igs)
if err != nil {
return err
}
if err := lbc.updateAnnotations(ing.Name, ing.Namespace, ing.Annotations); err != nil {
return err
}
return nil
}
// updateIngressStatus updates the IP and annotations of a loadbalancer.
// The annotations are parsed by kubectl describe.
func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing extensions.Ingress) error {
ingClient := lbc.client.Extensions().Ingresses(ing.Namespace)
// Update IP through update/status endpoint
ip := l7.GetIP()
currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
if err != nil {
return err
}
currIng.Status = extensions.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{
{IP: ip},
},
},
}
if ip != "" {
lbIPs := ing.Status.LoadBalancer.Ingress
if len(lbIPs) == 0 || lbIPs[0].IP != ip {
// TODO: If this update fails it's probably resource version related,
// which means it's advantageous to retry right away vs requeuing.
glog.Infof("Updating loadbalancer %v/%v with IP %v", ing.Namespace, ing.Name, ip)
if _, err := ingClient.UpdateStatus(currIng); err != nil {
return err
}
lbc.recorder.Eventf(currIng, apiv1.EventTypeNormal, "CREATE", "ip: %v", ip)
}
}
annotations := loadbalancers.GetLBAnnotations(l7, currIng.Annotations, lbc.CloudClusterManager.backendPool)
if err := lbc.updateAnnotations(ing.Name, ing.Namespace, annotations); err != nil {
return err
}
return nil
}
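// updateAnnotations updates the annotations of an Ingress through the /update
// endpoint, skipping the write when nothing has changed.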
func (lbc *LoadBalancerController) updateAnnotations(name, namespace string, annotations map[string]string) error {
// Update annotations through /update endpoint
ingClient := lbc.client.Extensions().Ingresses(namespace)
currIng, err := ingClient.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if !reflect.DeepEqual(currIng.Annotations, annotations) {
glog.V(3).Infof("Updating annotations of %v/%v", namespace, name)
currIng.Annotations = annotations
if _, err := ingClient.Update(currIng); err != nil {
return err
}
}
return nil
}
// ListRuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module.
func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) {
ingList, err := lbc.ingLister.List()
if err != nil {
return lbs, err
}
for _, ing := range ingList.Items {
k, err := keyFunc(&ing)
if err != nil {
glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
continue
}
var tls *loadbalancers.TLSCerts
annotations := ingAnnotations(ing.ObjectMeta.Annotations)
// Load the TLS cert from the API Spec if it is not specified in the annotation.
// TODO: enforce this with validation.
if annotations.useNamedTLS() == "" {
tls, err = lbc.tlsLoader.load(&ing)
if err != nil {
glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
}
}
lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
Name: k,
TLS: tls,
TLSName: annotations.useNamedTLS(),
AllowHTTP: annotations.allowHTTP(),
StaticIPName: annotations.staticIPName(),
})
}
return lbs, nil
}
// syncNodes manages the syncing of kubernetes nodes to gce instance groups.
// The instance groups are referenced by loadbalancer backends.
func (lbc *LoadBalancerController) syncNodes(key string) error {
nodeNames, err := lbc.getReadyNodeNames()
if err != nil {
return err
}
if err := lbc.CloudClusterManager.instancePool.Sync(nodeNames); err != nil {
return err
}
return nil
}
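// getNodeReadyPredicate returns a predicate matching nodes whose NodeReady
// condition is True.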
func getNodeReadyPredicate() listers.NodeConditionPredicate {
return func(node *apiv1.Node) bool {
for ix := range node.Status.Conditions {
condition := &node.Status.Conditions[ix]
if condition.Type == apiv1.NodeReady {
return condition.Status == apiv1.ConditionTrue
}
}
return false
}
}
// getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
nodeNames := []string{}
nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
if err != nil {
return nodeNames, err
}
for _, n := range nodes {
if n.Spec.Unschedulable {
continue
}
nodeNames = append(nodeNames, n.Name)
}
return nodeNames, nil
}