Remove multiple calls to CreateInstanceGroups by reusing results from a single call

nikhiljindal 2017-09-11 20:29:49 -07:00
parent 0f4f5c97d4
commit 937cde666e
10 changed files with 68 additions and 75 deletions
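In short, the instance groups are now computed once per sync (in Checkpoint) and the result is threaded through to every consumer, instead of each consumer calling CreateInstanceGroups again. Below is a minimal sketch of the new flow using simplified stand-in types; ensureInstanceGroups and syncBackends are placeholders for the controller's real ClusterManager and BackendPool methods, not functions from this repository.

package main

import "fmt"

// InstanceGroup is a stand-in for *compute.InstanceGroup.
type InstanceGroup struct{ Name, Zone string }

// ensureInstanceGroups plays the role of ClusterManager.EnsureInstanceGroupsAndPorts:
// it runs once per sync and its result is reused by everything downstream.
func ensureInstanceGroups(ports []int64) ([]*InstanceGroup, error) {
    return []*InstanceGroup{{Name: "k8s-ig--example", Zone: "us-central1-b"}}, nil
}

// syncBackends plays the role of BackendPool.Sync: it now receives the groups
// instead of recomputing them for every port.
func syncBackends(ports []int64, igs []*InstanceGroup) error {
    for _, p := range ports {
        fmt.Printf("ensured backend for port %d using %d instance group(s)\n", p, len(igs))
    }
    return nil
}

func main() {
    ports := []int64{80, 443}
    igs, err := ensureInstanceGroups(ports) // computed once
    if err != nil {
        panic(err)
    }
    if err := syncBackends(ports, igs); err != nil { // reused here...
        panic(err)
    }
    fmt.Println("annotating ingress with", igs[0].Name) // ...and for the multi-cluster annotation
}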

View file

@@ -211,16 +211,23 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic
 }
 
 // Add will get or create a Backend for the given port.
-func (b *Backends) Add(p ServicePort) error {
+// Uses the given instance groups if non-nil, else creates instance groups.
+func (b *Backends) Add(p ServicePort, igs []*compute.InstanceGroup) error {
     // We must track the port even if creating the backend failed, because
     // we might've created a health-check for it.
     be := &compute.BackendService{}
     defer func() { b.snapshotter.Add(portKey(p.Port), be) }()
-    igs, namedPort, err := instances.CreateInstanceGroups(b.nodePool, b.namer, p.Port)
-    if err != nil {
-        return err
-    }
+    var err error
+    // Ideally callers should pass the instance groups to prevent recomputing them here.
+    // Igs can be nil in scenarios where we do not have instance groups such as
+    // while syncing default backend service.
+    if igs == nil {
+        igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, p.Port)
+        if err != nil {
+            return err
+        }
+    }
     // Ensure health check for backend service exists
     hcLink, err := b.ensureHealthCheck(p)
@@ -232,6 +239,7 @@ func (b *Backends) Add(p ServicePort) error {
     pName := b.namer.BeName(p.Port)
     be, _ = b.Get(p.Port)
     if be == nil {
+        namedPort := utils.GetNamedPort(p.Port)
         glog.V(2).Infof("Creating backend service for port %v named port %v", p.Port, namedPort)
         be, err = b.create(namedPort, hcLink, p, pName)
         if err != nil {
@@ -381,12 +389,12 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr
 }
 
 // Sync syncs backend services corresponding to ports in the given list.
-func (b *Backends) Sync(svcNodePorts []ServicePort) error {
+func (b *Backends) Sync(svcNodePorts []ServicePort, igs []*compute.InstanceGroup) error {
     glog.V(3).Infof("Sync: backends %v", svcNodePorts)
     // create backends for new ports, perform an edge hop for existing ports
     for _, port := range svcNodePorts {
-        if err := b.Add(port); err != nil {
+        if err := b.Add(port, igs); err != nil {
             return err
         }
     }
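One Go detail worth noting in the new Add: err is declared with var and the groups are assigned with = inside the if igs == nil block, so the igs parameter itself is updated; using := there would declare a new, shadowed igs that disappears at the closing brace. A minimal sketch of that pattern, with string slices standing in for *compute.InstanceGroup and computeGroups as a placeholder for instances.EnsureInstanceGroupsAndPorts:

package main

import "fmt"

// computeGroups is a placeholder for instances.EnsureInstanceGroupsAndPorts.
func computeGroups() ([]string, error) { return []string{"k8s-ig"}, nil }

// fill mirrors the shape of Backends.Add: only compute the groups when the
// caller did not supply them, and assign into the existing variable.
func fill(igs []string) ([]string, error) {
    var err error
    if igs == nil {
        igs, err = computeGroups() // '=' updates igs; ':=' would shadow it
        if err != nil {
            return nil, err
        }
    }
    return igs, nil
}

func main() {
    got, _ := fill(nil)                 // groups computed on demand
    fmt.Println(got)                    // [k8s-ig]
    got, _ = fill([]string{"given-ig"}) // caller-supplied groups reused as-is
    fmt.Println(got)                    // [given-ig]
}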

View file

@@ -80,7 +80,7 @@ func TestBackendPoolAdd(t *testing.T) {
         // Add a backend for a port, then re-add the same port and
         // make sure it corrects a broken link from the backend to
         // the instance group.
-        err := pool.Add(nodePort)
+        err := pool.Add(nodePort, nil)
         if err != nil {
             t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", nodePort, err)
         }
@@ -143,7 +143,7 @@ func TestHealthCheckMigration(t *testing.T) {
     hcp.CreateHttpHealthCheck(legacyHC)
     // Add the service port to the backend pool
-    pool.Add(p)
+    pool.Add(p, nil)
     // Assert the proper health check was created
     hc, _ := pool.healthChecker.Get(p.Port)
@@ -168,7 +168,7 @@ func TestBackendPoolUpdate(t *testing.T) {
     namer := utils.Namer{}
     p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
-    pool.Add(p)
+    pool.Add(p, nil)
     beName := namer.BeName(p.Port)
     be, err := f.GetGlobalBackendService(beName)
@@ -188,7 +188,7 @@ func TestBackendPoolUpdate(t *testing.T) {
     // Update service port to encrypted
     p.Protocol = utils.ProtocolHTTPS
-    pool.Sync([]ServicePort{p})
+    pool.Sync([]ServicePort{p}, nil)
     be, err = f.GetGlobalBackendService(beName)
     if err != nil {
@@ -214,7 +214,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
     namer := utils.Namer{}
     nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP}
-    pool.Add(nodePort)
+    pool.Add(nodePort, nil)
     beName := namer.BeName(nodePort.Port)
     be, _ := f.GetGlobalBackendService(beName)
@@ -227,7 +227,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
     f.calls = []int{}
     f.UpdateGlobalBackendService(be)
-    pool.Add(nodePort)
+    pool.Add(nodePort, nil)
     for _, call := range f.calls {
         if call == utils.Create {
             t.Fatalf("Unexpected create for existing backend service")
@@ -260,9 +260,9 @@ func TestBackendPoolSync(t *testing.T) {
     f := NewFakeBackendServices(noOpErrFunc)
     fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
     pool, _ := newTestJig(f, fakeIGs, true)
-    pool.Add(ServicePort{Port: 81})
-    pool.Add(ServicePort{Port: 90})
-    if err := pool.Sync(svcNodePorts); err != nil {
+    pool.Add(ServicePort{Port: 81}, nil)
+    pool.Add(ServicePort{Port: 90}, nil)
+    if err := pool.Sync(svcNodePorts, nil); err != nil {
         t.Errorf("Expected backend pool to sync, err: %v", err)
     }
     if err := pool.GC(svcNodePorts); err != nil {
@@ -361,7 +361,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
     })
     // Have pool sync the above backend service
-    bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS})
+    bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}, nil)
     // Verify the legacy health check has been deleted
     _, err = hcp.GetHttpHealthCheck(beName)
@@ -388,7 +388,7 @@ func TestBackendPoolShutdown(t *testing.T) {
     namer := utils.Namer{}
     // Add a backend-service and verify that it doesn't exist after Shutdown()
-    pool.Add(ServicePort{Port: 80})
+    pool.Add(ServicePort{Port: 80}, nil)
     pool.Shutdown()
     if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
         t.Fatalf("%v", err)
@@ -402,7 +402,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
     namer := utils.Namer{}
    // This will add the instance group k8s-ig to the instance pool
-    pool.Add(ServicePort{Port: 80})
+    pool.Add(ServicePort{Port: 80}, nil)
     be, err := f.GetGlobalBackendService(namer.BeName(80))
     if err != nil {
@@ -420,7 +420,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
     }
     // Make sure repeated adds don't clobber the inserted instance group
-    pool.Add(ServicePort{Port: 80})
+    pool.Add(ServicePort{Port: 80}, nil)
     be, err = f.GetGlobalBackendService(namer.BeName(80))
     if err != nil {
         t.Fatalf("%v", err)
@@ -462,7 +462,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
         return nil
     }
-    pool.Add(nodePort)
+    pool.Add(nodePort, nil)
     be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
     if err != nil {
         t.Fatalf("%v", err)

View file

@@ -30,10 +30,10 @@ type probeProvider interface {
 // as gce backendServices, and sync them through the BackendServices interface.
 type BackendPool interface {
     Init(p probeProvider)
-    Add(port ServicePort) error
+    Add(port ServicePort, igs []*compute.InstanceGroup) error
     Get(port int64) (*compute.BackendService, error)
     Delete(port int64) error
-    Sync(ports []ServicePort) error
+    Sync(ports []ServicePort, igs []*compute.InstanceGroup) error
     GC(ports []ServicePort) error
     Shutdown() error
     Status(name string) string

View file

@@ -116,9 +116,10 @@ func (c *ClusterManager) shutdown() error {
 // instance groups.
 // - backendServicePorts are the ports for which we require BackendServices.
 // - namedPorts are the ports which must be opened on instance groups.
+// Returns the list of all instance groups corresponding to the given loadbalancers.
 // If in performing the checkpoint the cluster manager runs out of quota, a
 // googleapi 403 is returned.
-func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, backendServicePorts []backends.ServicePort, namedPorts []backends.ServicePort) error {
+func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, backendServicePorts []backends.ServicePort, namedPorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
     if len(namedPorts) != 0 {
         // Add the default backend node port to the list of named ports for instance groups.
         namedPorts = append(namedPorts, c.defaultBackendNodePort)
@@ -129,19 +130,18 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
     namedPorts = uniq(namedPorts)
     backendServicePorts = uniq(backendServicePorts)
     // Create Instance Groups.
-    _, err := c.CreateInstanceGroups(namedPorts)
+    igs, err := c.EnsureInstanceGroupsAndPorts(namedPorts)
     if err != nil {
-        return err
+        return igs, err
     }
-    if err := c.backendPool.Sync(backendServicePorts); err != nil {
-        return err
+    if err := c.backendPool.Sync(backendServicePorts, igs); err != nil {
+        return igs, err
     }
-    if err := c.SyncNodesInInstanceGroups(nodeNames); err != nil {
-        return err
+    if err := c.instancePool.Sync(nodeNames); err != nil {
+        return igs, err
     }
     if err := c.l7Pool.Sync(lbs); err != nil {
-        return err
+        return igs, err
     }
     // TODO: Manage default backend and its firewall rule in a centralized way.
@@ -160,22 +160,22 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
         np = append(np, p.Port)
     }
     if err := c.firewallPool.Sync(np, nodeNames); err != nil {
-        return err
+        return igs, err
     }
-    return nil
+    return igs, nil
 }
 
-func (c *ClusterManager) CreateInstanceGroups(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
+func (c *ClusterManager) EnsureInstanceGroupsAndPorts(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
     var igs []*compute.InstanceGroup
     var err error
     for _, p := range servicePorts {
-        // CreateInstanceGroups always returns all the instance groups, so we can return
+        // EnsureInstanceGroupsAndPorts always returns all the instance groups, so we can return
         // the output of any call, no need to append the return from all calls.
         // TODO: Ideally, we want to call CreateInstaceGroups only the first time and
         // then call AddNamedPort multiple times. Need to update the interface to
         // achieve this.
-        igs, _, err = instances.CreateInstanceGroups(c.instancePool, c.ClusterNamer, p.Port)
+        igs, _, err = instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, p.Port)
         if err != nil {
             return nil, err
         }
@@ -183,13 +183,6 @@ func (c *ClusterManager) CreateInstanceGroups(servicePorts []backends.ServicePor
     return igs, nil
 }
 
-func (c *ClusterManager) SyncNodesInInstanceGroups(nodeNames []string) error {
-    if err := c.instancePool.Sync(nodeNames); err != nil {
-        return err
-    }
-    return nil
-}
-
 // GC garbage collects unused resources.
 // - lbNames are the names of L7 loadbalancers we wish to exist. Those not in
 // this list are removed from the cloud.
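As the diff above shows, every error path in the reworked Checkpoint returns igs alongside the error, so a caller can still see whichever groups were ensured before the failure. A small sketch of that caller-side contract; checkpoint here is a stub, while the real method also takes load balancers, node names and two port lists:

package main

import (
    "errors"
    "fmt"
)

type InstanceGroup struct{ Name string }

// checkpoint stands in for ClusterManager.Checkpoint: it hands back the
// instance groups it ensured even when a later step fails.
func checkpoint() ([]*InstanceGroup, error) {
    igs := []*InstanceGroup{{Name: "k8s-ig--example"}}
    return igs, errors.New("backend sync failed")
}

func main() {
    igs, err := checkpoint()
    if err != nil {
        fmt.Println("sync error:", err)
    }
    // igs can still be non-nil here, so the controller can keep the
    // instance-group annotation current even on a partially failed sync.
    fmt.Println("groups:", len(igs))
}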

View file

@@ -36,7 +36,6 @@ import (
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
 
-    "k8s.io/ingress/controllers/gce/backends"
     "k8s.io/ingress/controllers/gce/loadbalancers"
 )
@@ -288,7 +287,7 @@ func (lbc *LoadBalancerController) sync(key string) (err error) {
     allNodePorts := lbc.tr.toNodePorts(&allIngresses)
     gceNodePorts := lbc.tr.toNodePorts(&gceIngresses)
     lbNames := lbc.ingLister.Store.ListKeys()
-    lbs, err := lbc.ListGCERuntimeInfo()
+    lbs, err := lbc.toRuntimeInfo(gceIngresses)
     if err != nil {
         return err
     }
@@ -319,10 +318,10 @@ func (lbc *LoadBalancerController) sync(key string) (err error) {
         }
         glog.V(3).Infof("Finished syncing %v", key)
     }()
     // Record any errors during sync and throw a single error at the end. This
     // allows us to free up associated cloud resources ASAP.
-    if err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, gceNodePorts, allNodePorts); err != nil {
+    igs, err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, gceNodePorts, allNodePorts)
+    if err != nil {
         // TODO: Implement proper backoff for the queue.
         eventMsg := "GCE"
         if ingExists {
@@ -336,19 +335,12 @@ func (lbc *LoadBalancerController) sync(key string) (err error) {
     if !ingExists {
         return syncError
     }
-    ing := obj.(*extensions.Ingress)
-    if isGCEMultiClusterIngress(ing) {
+    ing := *obj.(*extensions.Ingress)
+    if isGCEMultiClusterIngress(&ing) {
         // Add instance group names as annotation on the ingress.
         if ing.Annotations == nil {
             ing.Annotations = map[string]string{}
         }
-        // Since we just created instance groups in Checkpoint, calling create
-        // instance groups again should just return names of the existing
-        // instance groups. It does not matter which nodePort we pass as argument.
-        igs, err := lbc.CloudClusterManager.CreateInstanceGroups([]backends.ServicePort{allNodePorts[0]})
-        if err != nil {
-            return fmt.Errorf("error in creating instance groups: %v", err)
-        }
         err = setInstanceGroupsAnnotation(ing.Annotations, igs)
         if err != nil {
             return err
@@ -366,13 +358,13 @@ func (lbc *LoadBalancerController) sync(key string) (err error) {
         return syncError
     }
-    if urlMap, err := lbc.tr.toURLMap(ing); err != nil {
+    if urlMap, err := lbc.tr.toURLMap(&ing); err != nil {
         syncError = fmt.Errorf("%v, convert to url map error %v", syncError, err)
     } else if err := l7.UpdateUrlMap(urlMap); err != nil {
-        lbc.recorder.Eventf(ing, apiv1.EventTypeWarning, "UrlMap", err.Error())
+        lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "UrlMap", err.Error())
         syncError = fmt.Errorf("%v, update url map error: %v", syncError, err)
-    } else if err := lbc.updateIngressStatus(l7, *ing); err != nil {
-        lbc.recorder.Eventf(ing, apiv1.EventTypeWarning, "Status", err.Error())
+    } else if err := lbc.updateIngressStatus(l7, ing); err != nil {
+        lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "Status", err.Error())
         syncError = fmt.Errorf("%v, update ingress error: %v", syncError, err)
     }
     return syncError
@@ -432,13 +424,8 @@ func (lbc *LoadBalancerController) updateAnnotations(name, namespace string, ann
     return nil
 }
 
-// ListGCERuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module.
-// It returns runtime info only for gce ingresses and not for multi cluster ingresses.
-func (lbc *LoadBalancerController) ListGCERuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) {
-    ingList, err := lbc.ingLister.ListGCEIngresses()
-    if err != nil {
-        return lbs, err
-    }
+// toRuntimeInfo returns L7RuntimeInfo for the given ingresses.
+func (lbc *LoadBalancerController) toRuntimeInfo(ingList extensions.IngressList) (lbs []*loadbalancers.L7RuntimeInfo, err error) {
     for _, ing := range ingList.Items {
         k, err := keyFunc(&ing)
         if err != nil {

View file

@@ -521,13 +521,13 @@ func (t *GCETranslator) ingressToNodePorts(ing *extensions.Ingress) []backends.S
     for _, rule := range ing.Spec.Rules {
         if rule.HTTP == nil {
             glog.Errorf("ignoring non http Ingress rule")
-            return knownPorts
+            continue
         }
         for _, path := range rule.HTTP.Paths {
             port, err := t.getServiceNodePort(path.Backend, ing.Namespace)
             if err != nil {
                 glog.Infof("%v", err)
-                return knownPorts
+                continue
             }
             knownPorts = append(knownPorts, port)
         }
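The switch from return knownPorts to continue changes the loop from "stop at the first problematic rule" to "skip it and keep collecting ports from the remaining rules". A simplified sketch of that behavior, with plain structs standing in for ingress rules and the service lookup:

package main

import "fmt"

type rule struct {
    valid bool
    port  int64
}

// collectPorts mirrors the new ingressToNodePorts behavior: bad rules are
// skipped with continue, so later rules still contribute their ports.
func collectPorts(rules []rule) []int64 {
    var ports []int64
    for _, r := range rules {
        if !r.valid {
            continue // previously: return ports, dropping everything after this rule
        }
        ports = append(ports, r.port)
    }
    return ports
}

func main() {
    rules := []rule{{true, 30080}, {false, 0}, {true, 30443}}
    fmt.Println(collectPorts(rules)) // [30080 30443]; the old code would have stopped at [30080]
}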
@@ -680,7 +680,7 @@ func setInstanceGroupsAnnotation(existing map[string]string, igs []*compute.Inst
         Name string
         Zone string
     }
-    instanceGroups := []Value{}
+    var instanceGroups []Value
     for _, ig := range igs {
         instanceGroups = append(instanceGroups, Value{Name: ig.Name, Zone: ig.Zone})
     }
@@ -698,7 +698,7 @@ func uniq(nodePorts []backends.ServicePort) []backends.ServicePort {
     for _, p := range nodePorts {
         portMap[p.Port] = p
     }
-    nodePorts = []backends.ServicePort{}
+    nodePorts = make([]backends.ServicePort, 0, len(portMap))
     for _, sp := range portMap {
         nodePorts = append(nodePorts, sp)
     }
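For context on the setInstanceGroupsAnnotation hunk above, the annotation value is simply the JSON marshalling of that anonymous Value slice. A self-contained sketch of the resulting payload; the group name and zone URL below are made up, and the annotation key itself is not shown in this hunk:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Same anonymous-struct shape as setInstanceGroupsAnnotation uses.
    type Value struct {
        Name string
        Zone string
    }
    var instanceGroups []Value
    instanceGroups = append(instanceGroups,
        Value{Name: "k8s-ig--example", Zone: "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-b"})
    b, err := json.Marshal(instanceGroups)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
    // [{"Name":"k8s-ig--example","Zone":"https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-b"}]
}

Note that swapping []Value{} for var instanceGroups []Value only matters when nothing is appended: a nil slice marshals to null while an empty slice marshals to []; once at least one group is present the output is identical.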

View file

@@ -63,8 +63,7 @@ func (i *Instances) Init(zl zoneLister) {
 // all of which have the exact same named port.
 func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
     igs := []*compute.InstanceGroup{}
-    // TODO: move port naming to namer
-    namedPort := &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
+    namedPort := utils.GetNamedPort(port)
     zones, err := i.ListZones()
     if err != nil {

View file

@@ -8,6 +8,6 @@ import (
 // Helper method to create instance groups.
 // This method exists to ensure that we are using the same logic at all places.
-func CreateInstanceGroups(nodePool NodePool, namer *utils.Namer, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
+func EnsureInstanceGroupsAndPorts(nodePool NodePool, namer *utils.Namer, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
     return nodePool.AddInstanceGroup(namer.IGName(), port)
 }

View file

@@ -169,7 +169,7 @@ func (l *L7s) Sync(lbs []*L7RuntimeInfo) error {
     // Lazily create a default backend so we don't tax users who don't care
     // about Ingress by consuming 1 of their 3 GCE BackendServices. This
     // BackendService is GC'd when there are no more Ingresses.
-    if err := l.defaultBackendPool.Add(l.defaultBackendNodePort); err != nil {
+    if err := l.defaultBackendPool.Add(l.defaultBackendNodePort, nil); err != nil {
         return err
     }
     defaultBackend, err := l.defaultBackendPool.Get(l.defaultBackendNodePort.Port)

View file

@@ -353,3 +353,9 @@ func CompareLinks(l1, l2 string) bool {
 // FakeIngressRuleValueMap is a convenience type used by multiple submodules
 // that share the same testing methods.
 type FakeIngressRuleValueMap map[string]string
+
+// GetNamedPort creates the NamedPort API object for the given port.
+func GetNamedPort(port int64) *compute.NamedPort {
+    // TODO: move port naming to namer
+    return &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
+}
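For reference, the naming rule above means port 8080 becomes a named port called "port8080". A tiny self-contained sketch of that rule; the local NamedPort struct stands in for compute.NamedPort:

package main

import "fmt"

// NamedPort mirrors the two compute.NamedPort fields used here.
type NamedPort struct {
    Name string
    Port int64
}

// getNamedPort reproduces the fmt.Sprintf("port%v", port) rule from GetNamedPort.
func getNamedPort(port int64) *NamedPort {
    return &NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
}

func main() {
    fmt.Printf("%+v\n", *getNamedPort(8080)) // {Name:port8080 Port:8080}
}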