Updating instances interface to accept all named ports at once

nikhiljindal 2017-10-02 19:07:53 -07:00
parent abc8b9de51
commit 9a8789d523
10 changed files with 80 additions and 76 deletions
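
At a glance, the interface change is: BackendPool.Add, NodePool.AddInstanceGroup, and instances.EnsureInstanceGroupsAndPorts now take all ports in a single call, and BackendPool.Sync is folded into Add. A minimal before/after sketch with stand-in types (the real ones are backends.ServicePort and the GCE compute types), showing only the methods this commit touches:

package sketch

// Stand-ins for backends.ServicePort, compute.InstanceGroup and compute.NamedPort.
type ServicePort struct{ Port int64 }
type InstanceGroup struct{ Name string }
type NamedPort struct {
    Name string
    Port int64
}

// Before: one port per call, plus a separate Sync pass over all ports.
type oldBackendPool interface {
    Add(port ServicePort, igs []*InstanceGroup) error
    Sync(ports []ServicePort, igs []*InstanceGroup) error
}
type oldNodePool interface {
    AddInstanceGroup(name string, port int64) ([]*InstanceGroup, *NamedPort, error)
}

// After: every named port is handed over at once and Sync disappears.
type newBackendPool interface {
    Add(ports []ServicePort, igs []*InstanceGroup) error
}
type newNodePool interface {
    AddInstanceGroup(name string, ports []int64) ([]*InstanceGroup, []*NamedPort, error)
}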

@@ -210,24 +210,43 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic
return b.Get(namedPort.Port)
}
// Add will get or create a Backend for the given port.
// Add will get or create Backends for the given ports.
// Uses the given instance groups if non-nil, else creates instance groups.
func (b *Backends) Add(p ServicePort, igs []*compute.InstanceGroup) error {
// We must track the port even if creating the backend failed, because
// we might've created a health-check for it.
be := &compute.BackendService{}
defer func() { b.snapshotter.Add(portKey(p.Port), be) }()
var err error
func (b *Backends) Add(svcPorts []ServicePort, igs []*compute.InstanceGroup) error {
glog.V(3).Infof("Sync: backends %v", svcPorts)
// Ideally callers should pass the instance groups to prevent recomputing them here.
// Igs can be nil in scenarios where we do not have instance groups, such as
// when syncing the default backend service.
if igs == nil {
igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, p.Port)
ports := []int64{}
for _, p := range svcPorts {
ports = append(ports, p.Port)
}
var err error
igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, ports)
if err != nil {
return err
}
}
// create backends for new ports, perform an edge hop for existing ports
for _, port := range svcPorts {
if err := b.addWithIGs(port, igs); err != nil {
return err
}
}
return nil
}
// addWithIGs will get or create a Backend for the given port.
// It assumes that the instance groups have been created and the required named port has been added.
// If not, then Add should be called instead.
func (b *Backends) addWithIGs(p ServicePort, igs []*compute.InstanceGroup) error {
// We must track the ports even if creating the backends failed, because
// we might've created health checks for them.
be := &compute.BackendService{}
defer func() { b.snapshotter.Add(portKey(p.Port), be) }()
var err error
// Ensure health check for backend service exists
hcLink, err := b.ensureHealthCheck(p)
@@ -388,19 +407,6 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr
return fmt.Errorf("received errors when updating backend service: %v", strings.Join(errs, "\n"))
}
// Sync syncs backend services corresponding to ports in the given list.
func (b *Backends) Sync(svcNodePorts []ServicePort, igs []*compute.InstanceGroup) error {
glog.V(3).Infof("Sync: backends %v", svcNodePorts)
// create backends for new ports, perform an edge hop for existing ports
for _, port := range svcNodePorts {
if err := b.Add(port, igs); err != nil {
return err
}
}
return nil
}
// GC garbage collects services corresponding to ports in the given list.
func (b *Backends) GC(svcNodePorts []ServicePort) error {
knownPorts := sets.NewString()
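
Net effect on the pool above: the old Sync loop moves into Add, which now ensures the instance groups and named ports once for the whole port list and then runs the per-port work through the new addWithIGs helper. A rough sketch of that control flow, reusing the stand-in types from the earlier sketch (the two closures stand in for instances.EnsureInstanceGroupsAndPorts and addWithIGs):

// addAll mirrors the new Backends.Add: one ensure call, then per-port adds.
func addAll(svcPorts []ServicePort, igs []*InstanceGroup,
    ensureIGs func(ports []int64) ([]*InstanceGroup, error),
    addOne func(p ServicePort, igs []*InstanceGroup) error) error {
    if igs == nil {
        // Collect every node port and ensure groups/named ports in one shot.
        ports := make([]int64, 0, len(svcPorts))
        for _, p := range svcPorts {
            ports = append(ports, p.Port)
        }
        var err error
        if igs, err = ensureIGs(ports); err != nil {
            return err
        }
    }
    // Formerly Sync: create backends for new ports, edge-hop existing ones.
    for _, p := range svcPorts {
        if err := addOne(p, igs); err != nil {
            return err
        }
    }
    return nil
}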

@@ -80,7 +80,7 @@ func TestBackendPoolAdd(t *testing.T) {
// Add a backend for a port, then re-add the same port and
// make sure it corrects a broken link from the backend to
// the instance group.
err := pool.Add(nodePort, nil)
err := pool.Add([]ServicePort{nodePort}, nil)
if err != nil {
t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", nodePort, err)
}
@@ -143,7 +143,7 @@ func TestHealthCheckMigration(t *testing.T) {
hcp.CreateHttpHealthCheck(legacyHC)
// Add the service port to the backend pool
pool.Add(p, nil)
pool.Add([]ServicePort{p}, nil)
// Assert the proper health check was created
hc, _ := pool.healthChecker.Get(p.Port)
@@ -168,7 +168,7 @@ func TestBackendPoolUpdate(t *testing.T) {
namer := utils.Namer{}
p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
pool.Add(p, nil)
pool.Add([]ServicePort{p}, nil)
beName := namer.BeName(p.Port)
be, err := f.GetGlobalBackendService(beName)
@@ -188,7 +188,7 @@ func TestBackendPoolUpdate(t *testing.T) {
// Update service port to encrypted
p.Protocol = utils.ProtocolHTTPS
pool.Sync([]ServicePort{p}, nil)
pool.Add([]ServicePort{p}, nil)
be, err = f.GetGlobalBackendService(beName)
if err != nil {
@@ -214,7 +214,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
namer := utils.Namer{}
nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP}
pool.Add(nodePort, nil)
pool.Add([]ServicePort{nodePort}, nil)
beName := namer.BeName(nodePort.Port)
be, _ := f.GetGlobalBackendService(beName)
@@ -227,7 +227,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
f.calls = []int{}
f.UpdateGlobalBackendService(be)
pool.Add(nodePort, nil)
pool.Add([]ServicePort{nodePort}, nil)
for _, call := range f.calls {
if call == utils.Create {
t.Fatalf("Unexpected create for existing backend service")
@@ -260,10 +260,10 @@ func TestBackendPoolSync(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool, _ := newTestJig(f, fakeIGs, true)
pool.Add(ServicePort{Port: 81}, nil)
pool.Add(ServicePort{Port: 90}, nil)
if err := pool.Sync(svcNodePorts, nil); err != nil {
t.Errorf("Expected backend pool to sync, err: %v", err)
pool.Add([]ServicePort{ServicePort{Port: 81}}, nil)
pool.Add([]ServicePort{ServicePort{Port: 90}}, nil)
if err := pool.Add(svcNodePorts, nil); err != nil {
t.Errorf("Expected backend pool to add node ports, err: %v", err)
}
if err := pool.GC(svcNodePorts); err != nil {
t.Errorf("Expected backend pool to GC, err: %v", err)
@@ -361,7 +361,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
})
// Have pool sync the above backend service
bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}, nil)
bp.Add([]ServicePort{ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}}, nil)
// Verify the legacy health check has been deleted
_, err = hcp.GetHttpHealthCheck(beName)
@@ -388,7 +388,7 @@ func TestBackendPoolShutdown(t *testing.T) {
namer := utils.Namer{}
// Add a backend-service and verify that it doesn't exist after Shutdown()
pool.Add(ServicePort{Port: 80}, nil)
pool.Add([]ServicePort{ServicePort{Port: 80}}, nil)
pool.Shutdown()
if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
t.Fatalf("%v", err)
@@ -402,7 +402,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
namer := utils.Namer{}
// This will add the instance group k8s-ig to the instance pool
pool.Add(ServicePort{Port: 80}, nil)
pool.Add([]ServicePort{ServicePort{Port: 80}}, nil)
be, err := f.GetGlobalBackendService(namer.BeName(80))
if err != nil {
@@ -420,7 +420,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
}
// Make sure repeated adds don't clobber the inserted instance group
pool.Add(ServicePort{Port: 80}, nil)
pool.Add([]ServicePort{ServicePort{Port: 80}}, nil)
be, err = f.GetGlobalBackendService(namer.BeName(80))
if err != nil {
t.Fatalf("%v", err)
@@ -462,7 +462,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
return nil
}
pool.Add(nodePort, nil)
pool.Add([]ServicePort{nodePort}, nil)
be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
if err != nil {
t.Fatalf("%v", err)

@@ -30,10 +30,9 @@ type probeProvider interface {
// as gce backendServices, and sync them through the BackendServices interface.
type BackendPool interface {
Init(p probeProvider)
Add(port ServicePort, igs []*compute.InstanceGroup) error
Add(ports []ServicePort, igs []*compute.InstanceGroup) error
Get(port int64) (*compute.BackendService, error)
Delete(port int64) error
Sync(ports []ServicePort, igs []*compute.InstanceGroup) error
GC(ports []ServicePort) error
Shutdown() error
Status(name string) string

@@ -134,7 +134,7 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
if err != nil {
return igs, err
}
if err := c.backendPool.Sync(backendServicePorts, igs); err != nil {
if err := c.backendPool.Add(backendServicePorts, igs); err != nil {
return igs, err
}
if err := c.instancePool.Sync(nodeNames); err != nil {
@@ -167,20 +167,12 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName
}
func (c *ClusterManager) EnsureInstanceGroupsAndPorts(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
var igs []*compute.InstanceGroup
var err error
ports := []int64{}
for _, p := range servicePorts {
// EnsureInstanceGroupsAndPorts always returns all the instance groups, so we can return
// the output of any call, no need to append the return from all calls.
// TODO: Ideally, we want to call CreateInstanceGroups only the first time and
// then call AddNamedPort multiple times. Need to update the interface to
// achieve this.
igs, _, err = instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, p.Port)
if err != nil {
return nil, err
}
ports = append(ports, p.Port)
}
return igs, nil
igs, _, err := instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, ports)
return igs, err
}
// GC garbage collects unused resources.
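
The caller side changes to match: Checkpoint now hands the instance groups it just ensured to backendPool.Add, and EnsureInstanceGroupsAndPorts issues a single AddInstanceGroup call instead of one per ServicePort, which is what the removed TODO was asking for. A sketch of that checkpoint path using the same stand-in types, with ensureIGs again standing in for instances.EnsureInstanceGroupsAndPorts:

// checkpointBackends mirrors the new flow: ensure the groups once for all
// ports, then pass both the ports and the groups to the pool in one Add call,
// so the pool's igs == nil fallback is only needed for the default backend.
func checkpointBackends(svcPorts []ServicePort,
    ensureIGs func(ports []int64) ([]*InstanceGroup, error),
    pool newBackendPool) error {
    ports := make([]int64, 0, len(svcPorts))
    for _, p := range svcPorts {
        ports = append(ports, p.Port)
    }
    igs, err := ensureIGs(ports)
    if err != nil {
        return err
    }
    return pool.Add(svcPorts, igs)
}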

@@ -71,14 +71,14 @@ func TestInstancesAddedToZones(t *testing.T) {
// Create 2 igs, one per zone.
testIG := "test-ig"
testPort := int64(3001)
testPort := []int64{int64(3001)}
lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, testPort)
// node pool syncs kube-nodes, this will add them to both igs.
lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
if cm.fakeIGs.Ports[0] != testPort {
if cm.fakeIGs.Ports[0] != testPort[0] {
t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
}

@@ -59,15 +59,18 @@ func (i *Instances) Init(zl zoneLister) {
}
// AddInstanceGroup creates or gets an instance group if it doesn't exist
// and adds the given port to it. Returns a list of one instance group per zone,
// all of which have the exact same named port.
func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
// and adds the given ports to it. Returns a list of one instance group per zone,
// all of which have the exact same named ports.
func (i *Instances) AddInstanceGroup(name string, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
igs := []*compute.InstanceGroup{}
namedPort := utils.GetNamedPort(port)
namedPorts := []*compute.NamedPort{}
for _, port := range ports {
namedPorts = append(namedPorts, utils.GetNamedPort(port))
}
zones, err := i.ListZones()
if err != nil {
return igs, namedPort, err
return igs, namedPorts, err
}
defer i.snapshotter.Add(name, struct{}{})
@@ -99,23 +99,27 @@ func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.Instan
glog.V(3).Infof("Instance group %v already exists in zone %v", name, zone)
}
found := false
existingPorts := map[int64]bool{}
for _, np := range ig.NamedPorts {
if np.Port == port {
glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
found = true
break
}
existingPorts[np.Port] = true
}
if !found {
glog.V(3).Infof("Instance group %v/%v does not have port %+v, adding it now.", zone, name, namedPort)
if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPort)); err != nil {
newPorts := []*compute.NamedPort{}
for _, np := range namedPorts {
if existingPorts[np.Port] {
glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
continue
}
newPorts = append(newPorts, np)
}
if len(newPorts) > 0 {
glog.V(3).Infof("Instance group %v/%v does not have ports %+v, adding them now.", zone, name, namedPorts)
if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPorts...)); err != nil {
return nil, nil, err
}
}
igs = append(igs, ig)
}
return igs, namedPort, nil
return igs, namedPorts, nil
}
// DeleteInstanceGroup deletes the given IG by name, from all zones.
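
The merge logic added to AddInstanceGroup above generalizes the old single-port check: build a set of the ports the group already exposes, keep only the requested named ports that are missing, and call SetNamedPortsOfInstanceGroup at most once per zone. A standalone sketch of just that filtering step, with NamedPort again standing in for compute.NamedPort:

// missingPorts returns the desired named ports that the instance group does
// not already expose, preserving the order in which they were requested.
func missingPorts(existing, desired []*NamedPort) []*NamedPort {
    have := map[int64]bool{}
    for _, np := range existing {
        have[np.Port] = true
    }
    missing := []*NamedPort{}
    for _, np := range desired {
        if have[np.Port] {
            continue // already configured on the group, nothing to add
        }
        missing = append(missing, np)
    }
    return missing
}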

@@ -34,7 +34,7 @@ func TestNodePoolSync(t *testing.T) {
f := NewFakeInstanceGroups(sets.NewString(
[]string{"n1", "n2"}...))
pool := newNodePool(f, defaultZone)
pool.AddInstanceGroup("test", 80)
pool.AddInstanceGroup("test", []int64{80})
// KubeNodes: n1
// GCENodes: n1, n2
@@ -53,7 +53,7 @@ func TestNodePoolSync(t *testing.T) {
f = NewFakeInstanceGroups(sets.NewString([]string{"n1"}...))
pool = newNodePool(f, defaultZone)
pool.AddInstanceGroup("test", 80)
pool.AddInstanceGroup("test", []int64{80})
f.calls = []int{}
kubeNodes = sets.NewString([]string{"n1", "n2"}...)
@@ -69,7 +69,7 @@ func TestNodePoolSync(t *testing.T) {
f = NewFakeInstanceGroups(sets.NewString([]string{"n1", "n2"}...))
pool = newNodePool(f, defaultZone)
pool.AddInstanceGroup("test", 80)
pool.AddInstanceGroup("test", []int64{80})
f.calls = []int{}
kubeNodes = sets.NewString([]string{"n1", "n2"}...)

@@ -32,7 +32,7 @@ type NodePool interface {
Init(zl zoneLister)
// The following 2 methods operate on instance groups.
AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error)
AddInstanceGroup(name string, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error)
DeleteInstanceGroup(name string) error
// TODO: Refactor for modularity

@@ -8,6 +8,6 @@ import (
// Helper method to create instance groups.
// This method exists to ensure that we are using the same logic at all places.
func EnsureInstanceGroupsAndPorts(nodePool NodePool, namer *utils.Namer, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
return nodePool.AddInstanceGroup(namer.IGName(), port)
func EnsureInstanceGroupsAndPorts(nodePool NodePool, namer *utils.Namer, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
return nodePool.AddInstanceGroup(namer.IGName(), ports)
}

@@ -169,7 +169,7 @@ func (l *L7s) Sync(lbs []*L7RuntimeInfo) error {
// Lazily create a default backend so we don't tax users who don't care
// about Ingress by consuming 1 of their 3 GCE BackendServices. This
// BackendService is GC'd when there are no more Ingresses.
if err := l.defaultBackendPool.Add(l.defaultBackendNodePort, nil); err != nil {
if err := l.defaultBackendPool.Add([]backends.ServicePort{l.defaultBackendNodePort}, nil); err != nil {
return err
}
defaultBackend, err := l.defaultBackendPool.Get(l.defaultBackendNodePort.Port)