Get rid of default-zone everywhere.
commit 61558f4d19
parent 3bed62f51e
6 changed files with 21 additions and 15 deletions
@@ -27,10 +27,12 @@ import (
     "k8s.io/kubernetes/pkg/util/sets"
 )
 
+const defaultZone = "zone-a"
+
 func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) BackendPool {
     namer := &utils.Namer{}
-    nodePool := instances.NewNodePool(fakeIGs, "default-zone")
-    nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})
+    nodePool := instances.NewNodePool(fakeIGs)
+    nodePool.Init(&instances.FakeZoneLister{[]string{defaultZone}})
     healthChecks := healthchecks.NewHealthChecker(healthchecks.NewFakeHealthChecks(), "/", namer)
     healthChecks.Init(&healthchecks.FakeHealthCheckGetter{nil})
     return NewBackendPool(
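The test files in this commit all lean on instances.FakeZoneLister to feed the pool its zones, but the diff never shows that type's definition. The following is only a sketch of what it plausibly looks like, assuming the zone-lister side of the contract is a ListZones plus a GetZoneForNode method (names not confirmed by this diff):

package instances

// FakeZoneLister serves a static zone list in place of a live cloud lookup.
// The positional literal used in the tests, &instances.FakeZoneLister{[]string{defaultZone}},
// implies a struct with a single exported field.
type FakeZoneLister struct {
    Zones []string
}

// ListZones returns the canned zones, e.g. []string{"zone-a"}.
func (z *FakeZoneLister) ListZones() ([]string, error) {
    return z.Zones, nil
}

// GetZoneForNode puts every node in the first canned zone (assumed sketch behavior).
func (z *FakeZoneLister) GetZoneForNode(name string) (string, error) {
    return z.Zones[0], nil
}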
@@ -82,8 +84,14 @@ func TestBackendPoolAdd(t *testing.T) {
             t.Fatalf("Unexpected create for existing backend service")
         }
     }
-    gotBackend, _ := f.GetBackendService(beName)
-    gotGroup, _ := fakeIGs.GetInstanceGroup(namer.IGName(), "default-zone")
+    gotBackend, err := f.GetBackendService(beName)
+    if err != nil {
+        t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
+    }
+    gotGroup, err := fakeIGs.GetInstanceGroup(namer.IGName(), defaultZone)
+    if err != nil {
+        t.Fatalf("Failed to find instance group %v", namer.IGName())
+    }
     if gotBackend.Backends[0].Group != gotGroup.SelfLink {
         t.Fatalf(
             "Broken instance group link: %v %v",
@@ -261,13 +261,9 @@ func NewClusterManager(
 
     // Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
     cluster := ClusterManager{ClusterNamer: &utils.Namer{name}}
-    zone, err := cloud.GetZone()
-    if err != nil {
-        return nil, err
-    }
 
     // NodePool stores GCE vms that are in this Kubernetes cluster.
-    cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
+    cluster.instancePool = instances.NewNodePool(cloud)
 
     // BackendPool creates GCE BackendServices and associated health checks.
     healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
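For context, cloud.GetZone belongs to the Kubernetes cloudprovider interface and reports the zone of the VM the controller itself runs in; cloudprovider.Zone's FailureDomain field holds that zone name. The deleted flow pinned the whole pool to that single zone, which is exactly the assumption this commit removes. Roughly:

// Old flow, removed here: seed the node pool with the controller VM's own zone.
zone, err := cloud.GetZone() // returns (cloudprovider.Zone, error)
if err != nil {
    return nil, err
}
cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)

// New flow: construct zone-free. Some caller must later run
// cluster.instancePool.Init(zoneLister) with a real, multi-zone lister;
// that call site is outside this diff.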
@@ -50,7 +50,7 @@ func NewFakeClusterManager(clusterName string) *fakeClusterManager {
     fakeHCs := healthchecks.NewFakeHealthChecks()
     namer := &utils.Namer{clusterName}
 
-    nodePool := instances.NewNodePool(fakeIGs, defaultZone)
+    nodePool := instances.NewNodePool(fakeIGs)
     nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})
 
     healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
@@ -46,7 +46,7 @@ type Instances struct {
 // NewNodePool creates a new node pool.
 // - cloud: implements InstanceGroups, used to sync Kubernetes nodes with
 //   members of the cloud InstanceGroup.
-func NewNodePool(cloud InstanceGroups, defaultZone string) NodePool {
+func NewNodePool(cloud InstanceGroups) NodePool {
     return &Instances{cloud, storage.NewInMemoryPool(), nil}
 }
 
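With the defaultZone parameter gone, zone knowledge reaches a pool only through Init. A minimal sketch of the two-step wiring as the tests do it (FakeZoneLister as assumed above; production code would pass a cloud-backed lister instead):

// 1. Construct without any zone baked in.
pool := instances.NewNodePool(fakeIGs)

// 2. Hand over zone discovery explicitly; nothing defaults silently anymore.
pool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}})

Separating construction from zone injection keeps the constructor honest about knowing no zones, and lets tests swap in any zone topology they need.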
@@ -146,6 +146,8 @@ func (i *Instances) Get(name, zone string) (*compute.InstanceGroup, error) {
     return ig, nil
 }
 
+// splitNodesByZone takes a list of node names and returns a map of zone:node names.
+// It figures out the zones by asking the zoneLister.
 func (i *Instances) splitNodesByZone(names []string) map[string][]string {
     nodesByZone := map[string][]string{}
     for _, name := range names {
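The hunk above shows only the head of splitNodesByZone. A plausible completion, assuming the zoneLister (presumably the third Instances field, the nil populated later by Init) exposes GetZoneForNode; this is a sketch, not the commit's actual body:

func (i *Instances) splitNodesByZone(names []string) map[string][]string {
    nodesByZone := map[string][]string{}
    for _, name := range names {
        // Ask the injected zoneLister rather than a hard-coded default zone.
        zone, err := i.zoneLister.GetZoneForNode(name)
        if err != nil {
            // Sketch behavior: skip nodes whose zone cannot be determined.
            continue
        }
        nodesByZone[zone] = append(nodesByZone[zone], name)
    }
    return nodesByZone
}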
@@ -25,7 +25,7 @@ import (
 const defaultZone = "default-zone"
 
 func newNodePool(f *FakeInstanceGroups, zone string) NodePool {
-    pool := NewNodePool(f, zone)
+    pool := NewNodePool(f)
     pool.Init(&FakeZoneLister{[]string{zone}})
     return pool
 }
@@ -29,7 +29,7 @@ import (
 
 const (
     testDefaultBeNodePort = int64(3000)
-    defaultZone           = "default-zone"
+    defaultZone           = "zone-a"
 )
 
 func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
@@ -39,8 +39,8 @@ func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
     namer := &utils.Namer{}
     healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
     healthChecker.Init(&healthchecks.FakeHealthCheckGetter{nil})
-    nodePool := instances.NewNodePool(fakeIGs, defaultZone)
-    nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})
+    nodePool := instances.NewNodePool(fakeIGs)
+    nodePool.Init(&instances.FakeZoneLister{[]string{defaultZone}})
     backendPool := backends.NewBackendPool(
         fakeBackends, healthChecker, nodePool, namer, []int64{}, false)
     return NewLoadBalancerPool(f, backendPool, testDefaultBeNodePort, namer)