From b259c9b349486ed2aae0b03968bae1e87d4f9e58 Mon Sep 17 00:00:00 2001
From: Christian Bell
Date: Tue, 14 Feb 2017 16:48:07 -0800
Subject: [PATCH] First stab at extending the "uid" configmap to store
 firewall rule information.

---
 controllers/gce/controller/controller.go      |   4 +
 controllers/gce/controller/controller_test.go |  12 +-
 controllers/gce/controller/fakes.go           |   4 +-
 controllers/gce/controller/util_test.go       |  10 +-
 controllers/gce/main.go                       | 113 +++++++++++++-----
 controllers/gce/storage/configmaps.go         |  78 +++++++-----
 controllers/gce/storage/configmaps_test.go    |  46 +++++--
 controllers/gce/utils/utils.go                |  41 ++++++-
 8 files changed, 217 insertions(+), 91 deletions(-)

diff --git a/controllers/gce/controller/controller.go b/controllers/gce/controller/controller.go
index c4d15e2fd..cb125fbc0 100644
--- a/controllers/gce/controller/controller.go
+++ b/controllers/gce/controller/controller.go
@@ -46,6 +46,10 @@ var (
 	// L7 controller created without specifying the --cluster-uid flag.
 	DefaultClusterUID = ""
 
+	// DefaultFirewallName is the name to use for firewall rules created
+	// by an L7 controller when the --firewall-rule flag is not used.
+	DefaultFirewallName = ""
+
 	// Frequency to poll on local stores to sync.
 	storeSyncPollPeriod = 5 * time.Second
 )
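Note: DefaultFirewallName defaults to the empty string on purpose. A minimal sketch of the intended semantics (import paths assumed from the k8s.io/ingress tree; the UID value is illustrative; the fallback itself is implemented in utils.go further down):

```go
package main

import (
	"fmt"

	"k8s.io/ingress/controllers/gce/controller"
	"k8s.io/ingress/controllers/gce/utils"
)

func main() {
	// DefaultFirewallName is "". Passing it through to the namer (as
	// main.go now does) means "derive the firewall name from the cluster
	// UID", which matches pre-patch behavior for existing clusters.
	namer := utils.NewNamerWithFirewall("uid123", controller.DefaultFirewallName)
	fmt.Println(namer.GetFirewallName()) // prints "uid123"
}
```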
diff --git a/controllers/gce/controller/controller_test.go b/controllers/gce/controller/controller_test.go
index cc58e94b5..f8d905b44 100644
--- a/controllers/gce/controller/controller_test.go
+++ b/controllers/gce/controller/controller_test.go
@@ -199,7 +199,8 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 }
 
 func TestLbCreateDelete(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	testFirewallName := "quux"
+	cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap1 := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -240,6 +241,7 @@ func TestLbCreateDelete(t *testing.T) {
 	unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
 	expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
 	firewallPorts := sets.NewString()
+	pm.namer.SetFirewallName(testFirewallName)
 	firewallName := pm.namer.FrName(pm.namer.FrSuffix())
 
 	if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err != nil {
@@ -290,7 +292,7 @@ func TestLbCreateDelete(t *testing.T) {
 }
 
 func TestLbFaultyUpdate(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -327,7 +329,7 @@ func TestLbFaultyUpdate(t *testing.T) {
 }
 
 func TestLbDefaulting(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	// Make sure the controller plugs in the default values accepted by GCE.
 	ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
@@ -345,7 +347,7 @@ func TestLbDefaulting(t *testing.T) {
 }
 
 func TestLbNoService(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -389,7 +391,7 @@ func TestLbNoService(t *testing.T) {
 }
 
 func TestLbChangeStaticIP(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {

diff --git a/controllers/gce/controller/fakes.go b/controllers/gce/controller/fakes.go
index ae97f0d9c..52927bd99 100644
--- a/controllers/gce/controller/fakes.go
+++ b/controllers/gce/controller/fakes.go
@@ -44,12 +44,12 @@ type fakeClusterManager struct {
 }
 
 // NewFakeClusterManager creates a new fake ClusterManager.
-func NewFakeClusterManager(clusterName string) *fakeClusterManager {
+func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager {
 	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
 	fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	fakeHCs := healthchecks.NewFakeHealthChecks()
-	namer := utils.NewNamer(clusterName)
+	namer := utils.NewNamerWithFirewall(clusterName, firewallName)
 	nodePool := instances.NewNodePool(fakeIGs)
 	nodePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}})
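All call sites below follow the same pattern; a hypothetical companion test (not part of this patch) showing the updated two-argument constructor:

```go
package controller

import "testing"

// Tests that don't care about firewall naming now pass DefaultFirewallName
// as the second argument; tests that do (like TestLbCreateDelete above)
// pass an explicit name.
func TestFakeManagerConstruction(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
	if cm == nil {
		t.Fatal("expected a fake cluster manager")
	}
}
```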
cm, "") firstPod := &api.Pod{ diff --git a/controllers/gce/main.go b/controllers/gce/main.go index db2eebfff..ac9bd457d 100644 --- a/controllers/gce/main.go +++ b/controllers/gce/main.go @@ -215,7 +215,7 @@ func main() { if *inCluster || *useRealCloud { // Create cluster manager - namer, err := newNamer(kubeClient, *clusterName) + namer, err := newNamer(kubeClient, *clusterName, controller.DefaultFirewallName) if err != nil { glog.Fatalf("%v", err) } @@ -225,7 +225,7 @@ func main() { } } else { // Create fake cluster manager - clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager + clusterManager = controller.NewFakeClusterManager(*clusterName, controller.DefaultFirewallName).ClusterManager } // Start loadbalancer controller @@ -247,32 +247,95 @@ func main() { } } -func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, error) { +func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) { name, err := getClusterUID(kubeClient, clusterName) if err != nil { return nil, err } + fw_name, err := getFirewallName(kubeClient, fwName, name) + if err != nil { + return nil, err + } - namer := utils.NewNamer(name) - vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) + namer := utils.NewNamerWithFirewall(name, fw_name) + uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) // Start a goroutine to poll the cluster UID config map // We don't watch because we know exactly which configmap we want and this // controller already watches 5 other resources, so it isn't worth the cost // of another connection and complexity. go wait.Forever(func() { - uid, found, err := vault.Get() - existing := namer.GetClusterName() - if found && uid != existing { - glog.Infof("Cluster uid changed from %v -> %v", existing, uid) - namer.SetClusterName(uid) - } else if err != nil { - glog.Errorf("Failed to reconcile cluster uid %v, currently set to %v", err, existing) + for _, key := range [...]string{storage.UidDataKey, storage.ProviderDataKey} { + val, found, err := uidVault.Get(key) + if err != nil { + glog.Errorf("Can't read uidConfigMap %v", uidConfigMapName) + } else if !found { + glog.Errorf("Can't read %v from uidConfigMap %v", key, uidConfigMapName) + continue + } + + switch key { + case storage.UidDataKey: + if uid := namer.GetClusterName(); uid != val { + glog.Infof("Cluster uid changed from %v -> %v", uid, val) + namer.SetClusterName(val) + } + case storage.ProviderDataKey: + if fw_name := namer.GetFirewallName(); fw_name != val { + glog.Infof("Cluster firewall name changed from %v -> %v", fw_name, val) + namer.SetFirewallName(val) + } + } } }, 5*time.Second) return namer, nil } +// getFlagOrLookupVault returns the name to use associated to a flag and configmap. +// The returned value follows this priority: +// If the provided 'name' is not empty, that name is used. +// This is effectively a client override via a command line flag. +// else, check configmap under 'configmap_name' as a key and if found, use the associated value +// else, return an empty 'name' and pass along an error iff the configmap lookup is erroneous. 
+func getFlagOrLookupVault(cfgVault *storage.ConfigMapVault, cmKey string, name string) (string, error) {
+	if name != "" {
+		glog.Infof("Using user provided %v %v", cmKey, name)
+		// Don't save the name in the vault, so users can roll back by
+		// setting the accompanying flag to ""
+		return name, nil
+	}
+	val, found, err := cfgVault.Get(cmKey)
+	if found {
+		glog.Infof("Using %v = %q saved in ConfigMap", cmKey, val)
+		return val, nil
+	} else if err != nil {
+		// This can fail because of:
+		// 1. No such config map - found=false, err=nil
+		// 2. No such key in config map - found=false, err=nil
+		// 3. Apiserver flake - found=false, err!=nil
+		// It is not safe to proceed in 3.
+		return "", fmt.Errorf("Failed to retrieve %v: %v", cmKey, err)
+	}
+	// Not found but safe to proceed.
+	return "", nil
+}
+
+// getFirewallName returns the firewall rule name to use for this cluster.
+// It consults getFlagOrLookupVault for a flag-provided or previously stored
+// name; otherwise it falls back to the cluster UID, which retains backwards
+// compatibility with clusters whose firewall rule is named after the UID.
+func getFirewallName(kubeClient client.Interface, name string, clusterUID string) (string, error) {
+	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	if fwName, err := getFlagOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
+		return "", err
+	} else if fwName != "" {
+		return fwName, cfgVault.Put(storage.ProviderDataKey, fwName)
+	} else {
+		glog.Infof("Using cluster UID %v as firewall name", clusterUID)
+		return clusterUID, cfgVault.Put(storage.ProviderDataKey, clusterUID)
+	}
+}
+
 // getClusterUID returns the cluster UID. Rules for UID generation:
 // If the user specifies a --cluster-uid param it overwrites everything
 // else, check UID config map for a previously recorded uid
@@ -281,26 +344,12 @@ func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, er
 // else, allocate a new uid
 func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
-	if name != "" {
-		glog.Infof("Using user provided cluster uid %v", name)
-		// Don't save the uid in the vault, so users can rollback through
-		// --cluster-uid=""
+	if name, err := getFlagOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
+		return "", err
+	} else if name != "" {
 		return name, nil
 	}
-
-	existingUID, found, err := cfgVault.Get()
-	if found {
-		glog.Infof("Using saved cluster uid %q", existingUID)
-		return existingUID, nil
-	} else if err != nil {
-		// This can fail because of:
-		// 1. No such config map - found=false, err=nil
-		// 2. No such key in config map - found=false, err=nil
-		// 3. Apiserver flake - found=false, err!=nil
-		// It is not safe to proceed in 3.
-		return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
-	}
-
 	// Check if the cluster has an Ingress with ip
 	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
 	if err != nil {
@@ -311,10 +360,10 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 		if len(ing.Status.LoadBalancer.Ingress) != 0 {
 			c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
 			if c.ClusterName != "" {
-				return c.ClusterName, cfgVault.Put(c.ClusterName)
+				return c.ClusterName, cfgVault.Put(storage.UidDataKey, c.ClusterName)
 			}
 			glog.Infof("Found a working Ingress, assuming uid is empty string")
-			return "", cfgVault.Put("")
+			return "", cfgVault.Put(storage.UidDataKey, "")
 		}
 	}
 
@@ -329,7 +378,7 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 		return "", err
 	}
 	uid := fmt.Sprintf("%x", b)
-	return uid, cfgVault.Put(uid)
+	return uid, cfgVault.Put(storage.UidDataKey, uid)
 }
 
 // getNodePort waits for the Service, and returns it's first node port.
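The lookup priority above can be condensed into a standalone sketch; resolveName below is hypothetical and not part of the patch, and note that getFirewallName additionally writes the resolved name back to the vault:

```go
package main

import "fmt"

// resolveName condenses the priority implemented by getFlagOrLookupVault:
// a non-empty flag value wins, then a value already stored in the
// configmap, then the caller's fallback (the cluster UID in the case of
// getFirewallName).
func resolveName(flagVal, storedVal string, stored bool, fallback string) string {
	if flagVal != "" {
		return flagVal // explicit override via command line flag
	}
	if stored {
		return storedVal // previously recorded in the uid configmap
	}
	return fallback
}

func main() {
	fmt.Println(resolveName("", "", false, "uid123"))         // uid123
	fmt.Println(resolveName("", "fw-7", true, "uid123"))      // fw-7
	fmt.Println(resolveName("my-fw", "fw-7", true, "uid123")) // my-fw
}
```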
- return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name) - } - // Check if the cluster has an Ingress with ip ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { @@ -311,10 +360,10 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) { if len(ing.Status.LoadBalancer.Ingress) != 0 { c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule")) if c.ClusterName != "" { - return c.ClusterName, cfgVault.Put(c.ClusterName) + return c.ClusterName, cfgVault.Put(storage.UidDataKey, c.ClusterName) } glog.Infof("Found a working Ingress, assuming uid is empty string") - return "", cfgVault.Put("") + return "", cfgVault.Put(storage.UidDataKey, "") } } @@ -329,7 +378,7 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) { return "", err } uid := fmt.Sprintf("%x", b) - return uid, cfgVault.Put(uid) + return uid, cfgVault.Put(storage.UidDataKey, uid) } // getNodePort waits for the Service, and returns it's first node port. diff --git a/controllers/gce/storage/configmaps.go b/controllers/gce/storage/configmaps.go index cfed347fc..7b4f1a42f 100644 --- a/controllers/gce/storage/configmaps.go +++ b/controllers/gce/storage/configmaps.go @@ -19,6 +19,7 @@ package storage import ( "fmt" "strings" + "sync" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" @@ -27,73 +28,86 @@ import ( client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" ) -// UIDVault stores UIDs. -type UIDVault interface { - Get() (string, bool, error) - Put(string) error - Delete() error -} - -// uidDataKey is the key used in config maps to store the UID. -const uidDataKey = "uid" +const ( + // UidDataKey is the key used in config maps to store the UID. + UidDataKey = "uid" + // ProviderDataKey is the key used in config maps to store the Provider + // UID which we use to ensure unique firewalls. + ProviderDataKey = "providerUid" +) // ConfigMapVault stores cluster UIDs in config maps. // It's a layer on top of ConfigMapStore that just implements the utils.uidVault // interface. type ConfigMapVault struct { + storeLock sync.Mutex ConfigMapStore cache.Store namespace string name string } -// Get retrieves the cluster UID from the cluster config map. +// Get retrieves the value associated to the provided 'key' from the cluster config map. // If this method returns an error, it's guaranteed to be apiserver flake. // If the error is a not found error it sets the boolean to false and // returns and error of nil instead. -func (c *ConfigMapVault) Get() (string, bool, error) { - key := fmt.Sprintf("%v/%v", c.namespace, c.name) - item, found, err := c.ConfigMapStore.GetByKey(key) +func (c *ConfigMapVault) Get(key string) (string, bool, error) { + keyStore := fmt.Sprintf("%v/%v", c.namespace, c.name) + item, found, err := c.ConfigMapStore.GetByKey(keyStore) if err != nil || !found { return "", false, err } - cfg := item.(*api.ConfigMap) - if k, ok := cfg.Data[uidDataKey]; ok { + data := item.(*api.ConfigMap).Data + c.storeLock.Lock() + defer c.storeLock.Unlock() + if k, ok := data[key]; ok { return k, true, nil } - return "", false, fmt.Errorf("Found config map %v but it doesn't contain uid key: %+v", key, cfg.Data) + glog.Infof("Found config map %v but it doesn't contain key %v: %+v", keyStore, key, data) + return "", false, nil } -// Put stores the given UID in the cluster config map. 
diff --git a/controllers/gce/storage/configmaps_test.go b/controllers/gce/storage/configmaps_test.go
index 3b8404b89..8d25d6671 100644
--- a/controllers/gce/storage/configmaps_test.go
+++ b/controllers/gce/storage/configmaps_test.go
@@ -24,31 +24,51 @@ import (
 
 func TestConfigMapUID(t *testing.T) {
 	vault := NewFakeConfigMapVault(api.NamespaceSystem, "ingress-uid")
-	uid := ""
-	k, exists, err := vault.Get()
+
+	// Get value from an empty vault.
+	val, exists, err := vault.Get(UidDataKey)
 	if exists {
-		t.Errorf("Got a key from an empyt vault")
+		t.Errorf("Got value from an empty vault")
 	}
-	vault.Put(uid)
-	k, exists, err = vault.Get()
+
+	// Store an empty value for UidDataKey.
+	uid := ""
+	vault.Put(UidDataKey, uid)
+	val, exists, err = vault.Get(UidDataKey)
 	if !exists || err != nil {
-		t.Errorf("Failed to retrieve value from vault")
+		t.Errorf("Failed to retrieve value from vault: %v", err)
 	}
-	if k != "" {
+	if val != "" {
 		t.Errorf("Failed to store empty string as a key in the vault")
 	}
-	vault.Put("newuid")
-	k, exists, err = vault.Get()
+
+	// Store an actual value in the key.
+	storedVal := "newuid"
+	vault.Put(UidDataKey, storedVal)
+	val, exists, err = vault.Get(UidDataKey)
 	if !exists || err != nil {
 		t.Errorf("Failed to retrieve value from vault")
+	} else if val != storedVal {
+		t.Errorf("Expected %q, got %q from the vault", storedVal, val)
 	}
-	if k != "newuid" {
-		t.Errorf("Failed to modify uid")
+
+	// Store a second value, which exercises the update path in the store
+	// rather than the add path.
+	secondVal := "bar"
+	vault.Put("foo", secondVal)
+	val, exists, err = vault.Get("foo")
+	if !exists || err != nil || val != secondVal {
+		t.Errorf("Failed to retrieve second value from vault")
 	}
+	val, exists, err = vault.Get(UidDataKey)
+	if !exists || err != nil || val != storedVal {
+		t.Errorf("Failed to retrieve first value from vault")
+	}
+
+	// Delete the config map.
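For reference, after both keys are written the Data of the kube-system/ingress-uid configmap would look roughly as below (values illustrative, not taken from the patch):

```go
package main

import "fmt"

func main() {
	// Approximate shape of the configmap's Data after this patch: "uid"
	// is unchanged, "providerUid" is new and holds the firewall rule name.
	data := map[string]string{
		"uid":         "f0cba8a8c8e99e95",
		"providerUid": "my-fw",
	}
	fmt.Println(data)
}
```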
 	if err := vault.Delete(); err != nil {
 		t.Errorf("Failed to delete uid %v", err)
 	}
-	if uid, exists, _ := vault.Get(); exists {
-		t.Errorf("Found uid %v, expected none", uid)
+	if _, exists, _ := vault.Get(UidDataKey); exists {
+		t.Errorf("Found uid but expected none after deletion")
 	}
 }

diff --git a/controllers/gce/utils/utils.go b/controllers/gce/utils/utils.go
index 33525ffa3..63caccdb3 100644
--- a/controllers/gce/utils/utils.go
+++ b/controllers/gce/utils/utils.go
@@ -92,8 +92,9 @@ const (
 
 // Namer handles centralized naming for the cluster.
 type Namer struct {
-	clusterName string
-	nameLock    sync.Mutex
+	clusterName  string
+	firewallName string
+	nameLock     sync.Mutex
 }
 
 // NewNamer creates a new namer.
@@ -103,6 +104,14 @@ func NewNamer(clusterName string) *Namer {
 	return namer
 }
 
+// NewNamerWithFirewall creates a new namer with a firewall name.
+func NewNamerWithFirewall(clusterName, firewallName string) *Namer {
+	namer := &Namer{}
+	namer.SetClusterName(clusterName)
+	namer.SetFirewallName(firewallName)
+	return namer
+}
+
 // NameComponents is a struct representing the components of a a GCE resource
 // name constructed by the namer. The format of such a name is:
 // k8s-resource---uid
@@ -123,6 +132,16 @@ func (n *Namer) SetClusterName(name string) {
 	n.clusterName = name
 }
 
+// SetFirewallName sets the firewall name of this cluster.
+func (n *Namer) SetFirewallName(firewallName string) {
+	n.nameLock.Lock()
+	defer n.nameLock.Unlock()
+	if n.firewallName != firewallName {
+		glog.Infof("Changing firewall name from %v to %v", n.firewallName, firewallName)
+		n.firewallName = firewallName
+	}
+}
+
 // GetClusterName returns the UID/name of this cluster.
 func (n *Namer) GetClusterName() string {
 	n.nameLock.Lock()
@@ -130,6 +149,18 @@ func (n *Namer) GetClusterName() string {
 	return n.clusterName
 }
 
+// GetFirewallName returns the firewall name of this cluster.
+func (n *Namer) GetFirewallName() string {
+	n.nameLock.Lock()
+	defer n.nameLock.Unlock()
+	// Retain backwards compatible behavior where firewallName == clusterName.
+ if n.firewallName == "" { + return n.clusterName + } else { + return n.firewallName + } +} + // Truncate truncates the given key to a GCE length limit. func (n *Namer) Truncate(key string) string { if len(key) > nameLenLimit { @@ -216,12 +247,12 @@ func (n *Namer) IGName() string { // FrSuffix constructs the glbc specific suffix for the FirewallRule. func (n *Namer) FrSuffix() string { - clusterName := n.GetClusterName() + firewallName := n.GetFirewallName() // The entire cluster only needs a single firewall rule. - if clusterName == "" { + if firewallName == "" { return globalFirewallSuffix } - return n.Truncate(fmt.Sprintf("%v%v%v", globalFirewallSuffix, clusterNameDelimiter, clusterName)) + return n.Truncate(fmt.Sprintf("%v%v%v", globalFirewallSuffix, clusterNameDelimiter, firewallName)) } // FrName constructs the full firewall rule name, this is the name assigned by