Support migrating single cluster among others

Nick Sardo 2017-04-18 16:24:55 -07:00
parent 8dc19e0066
commit 6a596f69e0
2 changed files with 27 additions and 42 deletions


@@ -31,19 +31,21 @@ There are two GCP requirements that complicate changing the backend service bala
#### How to run
```shell
-go run main.go {project-id} {region} {target-balance-mode}
+go run main.go {project-id} {cluster-id} {region} {target-balance-mode}
#Examples
+# Fetch cluster id
+CLUSTERID=`kubectl get configmaps ingress-uid -o jsonpath='{.data.uid}' --namespace=kube-system`
# for upgrading
-go run main.go my-project us-central1 RATE
+go run main.go my-project $CLUSTERID us-central1 RATE
# for reversing
-go run main.go my-project us-central1 UTILIZATION
+go run main.go my-project $CLUSTERID us-central1 UTILIZATION
```
**Example Run**
```shell
-➜ go run mode-updater.go nicksardo-project us-central1 RATE
+➜ go run mode-updater.go nicksardo-project c4424dd5f02d3cad us-central1 RATE
Backend-Service BalancingMode Updater 0.1
Backend Services:
@@ -102,7 +104,7 @@ Step 8: Delete temporary instance groups
#### TODO
- [x] If only one backend-service exists, just update it in place.
- [x] If all backend-services are already the target balancing mode, early return.
-- [ ] Wait for op completion instead of sleeping
+- [x] Wait for op completion instead of sleeping
- [ ] Adjust warning
#### Warning


@@ -1,7 +1,6 @@
package main

import (
-	"errors"
	"flag"
	"fmt"
	"log"
@@ -20,6 +19,7 @@ import (
var (
	projectID string
+	clusterID string
	regionName string
	targetBalancingMode string
@@ -47,10 +47,10 @@ func main() {
	flag.Parse()
	args := flag.Args()
-	if len(args) != 3 {
+	if len(args) != 4 {
-		log.Fatalf("Expected three arguments: project_id region balancing_mode")
+		log.Fatalf("Expected four arguments: project_id cluster_id region balancing_mode")
	}
-	projectID, regionName, targetBalancingMode = args[0], args[1], args[2]
+	projectID, clusterID, regionName, targetBalancingMode = args[0], args[1], args[2], args[3]
	switch targetBalancingMode {
	case balancingModeRATE, balancingModeUTIL:
@@ -82,6 +82,12 @@ func main() {
	}
	zones = zoneList.Items
+	if len(zones) == 0 {
+		panic(fmt.Errorf("Expected at least one zone in region: %v", regionName))
+	}
+	instanceGroupName = fmt.Sprintf("k8s-ig--%s", clusterID)
	// Get instance groups
	for _, z := range zones {
		igl, err := s.InstanceGroups.List(projectID, z.Name).Do()
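It is worth spelling out what the lookup key built above evaluates to. A minimal sketch, assuming the cluster ID fetched in the README example (the value itself is only illustrative):

```go
package main

import "fmt"

func main() {
	// Illustrative cluster ID; in practice it comes from the ingress-uid configmap.
	clusterID := "c4424dd5f02d3cad"
	// Same format string the updater uses, so exactly one instance group name
	// is searched for in every zone of the region.
	fmt.Println(fmt.Sprintf("k8s-ig--%s", clusterID)) // k8s-ig--c4424dd5f02d3cad
}
```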
@@ -89,14 +95,10 @@ func main() {
			panic(err)
		}
		for _, ig := range igl.Items {
-			if !strings.HasPrefix(ig.Name, "k8s-ig--") {
+			if instanceGroupName != ig.Name {
				continue
			}
-			if instanceGroupName == "" {
-				instanceGroupName = ig.Name
-			}
			// Note instances
			r := &compute.InstanceGroupsListInstancesRequest{InstanceState: "ALL"}
			instList, err := s.InstanceGroups.ListInstances(projectID, getResourceName(ig.Zone, "zones"), ig.Name, r).Do()
@@ -118,8 +120,12 @@ func main() {
		}
	}
-	if instanceGroupName == "" {
+	if len(igs) == 0 {
-		panic(errors.New("Could not determine k8s load balancer instance group"))
+		panic(fmt.Errorf("Expected at least one instance group named: %v", instanceGroupName))
+	}
+	if len(instances) == 0 {
+		panic(fmt.Errorf("Expected at least one instance within instance group: %v", instanceGroupName))
	}
	bs := getBackendServices()
@@ -135,7 +141,7 @@ func main() {
	// Early return for special cases
	switch len(bs) {
	case 0:
-		fmt.Println("There are 0 backend services - no action necessary")
+		fmt.Println("\nThere are 0 backend services - no action necessary")
		return
	case 1:
		updateSingleBackend(bs[0])
@@ -144,14 +150,7 @@ func main() {
	// Check there's work to be done
	if typeOfBackends(bs) == targetBalancingMode {
-		fmt.Println("Backends are already set to target mode")
+		fmt.Println("\nBackends are already set to target mode")
-		return
-	}
-	// Check no orphan instance groups will throw us off
-	clusters := getIGClusterIds()
-	if len(clusters) != 1 {
-		fmt.Println("Expecting only cluster of instance groups in GCE, found", clusters)
		return
	}
@@ -273,7 +272,7 @@ func getBackendServices() (bs []*compute.BackendService) {
	for _, bsli := range bsl.Items {
		// Ignore regional backend-services and only grab Kubernetes resources
-		if bsli.Region == "" && strings.HasPrefix(bsli.Name, "k8s-be-") {
+		if bsli.Region == "" && strings.HasPrefix(bsli.Name, "k8s-be-") && strings.HasSuffix(bsli.Name, clusterID) {
			bs = append(bs, bsli)
		}
	}
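The added suffix check is what makes it safe to run the updater in a project hosting several clusters: only backend services carrying this cluster's ID are selected. A small sketch of the same string predicate with made-up names (the real code additionally requires `bsli.Region == ""`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	clusterID := "c4424dd5f02d3cad" // illustrative, from the example run above
	// Hypothetical backend-service names; only the first passes both checks.
	for _, name := range []string{
		"k8s-be-30123--c4424dd5f02d3cad", // this cluster's backend: kept
		"k8s-be-30123--aaaabbbbccccdddd", // another cluster's backend: skipped
		"my-regional-backend",            // not a Kubernetes-created backend: skipped
	} {
		fmt.Println(name, strings.HasPrefix(name, "k8s-be-") && strings.HasSuffix(name, clusterID))
	}
}
```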
@@ -363,22 +362,6 @@ func updateSingleBackend(bs *compute.BackendService) {
	fmt.Println("Updated single backend service to target balancing mode.")
}

-func getIGClusterIds() []string {
-	clusterIds := make(map[string]struct{})
-	for _, ig := range igs {
-		s := strings.Split(ig.Name, "--")
-		if len(s) > 2 {
-			panic(fmt.Errorf("Expected two parts to instance group name, got %v", s))
-		}
-		clusterIds[s[1]] = struct{}{}
-	}
-	var ids []string
-	for v, _ := range clusterIds {
-		ids = append(ids, v)
-	}
-	return ids
-}

// Below operations are copied from the GCE CloudProvider and modified to be static
func waitForOp(op *compute.Operation, getOperation func(operationName string) (*compute.Operation, error)) error {
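The newly checked-off TODO item ("Wait for op completion instead of sleeping") is handled by waitForOp, whose body the hunk cuts off here. A minimal sketch of the polling pattern such a helper typically follows, assuming the fmt, time, and compute (google.golang.org/api/compute/v1) imports already used by this file; the interval and error formatting are illustrative, not the file's exact code:

```go
// Sketch only: poll until GCE reports the operation DONE, then surface any
// operation-level error, instead of sleeping for a fixed duration.
func waitForOpSketch(op *compute.Operation, getOperation func(name string) (*compute.Operation, error)) error {
	for {
		latest, err := getOperation(op.Name)
		if err != nil {
			return err
		}
		if latest.Status == "DONE" {
			if latest.Error != nil && len(latest.Error.Errors) > 0 {
				return fmt.Errorf("operation %v failed: %v", latest.Name, latest.Error.Errors[0].Message)
			}
			return nil
		}
		// A real implementation would also bound the total wait time.
		time.Sleep(2 * time.Second)
	}
}
```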