From 5d83fa45c26b67c97bab381beb942fda9ce01a3c Mon Sep 17 00:00:00 2001
From: Prashanth Balasubramanian
Date: Tue, 24 May 2016 11:31:18 -0700
Subject: [PATCH] Pipe through custom GCE config file path

---
 controllers/gce/controller/cluster_manager.go | 21 ++++++++++++++++---
 controllers/gce/main.go                       | 11 ++++++++--
 2 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/controllers/gce/controller/cluster_manager.go b/controllers/gce/controller/cluster_manager.go
index be3e899f5..860c8e20a 100644
--- a/controllers/gce/controller/cluster_manager.go
+++ b/controllers/gce/controller/cluster_manager.go
@@ -18,7 +18,9 @@ package controller
 
 import (
 	"fmt"
+	"io"
 	"net/http"
+	"os"
 	"time"
 
 	"k8s.io/contrib/ingress/controllers/gce/backends"
@@ -184,13 +186,13 @@ func defaultInstanceGroupName(clusterName string) string {
 	return fmt.Sprintf("%v-%v", instanceGroupPrefix, clusterName)
 }
 
-func getGCEClient() *gce.GCECloud {
+func getGCEClient(config io.Reader) *gce.GCECloud {
 	// Creating the cloud interface involves resolving the metadata server to get
 	// an oauth token. If this fails, the token provider assumes it's not on GCE.
 	// No errors are thrown. So we need to keep retrying till it works because
 	// we know we're on GCE.
 	for {
-		cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
+		cloudInterface, err := cloudprovider.GetCloudProvider("gce", config)
 		if err == nil {
 			cloud := cloudInterface.(*gce.GCECloud)
 
@@ -217,15 +219,28 @@
 // the kubernetes Service that serves the 404 page if no urls match.
 // - defaultHealthCheckPath: is the default path used for L7 health checks, eg: "/healthz"
 func NewClusterManager(
+	configFilePath string,
 	name string,
 	defaultBackendNodePort int64,
 	defaultHealthCheckPath string) (*ClusterManager, error) {
+	var config *os.File
+	var err error
+	if configFilePath != "" {
+		glog.Infof("Reading config from path %v", configFilePath)
+		config, err = os.Open(configFilePath)
+		if err != nil {
+			return nil, err
+		}
+		defer config.Close()
+	}
+
 	// TODO: Make this more resilient. Currently we create the cloud client
 	// and pass it through to all the pools. This makes unittesting easier.
 	// However if the cloud client suddenly fails, we should try to re-create it
 	// and continue.
-	cloud := getGCEClient()
+	cloud := getGCEClient(config)
+	glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath)
 
 	// Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
 	cluster := ClusterManager{ClusterNamer: &utils.Namer{name}}
diff --git a/controllers/gce/main.go b/controllers/gce/main.go
index 6095990e5..f4199147f 100644
--- a/controllers/gce/main.go
+++ b/controllers/gce/main.go
@@ -110,6 +110,13 @@
 
 	verbose = flags.Bool("verbose", false,
 		`If true, logs are displayed at V(4), otherwise V(2).`)
+
+	configFilePath = flags.String("config-file-path", "",
+		`Path to a file containing the gce config. If left unspecified this
+		controller only works with default zones.`)
+
+	healthzPort = flags.Int("healthz-port", lbApiPort,
+		`Port to run healthz server. Must match the health check port in yaml.`)
 )
 
 func registerHandlers(lbc *controller.LoadBalancerController) {
@@ -127,7 +134,7 @@ func registerHandlers(lbc *controller.LoadBalancerController) {
 		lbc.Stop(true)
 	})
 
-	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", lbApiPort), nil))
+	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", *healthzPort), nil))
 }
 
 func handleSigterm(lbc *controller.LoadBalancerController, deleteAll bool) {
@@ -196,7 +203,7 @@ func main() {
 	if err != nil {
 		glog.Fatalf("%v", err)
 	}
-	clusterManager, err = controller.NewClusterManager(name, defaultBackendNodePort, *healthCheckPath)
+	clusterManager, err = controller.NewClusterManager(*configFilePath, name, defaultBackendNodePort, *healthCheckPath)
 	if err != nil {
 		glog.Fatalf("%v", err)
 	}
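
For reference, below is a stripped-down, standard-library-only sketch of the flag-to-io.Reader plumbing this patch adds: an optional --config-file-path flag whose contents are handed down to the client constructor as an io.Reader, with an empty path meaning "no custom config". This is not the controller's code; the names here (newClient, the log messages) are illustrative stand-ins for getGCEClient and cloudprovider.GetCloudProvider("gce", config).

package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

var configFilePath = flag.String("config-file-path", "",
	"Path to a file containing the gce config. Empty means rely on provider defaults.")

// newClient stands in for the real client constructor; it only reports
// whether a custom config was supplied and how large it was.
func newClient(config io.Reader) error {
	if config == nil {
		fmt.Println("no custom config, relying on provider defaults")
		return nil
	}
	contents, err := ioutil.ReadAll(config)
	if err != nil {
		return err
	}
	fmt.Printf("loaded %d bytes of cloud config\n", len(contents))
	return nil
}

func main() {
	flag.Parse()

	// Declared as io.Reader so that an unset flag leaves the interface nil,
	// letting newClient distinguish "no config" from an opened (possibly empty) file.
	var config io.Reader
	if *configFilePath != "" {
		f, err := os.Open(*configFilePath)
		if err != nil {
			log.Fatalf("%v", err)
		}
		defer f.Close()
		config = f
	}

	if err := newClient(config); err != nil {
		log.Fatalf("%v", err)
	}
}

Run without flags, the sketch prints the default-config message; run with --config-file-path pointing at any readable file, it reports the number of config bytes it read, mirroring how the controller now forwards the file to the cloud provider.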