Update GCE ingress controller
parent 1bc383f9c5
commit 4d1887310b

21 changed files with 125 additions and 111 deletions
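Most of the 21 files change in the same three ways: import paths move from k8s.io/contrib/ingress/controllers/gce/... to k8s.io/ingress/controllers/gce/..., the typed *client.Client from pkg/client/unversioned is replaced by the generated internalclientset's client.Interface (resources are then reached through group clients such as Core() and Extensions(), and Ingress() becomes Ingresses()), and the pkg/controller/framework informer helpers are replaced by their pkg/client/cache equivalents. As orientation, a minimal sketch of the signature change on a consumer, assuming the internal Kubernetes packages of that era; the helper name is illustrative and not taken from this diff:

```go
package example

import (
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// Before: func firstNodePort(kubeClient *unversioned.Client, ns, name string) (int32, error)
// After: the generated clientset interface, with resources behind group clients.
func firstNodePort(kubeClient internalclientset.Interface, ns, name string) (int32, error) {
	svc, err := kubeClient.Core().Services(ns).Get(name) // Core() group client, not a direct method
	if err != nil {
		return 0, err
	}
	return svc.Spec.Ports[0].NodePort, nil
}
```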
@@ -26,10 +26,10 @@ import (
 	"github.com/golang/glog"
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
-	"k8s.io/contrib/ingress/controllers/gce/instances"
-	"k8s.io/contrib/ingress/controllers/gce/storage"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/healthchecks"
+	"k8s.io/ingress/controllers/gce/instances"
+	"k8s.io/ingress/controllers/gce/storage"
+	"k8s.io/ingress/controllers/gce/utils"
 )

 // Backends implements BackendPool.
@@ -20,10 +20,10 @@ import (
 	"testing"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
-	"k8s.io/contrib/ingress/controllers/gce/instances"
-	"k8s.io/contrib/ingress/controllers/gce/storage"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/healthchecks"
+	"k8s.io/ingress/controllers/gce/instances"
+	"k8s.io/ingress/controllers/gce/storage"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -19,7 +19,7 @@ package backends
 import (
 	"fmt"
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 )

 // NewFakeBackendServices creates a new fake backend services manager.
@@ -22,12 +22,12 @@ import (
 	"os"
 	"time"

-	"k8s.io/contrib/ingress/controllers/gce/backends"
-	"k8s.io/contrib/ingress/controllers/gce/firewalls"
-	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
-	"k8s.io/contrib/ingress/controllers/gce/instances"
-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/backends"
+	"k8s.io/ingress/controllers/gce/firewalls"
+	"k8s.io/ingress/controllers/gce/healthchecks"
+	"k8s.io/ingress/controllers/gce/instances"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -23,14 +23,15 @@ import (
 	"sync"
 	"time"

-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/utils"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/watch"
@@ -39,7 +40,7 @@ import (
 )

 var (
-	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
+	keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc

 	// DefaultClusterUID is the uid to use for clusters resources created by an
 	// L7 controller created without specifying the --cluster-uid flag.
@@ -52,11 +53,11 @@ var (
 // LoadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer, via loadBalancerConfig.
 type LoadBalancerController struct {
-	client         *client.Client
-	ingController  *framework.Controller
-	nodeController *framework.Controller
-	svcController  *framework.Controller
-	podController  *framework.Controller
+	client         client.Interface
+	ingController  *cache.Controller
+	nodeController *cache.Controller
+	svcController  *cache.Controller
+	podController  *cache.Controller
 	ingLister      StoreToIngressLister
 	nodeLister     cache.StoreToNodeLister
 	svcLister      cache.StoreToServiceLister
@@ -86,11 +87,12 @@ type LoadBalancerController struct {
 // - clusterManager: A ClusterManager capable of creating all cloud resources
 //   required for L7 loadbalancing.
 // - resyncPeriod: Watchers relist from the Kubernetes API server this often.
-func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
+func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
-
+	eventBroadcaster.StartRecordingToSink(unversionedcore.EventSinkImpl{
+		Interface: kubeClient.Core().Events(""),
+	})
 	lbc := LoadBalancerController{
 		client:              kubeClient,
 		CloudClusterManager: clusterManager,
@@ -103,7 +105,7 @@ func NewLoadBalancerController(kubeClient *client.Client, clusterManager *Cluste
 	lbc.hasSynced = lbc.storesSynced

 	// Ingress watch handlers
-	pathHandlers := framework.ResourceEventHandlerFuncs{
+	pathHandlers := cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			addIng := obj.(*extensions.Ingress)
 			if !isGCEIngress(addIng) {
@@ -133,7 +135,7 @@ func NewLoadBalancerController(kubeClient *client.Client, clusterManager *Cluste
 			lbc.ingQueue.enqueue(cur)
 		},
 	}
-	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
+	lbc.ingLister.Store, lbc.ingController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc:  ingressListFunc(lbc.client, namespace),
 			WatchFunc: ingressWatchFunc(lbc.client, namespace),
@@ -141,7 +143,7 @@ func NewLoadBalancerController(kubeClient *client.Client, clusterManager *Cluste
 		&extensions.Ingress{}, resyncPeriod, pathHandlers)

 	// Service watch handlers
-	svcHandlers := framework.ResourceEventHandlerFuncs{
+	svcHandlers := cache.ResourceEventHandlerFuncs{
 		AddFunc: lbc.enqueueIngressForService,
 		UpdateFunc: func(old, cur interface{}) {
 			if !reflect.DeepEqual(old, cur) {
@@ -151,36 +153,39 @@ func NewLoadBalancerController(kubeClient *client.Client, clusterManager *Cluste
 		// Ingress deletes matter, service deletes don't.
 	}

-	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
-		cache.NewListWatchFromClient(
-			lbc.client, "services", namespace, fields.Everything()),
-		&api.Service{}, resyncPeriod, svcHandlers)
-
-	lbc.podLister.Indexer, lbc.podController = framework.NewIndexerInformer(
-		cache.NewListWatchFromClient(lbc.client, "pods", namespace, fields.Everything()),
-		&api.Pod{},
+	lbc.svcLister.Indexer, lbc.svcController = cache.NewIndexerInformer(
+		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "services", namespace, fields.Everything()),
+		&api.Service{},
 		resyncPeriod,
-		framework.ResourceEventHandlerFuncs{},
+		svcHandlers,
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)

-	nodeHandlers := framework.ResourceEventHandlerFuncs{
+	lbc.podLister.Indexer, lbc.podController = cache.NewIndexerInformer(
+		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "pods", namespace, fields.Everything()),
+		&api.Pod{},
+		resyncPeriod,
+		cache.ResourceEventHandlerFuncs{},
+		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+	)
+
+	nodeHandlers := cache.ResourceEventHandlerFuncs{
 		AddFunc:    lbc.nodeQueue.enqueue,
 		DeleteFunc: lbc.nodeQueue.enqueue,
 		// Nodes are updated every 10s and we don't care, so no update handler.
 	}
 	// Node watch handlers
-	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
+	lbc.nodeLister.Store, lbc.nodeController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
-				return lbc.client.Get().
+				return lbc.client.Core().RESTClient().Get().
 					Resource("nodes").
 					FieldsSelectorParam(fields.Everything()).
 					Do().
 					Get()
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return lbc.client.Get().
+				return lbc.client.Core().RESTClient().Get().
 					Prefix("watch").
 					Resource("nodes").
 					FieldsSelectorParam(fields.Everything()).
@@ -196,15 +201,15 @@ func NewLoadBalancerController(kubeClient *client.Client, clusterManager *Cluste
 	return &lbc, nil
 }

-func ingressListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
+func ingressListFunc(c client.Interface, ns string) func(api.ListOptions) (runtime.Object, error) {
 	return func(opts api.ListOptions) (runtime.Object, error) {
-		return c.Extensions().Ingress(ns).List(opts)
+		return c.Extensions().Ingresses(ns).List(opts)
 	}
 }

-func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
+func ingressWatchFunc(c client.Interface, ns string) func(options api.ListOptions) (watch.Interface, error) {
 	return func(options api.ListOptions) (watch.Interface, error) {
-		return c.Extensions().Ingress(ns).Watch(options)
+		return c.Extensions().Ingresses(ns).Watch(options)
 	}
 }
@@ -364,7 +369,7 @@ func (lbc *LoadBalancerController) sync(key string) (err error) {
 // updateIngressStatus updates the IP and annotations of a loadbalancer.
 // The annotations are parsed by kubectl describe.
 func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing extensions.Ingress) error {
-	ingClient := lbc.client.Extensions().Ingress(ing.Namespace)
+	ingClient := lbc.client.Extensions().Ingresses(ing.Namespace)

 	// Update IP through update/status endpoint
 	ip := l7.GetIP()
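The informer rewiring shown above is the pattern the rest of the controller now follows: listers are built with cache.NewIndexerInformer over a list-watch on the clientset's REST client. A self-contained sketch of that construction, assuming the same pre-client-go internal packages; the function name, empty handlers, and 30-second resync are placeholders:

```go
package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
)

// newServiceInformer mirrors the svcLister/svcController wiring above:
// a namespace-indexed Service cache fed by the core group's REST client.
func newServiceInformer(kubeClient internalclientset.Interface, namespace string) (cache.Indexer, *cache.Controller) {
	return cache.NewIndexerInformer(
		cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", namespace, fields.Everything()),
		&api.Service{},
		30*time.Second,                    // placeholder resync period
		cache.ResourceEventHandlerFuncs{}, // no event handlers in this sketch
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}
```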
@@ -23,14 +23,16 @@ import (
 	"time"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/firewalls"
-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+
+	"k8s.io/ingress/controllers/gce/firewalls"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/utils"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/restclient"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/uuid"
@@ -50,7 +52,7 @@ func defaultBackendName(clusterName string) string {

 // newLoadBalancerController create a loadbalancer controller.
 func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterUrl string) *LoadBalancerController {
-	client := client.NewOrDie(&restclient.Config{Host: masterUrl, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := client.NewForConfigOrDie(&restclient.Config{Host: masterUrl, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll)
 	if err != nil {
 		t.Fatalf("%v", err)
@@ -191,7 +193,7 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 			}
 			svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))
 			svc.Spec.Ports = []api.ServicePort{svcPort}
-			lbc.svcLister.Store.Add(svc)
+			lbc.svcLister.Indexer.Add(svc)
 		}
 	}
 }
@@ -20,12 +20,12 @@ import (
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"

-	"k8s.io/contrib/ingress/controllers/gce/backends"
-	"k8s.io/contrib/ingress/controllers/gce/firewalls"
-	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
-	"k8s.io/contrib/ingress/controllers/gce/instances"
-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/backends"
+	"k8s.io/ingress/controllers/gce/firewalls"
+	"k8s.io/ingress/controllers/gce/healthchecks"
+	"k8s.io/ingress/controllers/gce/instances"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/utils"
 )

 const (
@@ -19,10 +19,11 @@ package controller
 import (
 	"fmt"

-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

 	"github.com/golang/glog"
 )
@@ -43,7 +44,7 @@ func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error {
 // apiServerTLSLoader loads TLS certs from the apiserver.
 type apiServerTLSLoader struct {
 	noOPValidator
-	client *client.Client
+	client client.Interface
 }

 func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
@@ -58,7 +59,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 	secretName := ing.Spec.TLS[0].SecretName
 	// TODO: Replace this for a secret watcher.
 	glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
-	secret, err := t.client.Secrets(ing.Namespace).Get(secretName)
+	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName)
 	if err != nil {
 		return nil, err
 	}
@@ -179,7 +179,7 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 		}
 		svc.Name = fmt.Sprintf("%d", np)
 		svc.Namespace = ns
-		lbc.svcLister.Store.Add(svc)
+		lbc.svcLister.Indexer.Add(svc)

 		pod := &api.Pod{
 			ObjectMeta: api.ObjectMeta{
@@ -23,8 +23,8 @@ import (
 	"time"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
@@ -291,7 +291,7 @@ func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (
 // getServiceNodePort looks in the svc store for a matching service:port,
 // and returns the nodeport.
 func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) {
-	obj, exists, err := t.svcLister.Store.Get(
+	obj, exists, err := t.svcLister.Indexer.Get(
 		&api.Service{
 			ObjectMeta: api.ObjectMeta{
 				Name: be.ServiceName,
@@ -457,15 +457,15 @@ func isSimpleHTTPProbe(probe *api.Probe) bool {
 // the request path, callers are responsible for swapping this out for the
 // appropriate default.
 func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
-	sl, err := t.svcLister.List()
+	sl, err := t.svcLister.List(labels.Everything())
 	if err != nil {
 		return nil, err
 	}
 	// Find the label and target port of the one service with the given nodePort
-	for _, s := range sl.Items {
+	for _, s := range sl {
 		for _, p := range s.Spec.Ports {
 			if int32(port) == p.NodePort {
-				rp, err := t.getHTTPProbe(s, p.TargetPort)
+				rp, err := t.getHTTPProbe(*s, p.TargetPort)
 				if err != nil {
 					return nil, err
 				}
@@ -20,7 +20,7 @@ import (
 	"fmt"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 	netset "k8s.io/kubernetes/pkg/util/net/sets"
 )
@@ -21,7 +21,7 @@ import (
 	"strconv"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 	netset "k8s.io/kubernetes/pkg/util/net/sets"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -20,7 +20,7 @@ import (
 	"fmt"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 )

 // NewFakeHealthChecks returns a new FakeHealthChecks.
@@ -20,7 +20,7 @@ import (
 	compute "google.golang.org/api/compute/v1"

 	"github.com/golang/glog"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 	"net/http"
 )
@@ -20,7 +20,7 @@ import (
 	"fmt"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -22,8 +22,8 @@ import (
 	"strings"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/storage"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/storage"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"

 	"github.com/golang/glog"
@@ -21,7 +21,7 @@ import (
 	"testing"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -26,9 +26,9 @@ import (
 	"strings"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/backends"
-	"k8s.io/contrib/ingress/controllers/gce/storage"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/backends"
+	"k8s.io/ingress/controllers/gce/storage"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"

 	"github.com/golang/glog"
@@ -21,10 +21,10 @@ import (
 	"testing"

 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/contrib/ingress/controllers/gce/backends"
-	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
-	"k8s.io/contrib/ingress/controllers/gce/instances"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/backends"
+	"k8s.io/ingress/controllers/gce/healthchecks"
+	"k8s.io/ingress/controllers/gce/instances"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -27,12 +27,13 @@ import (
 	"time"

 	flag "github.com/spf13/pflag"
-	"k8s.io/contrib/ingress/controllers/gce/controller"
-	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
-	"k8s.io/contrib/ingress/controllers/gce/storage"
-	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/ingress/controllers/gce/controller"
+	"k8s.io/ingress/controllers/gce/loadbalancers"
+	"k8s.io/ingress/controllers/gce/storage"
+	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/client/restclient"
 	kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -159,7 +160,6 @@ func handleSigterm(lbc *controller.LoadBalancerController, deleteAll bool) {
 // main function for GLBC.
 func main() {
 	// TODO: Add a healthz endpoint
-	var kubeClient *client.Client
 	var err error
 	var clusterManager *controller.ClusterManager
@@ -181,18 +181,24 @@ func main() {
 		glog.Fatalf("Please specify --default-backend")
 	}

+	var config *restclient.Config
 	// Create kubeclient
 	if *inCluster {
-		if kubeClient, err = client.NewInCluster(); err != nil {
-			glog.Fatalf("Failed to create client: %v.", err)
+		if config, err = restclient.InClusterConfig(); err != nil {
+			glog.Fatalf("error creating client configuration: %v", err)
 		}
 	} else {
-		config, err := clientConfig.ClientConfig()
+		config, err = clientConfig.ClientConfig()
 		if err != nil {
-			glog.Fatalf("error connecting to the client: %v", err)
+			glog.Fatalf("error creating client configuration: %v", err)
 		}
-		kubeClient, err = client.New(config)
 	}

+	kubeClient, err := client.NewForConfig(config)
+	if err != nil {
+		glog.Fatalf("Failed to create client: %v.", err)
+	}
+
 	// Wait for the default backend Service. There's no pretty way to do this.
 	parts := strings.Split(*defaultSvc, "/")
 	if len(parts) != 2 {
@@ -239,7 +245,7 @@ func main() {
 	}
 }

-func newNamer(kubeClient *client.Client, clusterName string) (*utils.Namer, error) {
+func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, error) {
 	name, err := getClusterUID(kubeClient, clusterName)
 	if err != nil {
 		return nil, err
@@ -271,7 +277,7 @@ func newNamer(kubeClient *client.Client, clusterName string) (*utils.Namer, erro
 // else, check if there are any working Ingresses
 // - remember that "" is the cluster uid
 // else, allocate a new uid
-func getClusterUID(kubeClient *client.Client, name string) (string, error) {
+func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 	if name != "" {
 		glog.Infof("Using user provided cluster uid %v", name)
@@ -294,7 +300,7 @@ func getClusterUID(kubeClient *client.Client, name string) (string, error) {
 	}

 	// Check if the cluster has an Ingress with ip
-	ings, err := kubeClient.Extensions().Ingress(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
+	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
 	if err != nil {
 		return "", err
 	}
@@ -325,11 +331,11 @@ func getClusterUID(kubeClient *client.Client, name string) (string, error) {
 }

 // getNodePort waits for the Service, and returns it's first node port.
-func getNodePort(client *client.Client, ns, name string) (nodePort int64, err error) {
+func getNodePort(client client.Interface, ns, name string) (nodePort int64, err error) {
 	var svc *api.Service
 	glog.V(3).Infof("Waiting for %v/%v", ns, name)
 	wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-		svc, err = client.Services(ns).Get(name)
+		svc, err = client.Core().Services(ns).Get(name)
 		if err != nil {
 			return false, nil
 		}
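The main() change above drops client.NewInCluster()/client.New() in favour of building a rest config first and then constructing the generated clientset from it. A compressed sketch of that bootstrap path, assuming the same packages; the in-cluster-only flow here is a simplification of what main() actually does:

```go
package main

import (
	"github.com/golang/glog"

	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Build a rest config (in-cluster in this sketch), then construct the
	// generated clientset from it, as main() now does.
	config, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("error creating client configuration: %v", err)
	}
	kubeClient, err := internalclientset.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to create client: %v.", err)
	}
	_ = kubeClient // hand the client to the controller from here
}
```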
@@ -24,7 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 )

 // UIDVault stores UIDs.
@@ -107,7 +107,7 @@ func (c *ConfigMapVault) Delete() error {
 // NewConfigMapVault creates a config map client.
 // This client is essentially meant to abstract out the details of
 // configmaps and the API, and just store/retrieve a single value, the cluster uid.
-func NewConfigMapVault(c *client.Client, uidNs, uidConfigMapName string) *ConfigMapVault {
+func NewConfigMapVault(c client.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
 	return &ConfigMapVault{NewConfigMapStore(c), uidNs, uidConfigMapName}
 }
@@ -128,27 +128,27 @@ type ConfigMapStore interface {
 // through cache.
 type ApiServerConfigMapStore struct {
 	ConfigMapStore
-	client *client.Client
+	client client.Interface
 }

 // Add adds the given config map to the apiserver's store.
 func (a *ApiServerConfigMapStore) Add(obj interface{}) error {
 	cfg := obj.(*api.ConfigMap)
-	_, err := a.client.ConfigMaps(cfg.Namespace).Create(cfg)
+	_, err := a.client.Core().ConfigMaps(cfg.Namespace).Create(cfg)
 	return err
 }

 // Update updates the existing config map object.
 func (a *ApiServerConfigMapStore) Update(obj interface{}) error {
 	cfg := obj.(*api.ConfigMap)
-	_, err := a.client.ConfigMaps(cfg.Namespace).Update(cfg)
+	_, err := a.client.Core().ConfigMaps(cfg.Namespace).Update(cfg)
 	return err
 }

 // Delete deletes the existing config map object.
 func (a *ApiServerConfigMapStore) Delete(obj interface{}) error {
 	cfg := obj.(*api.ConfigMap)
-	return a.client.ConfigMaps(cfg.Namespace).Delete(cfg.Name)
+	return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &api.DeleteOptions{})
 }

 // GetByKey returns the config map for a given key.
@@ -159,7 +159,7 @@ func (a *ApiServerConfigMapStore) GetByKey(key string) (item interface{}, exists
 		return nil, false, fmt.Errorf("Failed to get key %v, unexpecte format, expecting ns/name", key)
 	}
 	ns, name := nsName[0], nsName[1]
-	cfg, err := a.client.ConfigMaps(ns).Get(name)
+	cfg, err := a.client.Core().ConfigMaps(ns).Get(name)
 	if err != nil {
 		// Translate not found errors to found=false, err=nil
 		if errors.IsNotFound(err) {
@@ -172,6 +172,6 @@ func (a *ApiServerConfigMapStore) GetByKey(key string) (item interface{}, exists

 // NewConfigMapStore returns a config map store capable of persisting updates
 // to apiserver.
-func NewConfigMapStore(c *client.Client) ConfigMapStore {
+func NewConfigMapStore(c client.Interface) ConfigMapStore {
 	return &ApiServerConfigMapStore{ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc), client: c}
 }