Update gce controller

parent ea7f943160
commit c7c2a564a9

26 changed files with 275 additions and 236 deletions
@@ -23,10 +23,11 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/kubernetes/pkg/util/sets"
-
 	"github.com/golang/glog"
 
 	compute "google.golang.org/api/compute/v1"
+
+	"k8s.io/apimachinery/pkg/util/sets"
 
 	"k8s.io/ingress/controllers/gce/healthchecks"
 	"k8s.io/ingress/controllers/gce/instances"
 	"k8s.io/ingress/controllers/gce/storage"
@@ -21,13 +21,13 @@ import (
 	"testing"
 
 	compute "google.golang.org/api/compute/v1"
+	"google.golang.org/api/googleapi"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/healthchecks"
 	"k8s.io/ingress/controllers/gce/instances"
 	"k8s.io/ingress/controllers/gce/storage"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
-
-	"google.golang.org/api/googleapi"
 )
 
 const defaultZone = "zone-a"
@@ -20,8 +20,9 @@ import (
 	"fmt"
 
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/client-go/tools/cache"
+
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/client/cache"
 )
 
 // NewFakeBackendServices creates a new fake backend services manager.
@@ -22,16 +22,17 @@ import (
 	"os"
 	"time"
 
+	"github.com/golang/glog"
+
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/healthchecks"
 	"k8s.io/ingress/controllers/gce/instances"
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/cloudprovider"
-	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -23,20 +23,21 @@ import (
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/client-go/kubernetes"
+	unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
+	listers "k8s.io/client-go/listers/core/v1"
+	base_api "k8s.io/client-go/pkg/api"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/watch"
-
-	"github.com/golang/glog"
 )
 
 var (
@@ -57,16 +58,16 @@ var (
 // LoadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer, via loadBalancerConfig.
 type LoadBalancerController struct {
-	client         client.Interface
-	ingController  *cache.Controller
-	nodeController *cache.Controller
-	svcController  *cache.Controller
-	podController  *cache.Controller
+	client         kubernetes.Interface
+	ingController  cache.Controller
+	nodeController cache.Controller
+	svcController  cache.Controller
+	podController  cache.Controller
 	ingLister      StoreToIngressLister
-	nodeLister     cache.StoreToNodeLister
-	svcLister      cache.StoreToServiceLister
+	nodeLister     StoreToNodeLister
+	svcLister      StoreToServiceLister
 	// Health checks are the readiness probes of containers on pods.
-	podLister cache.StoreToPodLister
+	podLister StoreToPodLister
 	// TODO: Watch secrets
 	CloudClusterManager *ClusterManager
 	recorder            record.EventRecorder
@@ -91,7 +92,7 @@ type LoadBalancerController struct {
 // - clusterManager: A ClusterManager capable of creating all cloud resources
 // required for L7 loadbalancing.
 // - resyncPeriod: Watchers relist from the Kubernetes API server this often.
-func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
+func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
@@ -101,7 +102,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		client:              kubeClient,
 		CloudClusterManager: clusterManager,
 		stopCh:              make(chan struct{}),
-		recorder: eventBroadcaster.NewRecorder(
+		recorder: eventBroadcaster.NewRecorder(base_api.Scheme,
 			api.EventSource{Component: "loadbalancer-controller"}),
 	}
 	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
@@ -140,10 +141,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		},
 	}
 	lbc.ingLister.Store, lbc.ingController = cache.NewInformer(
-		&cache.ListWatch{
-			ListFunc:  ingressListFunc(lbc.client, namespace),
-			WatchFunc: ingressWatchFunc(lbc.client, namespace),
-		},
+		cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()),
 		&extensions.Ingress{}, resyncPeriod, pathHandlers)
 
 	// Service watch handlers
@@ -173,30 +171,14 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)
 
-	nodeHandlers := cache.ResourceEventHandlerFuncs{
-		AddFunc:    lbc.nodeQueue.enqueue,
-		DeleteFunc: lbc.nodeQueue.enqueue,
-		// Nodes are updated every 10s and we don't care, so no update handler.
-	}
 	// Node watch handlers
-	lbc.nodeLister.Store, lbc.nodeController = cache.NewInformer(
-		&cache.ListWatch{
-			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
-				return lbc.client.Core().RESTClient().Get().
-					Resource("nodes").
-					FieldsSelectorParam(fields.Everything()).
-					Do().
-					Get()
-			},
-			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return lbc.client.Core().RESTClient().Get().
-					Prefix("watch").
-					Resource("nodes").
-					FieldsSelectorParam(fields.Everything()).
-					Param("resourceVersion", options.ResourceVersion).Watch()
-			},
-		},
-		&api.Node{}, 0, nodeHandlers)
+	lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer(
+		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
+		&api.Node{},
+		resyncPeriod,
+		cache.ResourceEventHandlerFuncs{},
+		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+	)
 
 	lbc.tr = &GCETranslator{&lbc}
 	lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}
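Aside, not part of the diff: the value-typed cache.Controller that client-go's NewInformer/NewIndexerInformer now returns is driven the same way as the old pointer type. A minimal usage sketch (variable names match the controller above, but this wiring is illustrative, not code from this commit):

	stopCh := make(chan struct{})
	defer close(stopCh)

	go lbc.ingController.Run(stopCh)
	go lbc.nodeController.Run(stopCh)

	// Wait for the initial List to land in the stores before trusting the listers.
	for !lbc.ingController.HasSynced() || !lbc.nodeController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}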
@@ -205,18 +187,6 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 	return &lbc, nil
 }
 
-func ingressListFunc(c client.Interface, ns string) func(api.ListOptions) (runtime.Object, error) {
-	return func(opts api.ListOptions) (runtime.Object, error) {
-		return c.Extensions().Ingresses(ns).List(opts)
-	}
-}
-
-func ingressWatchFunc(c client.Interface, ns string) func(options api.ListOptions) (watch.Interface, error) {
-	return func(options api.ListOptions) (watch.Interface, error) {
-		return c.Extensions().Ingresses(ns).Watch(options)
-	}
-}
-
 // enqueueIngressForService enqueues all the Ingress' for a Service.
 func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
 	svc := obj.(*api.Service)
@@ -377,7 +347,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing
 
 	// Update IP through update/status endpoint
 	ip := l7.GetIP()
-	currIng, err := ingClient.Get(ing.Name)
+	currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -401,7 +371,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing
 		}
 	}
 	// Update annotations through /update endpoint
-	currIng, err = ingClient.Get(ing.Name)
+	currIng, err = ingClient.Get(ing.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -464,7 +434,7 @@ func (lbc *LoadBalancerController) syncNodes(key string) error {
 	return nil
 }
 
-func getNodeReadyPredicate() cache.NodeConditionPredicate {
+func getNodeReadyPredicate() listers.NodeConditionPredicate {
 	return func(node *api.Node) bool {
 		for ix := range node.Status.Conditions {
 			condition := &node.Status.Conditions[ix]
@@ -479,7 +449,7 @@ func getNodeReadyPredicate() cache.NodeConditionPredicate {
 // getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
 func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
 	nodeNames := []string{}
-	nodes, err := lbc.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return nodeNames, err
 	}
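Aside, not from the commit: the predicate-based listing above composes from two pieces, a listers.NodeConditionPredicate and a NodeLister built over the informer's Indexer. A sketch with the predicate inlined, purely to show the shape of the call:

	readyOnly := func(node *api.Node) bool {
		for _, c := range node.Status.Conditions {
			if c.Type == api.NodeReady {
				return c.Status == api.ConditionTrue
			}
		}
		return false
	}
	nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(readyOnly)
	if err != nil {
		glog.Errorf("listing ready nodes: %v", err)
	}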
@@ -24,18 +24,18 @@ import (
 
 	compute "google.golang.org/api/compute/v1"
 
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/pkg/api"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/testapi"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/util/uuid"
 )
 
 const testClusterName = "testcluster"
@@ -51,9 +51,9 @@ func defaultBackendName(clusterName string) string {
 }
 
 // newLoadBalancerController create a loadbalancer controller.
-func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterURL string) *LoadBalancerController {
-	client := client.NewForConfigOrDie(&restclient.Config{Host: masterURL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll)
+func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController {
+	kubeClient := fake.NewSimpleClientset()
+	lb, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
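Aside, not from the commit: fake.NewSimpleClientset can also be seeded with objects, so a test can pre-populate the API that the controller sees through the normal typed client. A hypothetical example (the service name here is illustrative):

	kubeClient := fake.NewSimpleClientset(&api_v1.Service{
		ObjectMeta: meta_v1.ObjectMeta{Name: "foo1svc", Namespace: "default"},
	})
	// The seeded object is visible through the ordinary typed accessors.
	svc, err := kubeClient.Core().Services("default").Get("foo1svc", meta_v1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	t.Logf("got service %q", svc.Name)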
@@ -95,7 +95,7 @@ func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extens
 // newIngress returns a new Ingress with the given path map.
 func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {
 	return &extensions.Ingress{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      fmt.Sprintf("%v", uuid.NewUUID()),
 			Namespace: api.NamespaceNone,
 		},
@@ -107,8 +107,8 @@ func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.
 			Rules: toIngressRules(hostRules),
 		},
 		Status: extensions.IngressStatus{
-			LoadBalancer: api.LoadBalancerStatus{
-				Ingress: []api.LoadBalancerIngress{
+			LoadBalancer: api_v1.LoadBalancerStatus{
+				Ingress: []api_v1.LoadBalancerIngress{
 					{IP: testIPManager.ip()},
 				},
 			},
@@ -178,21 +178,21 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 	}
 	for _, rule := range ing.Spec.Rules {
 		for _, path := range rule.HTTP.Paths {
-			svc := &api.Service{
-				ObjectMeta: api.ObjectMeta{
+			svc := &api_v1.Service{
+				ObjectMeta: meta_v1.ObjectMeta{
 					Name:      path.Backend.ServiceName,
 					Namespace: ing.Namespace,
 				},
 			}
-			var svcPort api.ServicePort
+			var svcPort api_v1.ServicePort
 			switch path.Backend.ServicePort.Type {
 			case intstr.Int:
-				svcPort = api.ServicePort{Port: path.Backend.ServicePort.IntVal}
+				svcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}
 			default:
-				svcPort = api.ServicePort{Name: path.Backend.ServicePort.StrVal}
+				svcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}
 			}
 			svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))
-			svc.Spec.Ports = []api.ServicePort{svcPort}
+			svc.Spec.Ports = []api_v1.ServicePort{svcPort}
 			lbc.svcLister.Indexer.Add(svc)
 		}
 	}
@@ -201,7 +201,7 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 func TestLbCreateDelete(t *testing.T) {
 	testFirewallName := "quux"
 	cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap1 := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -293,7 +293,7 @@ func TestLbCreateDelete(t *testing.T) {
 
 func TestLbFaultyUpdate(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -330,7 +330,7 @@ func TestLbFaultyUpdate(t *testing.T) {
 
 func TestLbDefaulting(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	// Make sure the controller plugs in the default values accepted by GCE.
 	ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
 	pm := newPortManager(1, 65536)
@@ -348,7 +348,7 @@ func TestLbDefaulting(t *testing.T) {
 
 func TestLbNoService(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -373,8 +373,8 @@ func TestLbNoService(t *testing.T) {
 	// Creates the service, next sync should have complete url map.
 	pm := newPortManager(1, 65536)
 	addIngress(lbc, ing, pm)
-	lbc.enqueueIngressForService(&api.Service{
-		ObjectMeta: api.ObjectMeta{
+	lbc.enqueueIngressForService(&api_v1.Service{
+		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      "foo1svc",
 			Namespace: ing.Namespace,
 		},
@@ -392,7 +392,7 @@ func TestLbNoService(t *testing.T) {
 
 func TestLbChangeStaticIP(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -17,10 +17,10 @@ limitations under the License.
 package controller
 
 import (
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/healthchecks"
@@ -19,13 +19,14 @@ package controller
 import (
 	"fmt"
 
-	"k8s.io/ingress/controllers/gce/loadbalancers"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-
 	"github.com/golang/glog"
+
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+
+	"k8s.io/ingress/controllers/gce/loadbalancers"
 )
 
 // secretLoaders returns a type containing all the secrets of an Ingress.
@@ -44,7 +45,7 @@ func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error {
 // apiServerTLSLoader loads TLS certs from the apiserver.
 type apiServerTLSLoader struct {
 	noOPValidator
-	client client.Interface
+	client kubernetes.Interface
 }
 
 func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
@@ -59,7 +60,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 	secretName := ing.Spec.TLS[0].SecretName
 	// TODO: Replace this for a secret watcher.
 	glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
-	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName)
+	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -21,10 +21,10 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
 )
 
 // Pods created in loops start from this time, for routines that
@@ -33,7 +33,7 @@ var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
 
 func TestZoneListing(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1"},
 		"zone-2": {"n2"},
@@ -58,7 +58,7 @@ func TestZoneListing(t *testing.T) {
 
 func TestInstancesAddedToZones(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1", "n2"},
 		"zone-2": {"n3"},
@@ -93,12 +93,12 @@ func TestInstancesAddedToZones(t *testing.T) {
 
 func TestProbeGetter(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 		3002: "/foo",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
 		if err != nil {
@@ -111,13 +111,13 @@ func TestProbeGetter(t *testing.T) {
 
 func TestProbeGetterNamedPort(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 	for _, p := range lbc.podLister.Indexer.List() {
-		pod := p.(*api.Pod)
+		pod := p.(*api_v1.Pod)
 		pod.Spec.Containers[0].Ports[0].Name = "test"
 		pod.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Port = intstr.IntOrString{Type: intstr.String, StrVal: "test"}
 	}
@@ -134,26 +134,26 @@ func TestProbeGetterNamedPort(t *testing.T) {
 
 func TestProbeGetterCrossNamespace(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 
-	firstPod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	firstPod := &api_v1.Pod{
+		ObjectMeta: meta_v1.ObjectMeta{
 			// labels match those added by "addPods", but ns and health check
 			// path is different. If this pod was created in the same ns, it
 			// would become the health check.
 			Labels:            map[string]string{"app-3001": "test"},
 			Name:              fmt.Sprintf("test-pod-new-ns"),
 			Namespace:         "new-ns",
-			CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+			CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
 		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{
+		Spec: api_v1.PodSpec{
+			Containers: []api_v1.Container{
 				{
-					Ports: []api.ContainerPort{{ContainerPort: 80}},
-					ReadinessProbe: &api.Probe{
-						Handler: api.Handler{
-							HTTPGet: &api.HTTPGetAction{
-								Scheme: api.URISchemeHTTP,
+					Ports: []api_v1.ContainerPort{{ContainerPort: 80}},
+					ReadinessProbe: &api_v1.Probe{
+						Handler: api_v1.Handler{
+							HTTPGet: &api_v1.HTTPGetAction{
+								Scheme: api_v1.URISchemeHTTP,
 								Path:   "/badpath",
 								Port: intstr.IntOrString{
 									Type: intstr.Int,
@@ -170,7 +170,7 @@ func TestProbeGetterCrossNamespace(t *testing.T) {
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
@@ -186,10 +186,10 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 	delay := time.Minute
 	for np, u := range nodePortToHealthCheck {
 		l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
-		svc := &api.Service{
-			Spec: api.ServiceSpec{
+		svc := &api_v1.Service{
+			Spec: api_v1.ServiceSpec{
 				Selector: l,
-				Ports: []api.ServicePort{
+				Ports: []api_v1.ServicePort{
 					{
 						NodePort: int32(np),
 						TargetPort: intstr.IntOrString{
@@ -204,21 +204,21 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 		svc.Namespace = ns
 		lbc.svcLister.Indexer.Add(svc)
 
-		pod := &api.Pod{
-			ObjectMeta: api.ObjectMeta{
+		pod := &api_v1.Pod{
+			ObjectMeta: meta_v1.ObjectMeta{
 				Labels:            l,
 				Name:              fmt.Sprintf("%d", np),
 				Namespace:         ns,
-				CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
+				CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(delay)),
 			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
+			Spec: api_v1.PodSpec{
+				Containers: []api_v1.Container{
 					{
-						Ports: []api.ContainerPort{{Name: "test", ContainerPort: 80}},
-						ReadinessProbe: &api.Probe{
-							Handler: api.Handler{
-								HTTPGet: &api.HTTPGetAction{
-									Scheme: api.URISchemeHTTP,
+						Ports: []api_v1.ContainerPort{{Name: "test", ContainerPort: 80}},
+						ReadinessProbe: &api_v1.Probe{
+							Handler: api_v1.Handler{
+								HTTPGet: &api_v1.HTTPGetAction{
+									Scheme: api_v1.URISchemeHTTP,
 									Path:   u,
 									Port: intstr.IntOrString{
 										Type: intstr.Int,
@@ -239,20 +239,20 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) {
 	for zone, nodes := range zoneToNode {
 		for _, node := range nodes {
-			n := &api.Node{
-				ObjectMeta: api.ObjectMeta{
+			n := &api_v1.Node{
+				ObjectMeta: meta_v1.ObjectMeta{
 					Name: node,
 					Labels: map[string]string{
 						zoneKey: zone,
 					},
 				},
-				Status: api.NodeStatus{
-					Conditions: []api.NodeCondition{
-						{Type: api.NodeReady, Status: api.ConditionTrue},
+				Status: api_v1.NodeStatus{
+					Conditions: []api_v1.NodeCondition{
+						{Type: api_v1.NodeReady, Status: api_v1.ConditionTrue},
 					},
 				},
 			}
-			lbc.nodeLister.Store.Add(n)
+			lbc.nodeLister.Indexer.Add(n)
 		}
 	}
 	lbc.CloudClusterManager.instancePool.Init(lbc.tr)
@@ -22,19 +22,23 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/golang/glog"
+
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	listers "k8s.io/client-go/listers/core/v1"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/labels"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/util/workqueue"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -202,6 +206,41 @@ type StoreToIngressLister struct {
 	cache.Store
 }
 
+// StoreToNodeLister makes a Store that lists Node.
+type StoreToNodeLister struct {
+	cache.Indexer
+}
+
+// StoreToServiceLister makes a Store that lists Service.
+type StoreToServiceLister struct {
+	cache.Indexer
+}
+
+// StoreToPodLister makes a Store that lists Pods.
+type StoreToPodLister struct {
+	cache.Indexer
+}
+
+func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) {
+	err = ListAll(s.Indexer, selector, func(m interface{}) {
+		ret = append(ret, m.(*api.Pod))
+	})
+	return ret, err
+}
+
+func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendFunc) error {
+	for _, m := range store.List() {
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
 // List lists all Ingress' in the store.
 func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) {
 	for _, m := range s.Store.List() {
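Aside, not from the commit: the new ListAll helper filters whatever sits in a cache.Store by label selector, and StoreToPodLister.List is a thin wrapper over it. A hypothetical caller (the helper name and labels below are made up for illustration):

	// podsWithLabel is an illustrative helper, not code from this commit.
	func podsWithLabel(podLister StoreToPodLister, key, value string) ([]*api.Pod, error) {
		selector := labels.SelectorFromSet(labels.Set{key: value})
		return podLister.List(selector)
	}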
@@ -336,7 +375,7 @@ func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (
 func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) {
 	obj, exists, err := t.svcLister.Indexer.Get(
 		&api.Service{
-			ObjectMeta: api.ObjectMeta{
+			ObjectMeta: meta_v1.ObjectMeta{
 				Name:      be.ServiceName,
 				Namespace: namespace,
 			},
@@ -411,7 +450,7 @@ func getZone(n *api.Node) string {
 
 // GetZoneForNode returns the zone for a given node by looking up its zone label.
 func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
-	nodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	nodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return "", err
 	}
@@ -428,7 +467,7 @@ func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
 // ListZones returns a list of zones this Kubernetes cluster spans.
 func (t *GCETranslator) ListZones() ([]string, error) {
 	zones := sets.String{}
-	readyNodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	readyNodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return zones.List(), err
 	}
@@ -502,14 +541,12 @@ func isSimpleHTTPProbe(probe *api.Probe) bool {
 // the request path, callers are responsible for swapping this out for the
 // appropriate default.
 func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
-	sl, err := t.svcLister.List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
+	sl := t.svcLister.List()
 	var ingresses []extensions.Ingress
 	var healthCheck *compute.HttpHealthCheck
 	// Find the label and target port of the one service with the given nodePort
-	for _, s := range sl {
+	for _, as := range sl {
+		s := as.(*api.Service)
 		for _, p := range s.Spec.Ports {
 
 			// only one Service can match this nodePort, try and look up
@@ -26,9 +26,11 @@ import (
 	"io/ioutil"
 	"log"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	"k8s.io/kubernetes/pkg/runtime"
+	registered "k8s.io/apimachinery/pkg/apimachinery/registered"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/pkg/api"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
 
 	// This installs the legacy v1 API
 	_ "k8s.io/kubernetes/pkg/api/install"
@@ -58,14 +60,19 @@ func main() {
 	}
 	tlsCrt := read(*crt)
 	tlsKey := read(*key)
-	secret := &api.Secret{
-		ObjectMeta: api.ObjectMeta{
+	secret := &api_v1.Secret{
+		ObjectMeta: meta_v1.ObjectMeta{
 			Name: *name,
 		},
 		Data: map[string][]byte{
-			api.TLSCertKey:        tlsCrt,
-			api.TLSPrivateKeyKey:  tlsKey,
+			api_v1.TLSCertKey:       tlsCrt,
+			api_v1.TLSPrivateKeyKey: tlsKey,
 		},
 	}
-	fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), secret))
+
+	arm, err := registered.NewAPIRegistrationManager("")
+	if err != nil {
+		log.Fatalf("%v", err)
+	}
+	fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(arm.EnabledVersions()...), secret))
 }
@@ -20,8 +20,9 @@ import (
 	"fmt"
 
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/ingress/controllers/gce/utils"
 	netset "k8s.io/kubernetes/pkg/util/net/sets"
+
+	"k8s.io/ingress/controllers/gce/utils"
 )
 
 type fakeFirewallRules struct {
@@ -17,13 +17,15 @@ limitations under the License.
 package firewalls
 
 import (
-	"github.com/golang/glog"
 	"strconv"
 
+	"github.com/golang/glog"
+
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/ingress/controllers/gce/utils"
+	"k8s.io/apimachinery/pkg/util/sets"
 	netset "k8s.io/kubernetes/pkg/util/net/sets"
-	"k8s.io/kubernetes/pkg/util/sets"
+
+	"k8s.io/ingress/controllers/gce/utils"
 )
 
 // Src ranges from which the GCE L7 performs health checks.
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	compute "google.golang.org/api/compute/v1"
+
 	"k8s.io/ingress/controllers/gce/utils"
 )
 
@@ -17,11 +17,12 @@ limitations under the License.
 package healthchecks
 
 import (
-	compute "google.golang.org/api/compute/v1"
+	"net/http"
 
 	"github.com/golang/glog"
+	compute "google.golang.org/api/compute/v1"
+
 	"k8s.io/ingress/controllers/gce/utils"
-	"net/http"
 )
 
 // HealthChecks manages health checks.
@@ -20,8 +20,9 @@ import (
 	"fmt"
 
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 // NewFakeInstanceGroups creates a new FakeInstanceGroups.
@@ -21,12 +21,13 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/golang/glog"
+
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/storage"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -19,7 +19,7 @@ package instances
 import (
 	"testing"
 
-	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 const defaultZone = "default-zone"
@@ -21,8 +21,9 @@ import (
 	"testing"
 
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 var testIPManager = testIP{}
@@ -25,13 +25,14 @@ import (
 	"reflect"
 	"strings"
 
+	"github.com/golang/glog"
+
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/storage"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -21,11 +21,12 @@ import (
 	"testing"
 
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/healthchecks"
 	"k8s.io/ingress/controllers/gce/instances"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/util/sets"
 )
 
 const (
@@ -26,20 +26,23 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/golang/glog"
+
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	flag "github.com/spf13/pflag"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/pkg/api"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+
 	"k8s.io/ingress/controllers/gce/controller"
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/storage"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/labels"
-	"k8s.io/kubernetes/pkg/util/wait"
-
-	"github.com/golang/glog"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 // Entrypoint of GLBC. Example invocation:
@@ -50,9 +53,9 @@ import (
 // $ glbc --proxy="http://localhost:proxyport"
 
 const (
-	// lbApiPort is the port on which the loadbalancer controller serves a
+	// lbAPIPort is the port on which the loadbalancer controller serves a
 	// minimal api (/healthz, /delete-all-and-quit etc).
-	lbApiPort = 8081
+	lbAPIPort = 8081
 
 	// A delimiter used for clarity in naming GCE resources.
 	clusterNameDelimiter = "--"
@@ -119,7 +122,7 @@ var (
 		`Path to a file containing the gce config. If left unspecified this
 	controller only works with default zones.`)
 
-	healthzPort = flags.Int("healthz-port", lbApiPort,
+	healthzPort = flags.Int("healthz-port", lbAPIPort,
 		`Port to run healthz server. Must match the health check port in yaml.`)
 )
 
@@ -171,7 +174,7 @@ func main() {
 	// We only really need a binary switch from light, v(2) logging to
 	// heavier debug style V(4) logging, which we use --verbose for.
 	flags.Parse(os.Args)
-	clientConfig := kubectl_util.DefaultClientConfig(flags)
+	//clientConfig := kubectl_util.DefaultClientConfig(flags)
 
 	// Set glog verbosity levels, unconditionally set --alsologtostderr.
 	go_flag.Lookup("logtostderr").Value.Set("true")
@@ -183,20 +186,23 @@ func main() {
 		glog.Fatalf("Please specify --default-backend")
 	}
 
-	var config *restclient.Config
+	var config *rest.Config
 	// Create kubeclient
 	if *inCluster {
-		if config, err = restclient.InClusterConfig(); err != nil {
+		if config, err = rest.InClusterConfig(); err != nil {
 			glog.Fatalf("error creating client configuration: %v", err)
 		}
 	} else {
-		config, err = clientConfig.ClientConfig()
+		config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+			&clientcmd.ClientConfigLoadingRules{},
+			&clientcmd.ConfigOverrides{}).ClientConfig()
+
 		if err != nil {
 			glog.Fatalf("error creating client configuration: %v", err)
 		}
 	}
 
-	kubeClient, err := client.NewForConfig(config)
+	kubeClient, err := kubernetes.NewForConfig(config)
 	if err != nil {
 		glog.Fatalf("Failed to create client: %v.", err)
 	}
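Aside, not part of the diff: for the out-of-cluster case client-go also offers the shorter clientcmd.BuildConfigFromFlags helper. A sketch of that alternative, with a placeholder kubeconfig path:

	// Illustrative alternative to the deferred-loading config above.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		glog.Fatalf("error creating client configuration: %v", err)
	}
	kubeClient, err := kubernetes.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to create client: %v.", err)
	}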
@@ -247,7 +253,7 @@ func main() {
 	}
 }
 
-func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) {
+func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string) (*utils.Namer, error) {
 	name, err := getClusterUID(kubeClient, clusterName)
 	if err != nil {
 		return nil, err
@@ -329,7 +335,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n
 // backwards compatibility, the firewall name will default to the cluster UID.
 // Use getFlagOrLookupVault to obtain a stored or overridden value for the firewall name.
 // else, use the cluster UID as a backup (this retains backwards compatibility).
-func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (string, error) {
+func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) {
 	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 	if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
 		return "", err
@@ -347,7 +353,7 @@ func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (str
 // else, check if there are any working Ingresses
 //   - remember that "" is the cluster uid
 // else, allocate a new uid
-func getClusterUID(kubeClient client.Interface, name string) (string, error) {
+func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) {
 	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 	if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
 		return "", err
@@ -356,7 +362,9 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 	}
 
 	// Check if the cluster has an Ingress with ip
-	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
+	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{
+		LabelSelector: labels.Everything().String(),
+	})
 	if err != nil {
 		return "", err
 	}
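The List call changes shape because meta_v1.ListOptions carries the label selector as a plain string, so labels.Everything() has to be rendered with String(). A small sketch of the new pattern; the package and function names are illustrative, the API calls are the ones used above.

package sketch

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// listAllIngresses lists every Ingress in the cluster using the string-typed
// label selector that meta_v1.ListOptions expects.
func listAllIngresses(kubeClient kubernetes.Interface) ([]string, error) {
	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{
		LabelSelector: labels.Everything().String(),
	})
	if err != nil {
		return nil, err
	}
	names := []string{}
	for _, ing := range ings.Items {
		names = append(names, ing.Namespace+"/"+ing.Name)
	}
	return names, nil
}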
@@ -387,11 +395,11 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 }
 
 // getNodePort waits for the Service, and returns it's first node port.
-func getNodePort(client client.Interface, ns, name string) (nodePort int64, err error) {
-	var svc *api.Service
+func getNodePort(client kubernetes.Interface, ns, name string) (nodePort int64, err error) {
+	var svc *api_v1.Service
 	glog.V(3).Infof("Waiting for %v/%v", ns, name)
 	wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-		svc, err = client.Core().Services(ns).Get(name)
+		svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
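Get now takes an explicit meta_v1.GetOptions value, which is the only functional change inside the polling loop above. A compact sketch of the same pattern; the helper name is illustrative and the node port is typed as it appears in the v1 API.

package sketch

import (
	"fmt"
	"time"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

// waitForNodePort polls until the named Service exists and returns its first
// node port; lookup failures keep the poll going instead of aborting it.
func waitForNodePort(client kubernetes.Interface, ns, name string) (int32, error) {
	var svc *api_v1.Service
	err := wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
		var getErr error
		svc, getErr = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
		if getErr != nil {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return 0, err
	}
	if len(svc.Spec.Ports) == 0 {
		return 0, fmt.Errorf("service %v/%v has no ports", ns, name)
	}
	return svc.Spec.Ports[0].NodePort, nil
}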
@@ -22,10 +22,12 @@ import (
 	"sync"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	api "k8s.io/client-go/pkg/api/v1"
+	"k8s.io/client-go/tools/cache"
 )
 
 const (
@@ -72,7 +74,7 @@ func (c *ConfigMapVault) Put(key, val string) error {
 	c.storeLock.Lock()
 	defer c.storeLock.Unlock()
 	apiObj := &api.ConfigMap{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      c.name,
 			Namespace: c.namespace,
 		},
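Only the ObjectMeta literal changes in Put: the ConfigMap type still comes from the client-go v1 API package, but its metadata is now the apimachinery metav1.ObjectMeta. A sketch of the object being assembled; the helper name and the trailing Create call are assumptions about the elided rest of the function, not part of the hunk.

package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// putUID stores a single key/value pair in a ConfigMap whose metadata uses
// the apimachinery ObjectMeta type.
func putUID(client kubernetes.Interface, ns, name, key, val string) error {
	cm := &api.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Data: map[string]string{key: val},
	}
	// Create is assumed here for illustration; the real Put may update an
	// existing ConfigMap instead.
	_, err := client.Core().ConfigMaps(ns).Create(cm)
	return err
}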
@@ -121,7 +123,7 @@ func (c *ConfigMapVault) Delete() error {
 // NewConfigMapVault creates a config map client.
 // This client is essentially meant to abstract out the details of
 // configmaps and the API, and just store/retrieve a single value, the cluster uid.
-func NewConfigMapVault(c client.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
+func NewConfigMapVault(c kubernetes.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
 	return &ConfigMapVault{
 		ConfigMapStore: NewConfigMapStore(c),
 		namespace:      uidNs,
@@ -148,7 +150,7 @@ type ConfigMapStore interface {
 // through cache.
 type APIServerConfigMapStore struct {
 	ConfigMapStore
-	client client.Interface
+	client kubernetes.Interface
 }
 
 // Add adds the given config map to the apiserver's store.
@@ -168,7 +170,7 @@ func (a *APIServerConfigMapStore) Update(obj interface{}) error {
 // Delete deletes the existing config map object.
 func (a *APIServerConfigMapStore) Delete(obj interface{}) error {
 	cfg := obj.(*api.ConfigMap)
-	return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &api.DeleteOptions{})
+	return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{})
 }
 
 // GetByKey returns the config map for a given key.
@@ -179,7 +181,7 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists
 		return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key)
 	}
 	ns, name := nsName[0], nsName[1]
-	cfg, err := a.client.Core().ConfigMaps(ns).Get(name)
+	cfg, err := a.client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{})
 	if err != nil {
 		// Translate not found errors to found=false, err=nil
 		if errors.IsNotFound(err) {
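GetByKey keeps its NotFound translation; the only change is the extra GetOptions argument. A sketch of the lookup in isolation, with an illustrative helper name, using the same apimachinery errors helper as the code above.

package sketch

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// getConfigMap fetches a ConfigMap by namespace/name with an explicit
// GetOptions, translating NotFound into (nil, false, nil) so callers can
// treat a missing map as "not stored yet" rather than an error.
func getConfigMap(client kubernetes.Interface, ns, name string) (*api.ConfigMap, bool, error) {
	cfg, err := client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, false, nil
		}
		return nil, false, err
	}
	return cfg, true, nil
}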
@@ -192,6 +194,6 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists
 
 // NewConfigMapStore returns a config map store capable of persisting updates
 // to apiserver.
-func NewConfigMapStore(c client.Interface) ConfigMapStore {
+func NewConfigMapStore(c kubernetes.Interface) ConfigMapStore {
 	return &APIServerConfigMapStore{ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc), client: c}
 }
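The cache.Store embedded by APIServerConfigMapStore now comes from k8s.io/client-go/tools/cache, but it behaves the same way: objects are keyed by namespace/name via MetaNamespaceKeyFunc. A small usage sketch; the function name and example object are illustrative.

package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// cacheStoreExample adds a ConfigMap to an in-memory store and looks it up
// by its "namespace/name" key, the key format produced by MetaNamespaceKeyFunc.
func cacheStoreExample() (bool, error) {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cm := &api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "ingress-uid"}}
	if err := store.Add(cm); err != nil {
		return false, err
	}
	_, exists, err := store.GetByKey("kube-system/ingress-uid")
	return exists, err
}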
@@ -19,7 +19,7 @@ package storage
 import (
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/client-go/pkg/api"
 )
 
 func TestConfigMapUID(t *testing.T) {
@@ -21,8 +21,9 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/cache"
 )
 
 // Snapshotter is an interface capable of providing a consistent snapshot of
@@ -18,6 +18,7 @@ package utils
 
 import (
 	"fmt"
+	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -25,7 +26,6 @@ import (
 	"github.com/golang/glog"
 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
-	"regexp"
 )
 
 const (