Update gce controller
parent ea7f943160
commit c7c2a564a9

26 changed files with 275 additions and 236 deletions
@@ -23,10 +23,11 @@ import (
 "strings"
 "time"

-"k8s.io/kubernetes/pkg/util/sets"

 "github.com/golang/glog"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/healthchecks"
 "k8s.io/ingress/controllers/gce/instances"
 "k8s.io/ingress/controllers/gce/storage"

@@ -21,13 +21,13 @@ import (
 "testing"

 compute "google.golang.org/api/compute/v1"
+"google.golang.org/api/googleapi"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/healthchecks"
 "k8s.io/ingress/controllers/gce/instances"
 "k8s.io/ingress/controllers/gce/storage"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"

-"google.golang.org/api/googleapi"
 )

 const defaultZone = "zone-a"

@@ -20,8 +20,9 @@ import (
 "fmt"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/client-go/tools/cache"

 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/client/cache"
 )

 // NewFakeBackendServices creates a new fake backend services manager.

@@ -22,16 +22,17 @@ import (
 "os"
 "time"

+"github.com/golang/glog"

+"k8s.io/kubernetes/pkg/cloudprovider"
+gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

 "k8s.io/ingress/controllers/gce/backends"
 "k8s.io/ingress/controllers/gce/firewalls"
 "k8s.io/ingress/controllers/gce/healthchecks"
 "k8s.io/ingress/controllers/gce/instances"
 "k8s.io/ingress/controllers/gce/loadbalancers"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/cloudprovider"
-gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

-"github.com/golang/glog"
 )

 const (

@@ -23,20 +23,21 @@ import (
 "sync"
 "time"

+"github.com/golang/glog"

+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/fields"
+"k8s.io/client-go/kubernetes"
+unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
+listers "k8s.io/client-go/listers/core/v1"
+base_api "k8s.io/client-go/pkg/api"
+api "k8s.io/client-go/pkg/api/v1"
+extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+"k8s.io/client-go/tools/cache"
+"k8s.io/client-go/tools/record"

 "k8s.io/ingress/controllers/gce/loadbalancers"
 "k8s.io/ingress/controllers/gce/utils"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/apis/extensions"
-"k8s.io/kubernetes/pkg/client/cache"
-client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-"k8s.io/kubernetes/pkg/client/record"
-"k8s.io/kubernetes/pkg/fields"
-"k8s.io/kubernetes/pkg/runtime"
-"k8s.io/kubernetes/pkg/watch"

-"github.com/golang/glog"
 )

 var (

@@ -57,16 +58,16 @@ var (
 // LoadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer, via loadBalancerConfig.
 type LoadBalancerController struct {
-client client.Interface
-ingController *cache.Controller
-nodeController *cache.Controller
-svcController *cache.Controller
-podController *cache.Controller
+client kubernetes.Interface
+ingController cache.Controller
+nodeController cache.Controller
+svcController cache.Controller
+podController cache.Controller
 ingLister StoreToIngressLister
-nodeLister cache.StoreToNodeLister
-svcLister cache.StoreToServiceLister
+nodeLister StoreToNodeLister
+svcLister StoreToServiceLister
 // Health checks are the readiness probes of containers on pods.
-podLister cache.StoreToPodLister
+podLister StoreToPodLister
 // TODO: Watch secrets
 CloudClusterManager *ClusterManager
 recorder record.EventRecorder

@@ -91,7 +92,7 @@ type LoadBalancerController struct {
 // - clusterManager: A ClusterManager capable of creating all cloud resources
 // required for L7 loadbalancing.
 // - resyncPeriod: Watchers relist from the Kubernetes API server this often.
-func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
+func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{

@@ -101,7 +102,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 client: kubeClient,
 CloudClusterManager: clusterManager,
 stopCh: make(chan struct{}),
-recorder: eventBroadcaster.NewRecorder(
+recorder: eventBroadcaster.NewRecorder(base_api.Scheme,
 api.EventSource{Component: "loadbalancer-controller"}),
 }
 lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)

@@ -140,10 +141,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 },
 }
 lbc.ingLister.Store, lbc.ingController = cache.NewInformer(
-&cache.ListWatch{
-ListFunc: ingressListFunc(lbc.client, namespace),
-WatchFunc: ingressWatchFunc(lbc.client, namespace),
-},
+cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()),
 &extensions.Ingress{}, resyncPeriod, pathHandlers)

 // Service watch handlers

@@ -173,30 +171,14 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 )

-nodeHandlers := cache.ResourceEventHandlerFuncs{
-AddFunc: lbc.nodeQueue.enqueue,
-DeleteFunc: lbc.nodeQueue.enqueue,
-// Nodes are updated every 10s and we don't care, so no update handler.
-}
-// Node watch handlers
-lbc.nodeLister.Store, lbc.nodeController = cache.NewInformer(
-&cache.ListWatch{
-ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
-return lbc.client.Core().RESTClient().Get().
-Resource("nodes").
-FieldsSelectorParam(fields.Everything()).
-Do().
-Get()
-},
-WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return lbc.client.Core().RESTClient().Get().
-Prefix("watch").
-Resource("nodes").
-FieldsSelectorParam(fields.Everything()).
-Param("resourceVersion", options.ResourceVersion).Watch()
-},
-},
-&api.Node{}, 0, nodeHandlers)
+lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer(
+cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
+&api.Node{},
+resyncPeriod,
+cache.ResourceEventHandlerFuncs{},
+cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+)

 lbc.tr = &GCETranslator{&lbc}
 lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}

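Note: the hunks above drop the hand-written ListFunc/WatchFunc pair in favour of cache.NewListWatchFromClient, and switch the node watch to cache.NewIndexerInformer from k8s.io/client-go. The sketch below shows that informer pattern in isolation, assuming the same client-go release line this commit vendors (pkg/api/v1 types, pre-context call signatures); startNodeInformer and the inline handler are illustrative and not part of the controller.

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// startNodeInformer wires a node informer the way the controller now wires its
// ingress and node watches: a ListWatch built from the typed client's
// RESTClient, fed into NewIndexerInformer with a namespace index.
func startNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, stopCh <-chan struct{}) cache.Indexer {
	lw := cache.NewListWatchFromClient(client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything())
	indexer, informer := cache.NewIndexerInformer(
		lw,
		&api.Node{},
		resyncPeriod,
		cache.ResourceEventHandlerFuncs{
			// Illustrative handler; the controller enqueues work items here.
			AddFunc: func(obj interface{}) { fmt.Println("node added") },
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	go informer.Run(stopCh)
	return indexer
}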
@@ -205,18 +187,6 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 return &lbc, nil
 }

-func ingressListFunc(c client.Interface, ns string) func(api.ListOptions) (runtime.Object, error) {
-return func(opts api.ListOptions) (runtime.Object, error) {
-return c.Extensions().Ingresses(ns).List(opts)
-}
-}
-
-func ingressWatchFunc(c client.Interface, ns string) func(options api.ListOptions) (watch.Interface, error) {
-return func(options api.ListOptions) (watch.Interface, error) {
-return c.Extensions().Ingresses(ns).Watch(options)
-}
-}
-
 // enqueueIngressForService enqueues all the Ingress' for a Service.
 func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
 svc := obj.(*api.Service)

@@ -377,7 +347,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing

 // Update IP through update/status endpoint
 ip := l7.GetIP()
-currIng, err := ingClient.Get(ing.Name)
+currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
 if err != nil {
 return err
 }

@@ -401,7 +371,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing
 }
 }
 // Update annotations through /update endpoint
-currIng, err = ingClient.Get(ing.Name)
+currIng, err = ingClient.Get(ing.Name, metav1.GetOptions{})
 if err != nil {
 return err
 }

@@ -464,7 +434,7 @@ func (lbc *LoadBalancerController) syncNodes(key string) error {
 return nil
 }

-func getNodeReadyPredicate() cache.NodeConditionPredicate {
+func getNodeReadyPredicate() listers.NodeConditionPredicate {
 return func(node *api.Node) bool {
 for ix := range node.Status.Conditions {
 condition := &node.Status.Conditions[ix]

@@ -479,7 +449,7 @@ func getNodeReadyPredicate() cache.NodeConditionPredicate {
 // getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
 func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
 nodeNames := []string{}
-nodes, err := lbc.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 if err != nil {
 return nodeNames, err
 }

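Note: two call-pattern changes run through the hunks above — typed Get calls now take metav1.GetOptions, and ready nodes are read through listers.NewNodeLister(...).ListWithPredicate instead of the old NodeCondition lister. A minimal sketch of both, under the same client-go assumptions as above; readyNodeNames and ingressIP are illustrative helpers, not functions from this repository.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	listers "k8s.io/client-go/listers/core/v1"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// readyNodeNames lists nodes from an informer-fed indexer, keeping only those
// whose NodeReady condition is true, mirroring getReadyNodeNames above.
func readyNodeNames(indexer cache.Indexer) ([]string, error) {
	isReady := func(node *api.Node) bool {
		for i := range node.Status.Conditions {
			c := node.Status.Conditions[i]
			if c.Type == api.NodeReady {
				return c.Status == api.ConditionTrue
			}
		}
		return false
	}
	nodes, err := listers.NewNodeLister(indexer).ListWithPredicate(isReady)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nodes))
	for _, n := range nodes {
		names = append(names, n.Name)
	}
	return names, nil
}

// ingressIP shows the new typed Get signature with explicit GetOptions.
func ingressIP(client kubernetes.Interface, ns, name string) (string, error) {
	ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	if len(ing.Status.LoadBalancer.Ingress) > 0 {
		return ing.Status.LoadBalancer.Ingress[0].IP, nil
	}
	return "", nil
}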
@@ -24,18 +24,18 @@ import (
 compute "google.golang.org/api/compute/v1"

+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/util/intstr"
+"k8s.io/apimachinery/pkg/util/sets"
+"k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/client-go/kubernetes/fake"
+"k8s.io/client-go/pkg/api"
+api_v1 "k8s.io/client-go/pkg/api/v1"
+extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"

 "k8s.io/ingress/controllers/gce/firewalls"
 "k8s.io/ingress/controllers/gce/loadbalancers"
 "k8s.io/ingress/controllers/gce/utils"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/api/testapi"
-"k8s.io/kubernetes/pkg/apis/extensions"
-client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-"k8s.io/kubernetes/pkg/client/restclient"
-"k8s.io/kubernetes/pkg/util/intstr"
-"k8s.io/kubernetes/pkg/util/sets"
-"k8s.io/kubernetes/pkg/util/uuid"
 )

 const testClusterName = "testcluster"

@@ -51,9 +51,9 @@ func defaultBackendName(clusterName string) string {
 }

 // newLoadBalancerController create a loadbalancer controller.
-func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterURL string) *LoadBalancerController {
-client := client.NewForConfigOrDie(&restclient.Config{Host: masterURL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll)
+func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController {
+kubeClient := fake.NewSimpleClientset()
+lb, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll)
 if err != nil {
 t.Fatalf("%v", err)
 }

@@ -95,7 +95,7 @@ func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extens
 // newIngress returns a new Ingress with the given path map.
 func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {
 return &extensions.Ingress{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: fmt.Sprintf("%v", uuid.NewUUID()),
 Namespace: api.NamespaceNone,
 },

@@ -107,8 +107,8 @@ func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.
 Rules: toIngressRules(hostRules),
 },
 Status: extensions.IngressStatus{
-LoadBalancer: api.LoadBalancerStatus{
-Ingress: []api.LoadBalancerIngress{
+LoadBalancer: api_v1.LoadBalancerStatus{
+Ingress: []api_v1.LoadBalancerIngress{
 {IP: testIPManager.ip()},
 },
 },

@@ -178,21 +178,21 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 }
 for _, rule := range ing.Spec.Rules {
 for _, path := range rule.HTTP.Paths {
-svc := &api.Service{
-ObjectMeta: api.ObjectMeta{
+svc := &api_v1.Service{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: path.Backend.ServiceName,
 Namespace: ing.Namespace,
 },
 }
-var svcPort api.ServicePort
+var svcPort api_v1.ServicePort
 switch path.Backend.ServicePort.Type {
 case intstr.Int:
-svcPort = api.ServicePort{Port: path.Backend.ServicePort.IntVal}
+svcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}
 default:
-svcPort = api.ServicePort{Name: path.Backend.ServicePort.StrVal}
+svcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}
 }
 svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))
-svc.Spec.Ports = []api.ServicePort{svcPort}
+svc.Spec.Ports = []api_v1.ServicePort{svcPort}
 lbc.svcLister.Indexer.Add(svc)
 }
 }

@@ -201,7 +201,7 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 func TestLbCreateDelete(t *testing.T) {
 testFirewallName := "quux"
 cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 inputMap1 := map[string]utils.FakeIngressRuleValueMap{
 "foo.example.com": {
 "/foo1": "foo1svc",

@@ -293,7 +293,7 @@ func TestLbCreateDelete(t *testing.T) {

 func TestLbFaultyUpdate(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 inputMap := map[string]utils.FakeIngressRuleValueMap{
 "foo.example.com": {
 "/foo1": "foo1svc",

@@ -330,7 +330,7 @@ func TestLbFaultyUpdate(t *testing.T) {

 func TestLbDefaulting(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 // Make sure the controller plugs in the default values accepted by GCE.
 ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
 pm := newPortManager(1, 65536)

@@ -348,7 +348,7 @@ func TestLbDefaulting(t *testing.T) {

 func TestLbNoService(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 inputMap := map[string]utils.FakeIngressRuleValueMap{
 "foo.example.com": {
 "/foo1": "foo1svc",

@@ -373,8 +373,8 @@ func TestLbNoService(t *testing.T) {
 // Creates the service, next sync should have complete url map.
 pm := newPortManager(1, 65536)
 addIngress(lbc, ing, pm)
-lbc.enqueueIngressForService(&api.Service{
-ObjectMeta: api.ObjectMeta{
+lbc.enqueueIngressForService(&api_v1.Service{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: "foo1svc",
 Namespace: ing.Namespace,
 },

@@ -392,7 +392,7 @@ func TestLbNoService(t *testing.T) {

 func TestLbChangeStaticIP(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 inputMap := map[string]utils.FakeIngressRuleValueMap{
 "foo.example.com": {
 "/foo1": "foo1svc",

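Note: the test changes above drop the masterURL/restclient plumbing in favour of fake.NewSimpleClientset. A self-contained sketch of that pattern; the service name and namespace are placeholders, not objects from these tests.

package example

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

// TestFakeClientLookup seeds a fake clientset with one Service and reads it
// back, with no apiserver involved.
func TestFakeClientLookup(t *testing.T) {
	kubeClient := fake.NewSimpleClientset(&api_v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "foo1svc", Namespace: "default"},
	})
	svc, err := kubeClient.Core().Services("default").Get("foo1svc", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("%v", err)
	}
	if svc.Name != "foo1svc" {
		t.Errorf("got %q, want foo1svc", svc.Name)
	}
}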
@@ -17,10 +17,10 @@ limitations under the License.
 package controller

 import (
-"k8s.io/kubernetes/pkg/util/intstr"
-"k8s.io/kubernetes/pkg/util/sets"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/intstr"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/backends"
 "k8s.io/ingress/controllers/gce/firewalls"
 "k8s.io/ingress/controllers/gce/healthchecks"

@@ -19,13 +19,14 @@ package controller
 import (
 "fmt"

-"k8s.io/ingress/controllers/gce/loadbalancers"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/apis/extensions"
-client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

 "github.com/golang/glog"

+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/client-go/kubernetes"
+api "k8s.io/client-go/pkg/api/v1"
+extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"

+"k8s.io/ingress/controllers/gce/loadbalancers"
 )

 // secretLoaders returns a type containing all the secrets of an Ingress.

@@ -44,7 +45,7 @@ func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error {
 // apiServerTLSLoader loads TLS certs from the apiserver.
 type apiServerTLSLoader struct {
 noOPValidator
-client client.Interface
+client kubernetes.Interface
 }

 func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {

@@ -59,7 +60,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 secretName := ing.Spec.TLS[0].SecretName
 // TODO: Replace this for a secret watcher.
 glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
-secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName)
+secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
 if err != nil {
 return nil, err
 }

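Note: the TLS loader above now fetches the secret with an explicit metav1.GetOptions argument. A hedged sketch of the same read path, pulling the standard tls.crt/tls.key data keys; readTLSSecret is an illustrative helper, not code from this commit.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// readTLSSecret fetches a secret and returns its TLS certificate and key.
func readTLSSecret(client kubernetes.Interface, ns, name string) (cert, key []byte, err error) {
	secret, err := client.Core().Secrets(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	cert, ok := secret.Data[api.TLSCertKey]
	if !ok {
		return nil, nil, fmt.Errorf("secret %v/%v has no %v", ns, name, api.TLSCertKey)
	}
	key, ok = secret.Data[api.TLSPrivateKeyKey]
	if !ok {
		return nil, nil, fmt.Errorf("secret %v/%v has no %v", ns, name, api.TLSPrivateKeyKey)
	}
	return cert, key, nil
}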
@@ -21,10 +21,10 @@ import (
 "testing"
 "time"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/api/unversioned"
-"k8s.io/kubernetes/pkg/util/intstr"
-"k8s.io/kubernetes/pkg/util/sets"
+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/util/intstr"
+"k8s.io/apimachinery/pkg/util/sets"
+api_v1 "k8s.io/client-go/pkg/api/v1"
 )

 // Pods created in loops start from this time, for routines that

@@ -33,7 +33,7 @@ var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)

 func TestZoneListing(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 zoneToNode := map[string][]string{
 "zone-1": {"n1"},
 "zone-2": {"n2"},

@@ -58,7 +58,7 @@ func TestZoneListing(t *testing.T) {

 func TestInstancesAddedToZones(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 zoneToNode := map[string][]string{
 "zone-1": {"n1", "n2"},
 "zone-2": {"n3"},

@@ -93,12 +93,12 @@ func TestInstancesAddedToZones(t *testing.T) {

 func TestProbeGetter(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 nodePortToHealthCheck := map[int64]string{
 3001: "/healthz",
 3002: "/foo",
 }
-addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 for p, exp := range nodePortToHealthCheck {
 got, err := lbc.tr.HealthCheck(p)
 if err != nil {

@@ -111,13 +111,13 @@ func TestProbeGetter(t *testing.T) {

 func TestProbeGetterNamedPort(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)
 nodePortToHealthCheck := map[int64]string{
 3001: "/healthz",
 }
-addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 for _, p := range lbc.podLister.Indexer.List() {
-pod := p.(*api.Pod)
+pod := p.(*api_v1.Pod)
 pod.Spec.Containers[0].Ports[0].Name = "test"
 pod.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Port = intstr.IntOrString{Type: intstr.String, StrVal: "test"}
 }

@@ -134,26 +134,26 @@ func TestProbeGetterNamedPort(t *testing.T) {

 func TestProbeGetterCrossNamespace(t *testing.T) {
 cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-lbc := newLoadBalancerController(t, cm, "")
+lbc := newLoadBalancerController(t, cm)

-firstPod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+firstPod := &api_v1.Pod{
+ObjectMeta: meta_v1.ObjectMeta{
 // labels match those added by "addPods", but ns and health check
 // path is different. If this pod was created in the same ns, it
 // would become the health check.
 Labels: map[string]string{"app-3001": "test"},
 Name: fmt.Sprintf("test-pod-new-ns"),
 Namespace: "new-ns",
-CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: api_v1.PodSpec{
+Containers: []api_v1.Container{
 {
-Ports: []api.ContainerPort{{ContainerPort: 80}},
-ReadinessProbe: &api.Probe{
-Handler: api.Handler{
-HTTPGet: &api.HTTPGetAction{
-Scheme: api.URISchemeHTTP,
+Ports: []api_v1.ContainerPort{{ContainerPort: 80}},
+ReadinessProbe: &api_v1.Probe{
+Handler: api_v1.Handler{
+HTTPGet: &api_v1.HTTPGetAction{
+Scheme: api_v1.URISchemeHTTP,
 Path: "/badpath",
 Port: intstr.IntOrString{
 Type: intstr.Int,

@@ -170,7 +170,7 @@ func TestProbeGetterCrossNamespace(t *testing.T) {
 nodePortToHealthCheck := map[int64]string{
 3001: "/healthz",
 }
-addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)

 for p, exp := range nodePortToHealthCheck {
 got, err := lbc.tr.HealthCheck(p)

@@ -186,10 +186,10 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 delay := time.Minute
 for np, u := range nodePortToHealthCheck {
 l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
-svc := &api.Service{
-Spec: api.ServiceSpec{
+svc := &api_v1.Service{
+Spec: api_v1.ServiceSpec{
 Selector: l,
-Ports: []api.ServicePort{
+Ports: []api_v1.ServicePort{
 {
 NodePort: int32(np),
 TargetPort: intstr.IntOrString{

@@ -204,21 +204,21 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 svc.Namespace = ns
 lbc.svcLister.Indexer.Add(svc)

-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+pod := &api_v1.Pod{
+ObjectMeta: meta_v1.ObjectMeta{
 Labels: l,
 Name: fmt.Sprintf("%d", np),
 Namespace: ns,
-CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
+CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(delay)),
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: api_v1.PodSpec{
+Containers: []api_v1.Container{
 {
-Ports: []api.ContainerPort{{Name: "test", ContainerPort: 80}},
-ReadinessProbe: &api.Probe{
-Handler: api.Handler{
-HTTPGet: &api.HTTPGetAction{
-Scheme: api.URISchemeHTTP,
+Ports: []api_v1.ContainerPort{{Name: "test", ContainerPort: 80}},
+ReadinessProbe: &api_v1.Probe{
+Handler: api_v1.Handler{
+HTTPGet: &api_v1.HTTPGetAction{
+Scheme: api_v1.URISchemeHTTP,
 Path: u,
 Port: intstr.IntOrString{
 Type: intstr.Int,

@@ -239,20 +239,20 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) {
 for zone, nodes := range zoneToNode {
 for _, node := range nodes {
-n := &api.Node{
-ObjectMeta: api.ObjectMeta{
+n := &api_v1.Node{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: node,
 Labels: map[string]string{
 zoneKey: zone,
 },
 },
-Status: api.NodeStatus{
-Conditions: []api.NodeCondition{
-{Type: api.NodeReady, Status: api.ConditionTrue},
+Status: api_v1.NodeStatus{
+Conditions: []api_v1.NodeCondition{
+{Type: api_v1.NodeReady, Status: api_v1.ConditionTrue},
 },
 },
 }
-lbc.nodeLister.Store.Add(n)
+lbc.nodeLister.Indexer.Add(n)
 }
 }
 lbc.CloudClusterManager.instancePool.Init(lbc.tr)

@@ -22,19 +22,23 @@ import (
 "strconv"
 "time"

+"github.com/golang/glog"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/api/meta"
+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/labels"
+"k8s.io/apimachinery/pkg/util/intstr"
+"k8s.io/apimachinery/pkg/util/sets"
+"k8s.io/apimachinery/pkg/util/wait"
+listers "k8s.io/client-go/listers/core/v1"
+api "k8s.io/client-go/pkg/api/v1"
+extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+"k8s.io/client-go/tools/cache"
+"k8s.io/client-go/util/workqueue"

 "k8s.io/ingress/controllers/gce/loadbalancers"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/apis/extensions"
-"k8s.io/kubernetes/pkg/client/cache"
-"k8s.io/kubernetes/pkg/labels"
-"k8s.io/kubernetes/pkg/util/intstr"
-"k8s.io/kubernetes/pkg/util/sets"
-"k8s.io/kubernetes/pkg/util/wait"
-"k8s.io/kubernetes/pkg/util/workqueue"

-"github.com/golang/glog"
 )

 const (

@@ -202,6 +206,41 @@ type StoreToIngressLister struct {
 cache.Store
 }

+// StoreToNodeLister makes a Store that lists Node.
+type StoreToNodeLister struct {
+cache.Indexer
+}
+
+// StoreToServiceLister makes a Store that lists Service.
+type StoreToServiceLister struct {
+cache.Indexer
+}
+
+// StoreToPodLister makes a Store that lists Pods.
+type StoreToPodLister struct {
+cache.Indexer
+}
+
+func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) {
+err = ListAll(s.Indexer, selector, func(m interface{}) {
+ret = append(ret, m.(*api.Pod))
+})
+return ret, err
+}
+
+func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendFunc) error {
+for _, m := range store.List() {
+metadata, err := meta.Accessor(m)
+if err != nil {
+return err
+}
+if selector.Matches(labels.Set(metadata.GetLabels())) {
+appendFn(m)
+}
+}
+return nil
+}
+
 // List lists all Ingress' in the store.
 func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) {
 for _, m := range s.Store.List() {

@@ -336,7 +375,7 @@ func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (
 func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) {
 obj, exists, err := t.svcLister.Indexer.Get(
 &api.Service{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: be.ServiceName,
 Namespace: namespace,
 },

@@ -411,7 +450,7 @@ func getZone(n *api.Node) string {

 // GetZoneForNode returns the zone for a given node by looking up its zone label.
 func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
-nodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+nodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 if err != nil {
 return "", err
 }

@@ -428,7 +467,7 @@ func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
 // ListZones returns a list of zones this Kubernetes cluster spans.
 func (t *GCETranslator) ListZones() ([]string, error) {
 zones := sets.String{}
-readyNodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+readyNodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 if err != nil {
 return zones.List(), err
 }

@@ -502,14 +541,12 @@ func isSimpleHTTPProbe(probe *api.Probe) bool {
 // the request path, callers are responsible for swapping this out for the
 // appropriate default.
 func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
-sl, err := t.svcLister.List(labels.Everything())
-if err != nil {
-return nil, err
-}
+sl := t.svcLister.List()
 var ingresses []extensions.Ingress
 var healthCheck *compute.HttpHealthCheck
 // Find the label and target port of the one service with the given nodePort
-for _, s := range sl {
+for _, as := range sl {
+s := as.(*api.Service)
 for _, p := range s.Spec.Ports {

 // only one Service can match this nodePort, try and look up

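Note: the StoreTo*Lister wrappers added above stand in for the listers that used to live in k8s.io/kubernetes/pkg/client/cache. The sketch below re-declares a local copy of the pod wrapper so it is self-contained, and filters by a plain label match rather than the meta.Accessor-based ListAll above; the helper names and label values are illustrative only.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// StoreToPodLister mirrors the thin wrapper defined in the hunk above.
type StoreToPodLister struct {
	cache.Indexer
}

// List returns all pods in the indexer whose labels match the selector.
func (s *StoreToPodLister) List(selector labels.Selector) ([]*api.Pod, error) {
	var ret []*api.Pod
	for _, m := range s.Indexer.List() {
		pod, ok := m.(*api.Pod)
		if !ok {
			continue
		}
		if selector.Matches(labels.Set(pod.Labels)) {
			ret = append(ret, pod)
		}
	}
	return ret, nil
}

// printHealthCheckPods shows the kind of label lookup HealthCheck performs:
// pick pods carrying the app-<nodePort> label the tests above set up.
func printHealthCheckPods(pods StoreToPodLister, nodePort int64) error {
	sel := labels.SelectorFromSet(labels.Set{fmt.Sprintf("app-%d", nodePort): "test"})
	matched, err := pods.List(sel)
	if err != nil {
		return err
	}
	for _, p := range matched {
		fmt.Println(p.Namespace, p.Name)
	}
	return nil
}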
@@ -26,9 +26,11 @@ import (
 "io/ioutil"
 "log"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/apimachinery/registered"
-"k8s.io/kubernetes/pkg/runtime"
+registered "k8s.io/apimachinery/pkg/apimachinery/registered"
+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/runtime"
+"k8s.io/client-go/pkg/api"
+api_v1 "k8s.io/client-go/pkg/api/v1"

 // This installs the legacy v1 API
 _ "k8s.io/kubernetes/pkg/api/install"

@@ -58,14 +60,19 @@ func main() {
 }
 tlsCrt := read(*crt)
 tlsKey := read(*key)
-secret := &api.Secret{
-ObjectMeta: api.ObjectMeta{
+secret := &api_v1.Secret{
+ObjectMeta: meta_v1.ObjectMeta{
 Name: *name,
 },
 Data: map[string][]byte{
-api.TLSCertKey: tlsCrt,
-api.TLSPrivateKeyKey: tlsKey,
+api_v1.TLSCertKey: tlsCrt,
+api_v1.TLSPrivateKeyKey: tlsKey,
 },
 }
-fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), secret))

+arm, err := registered.NewAPIRegistrationManager("")
+if err != nil {
+log.Fatalf("%v", err)
+}
+fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(arm.EnabledVersions()...), secret))
 }

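Note: make_secret now builds the secret from client-go v1 types and obtains the enabled versions from an explicit APIRegistrationManager before encoding with the legacy codec. As a hedged alternative sketch only, the same object can also be emitted with encoding/json instead of that codec; printTLSSecret and its placeholder arguments are not part of the commit.

package example

import (
	"encoding/json"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

// printTLSSecret builds a v1 Secret carrying the standard TLS keys and prints
// it as indented JSON. Field values are placeholders.
func printTLSSecret(name string, tlsCrt, tlsKey []byte) {
	secret := &api_v1.Secret{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data: map[string][]byte{
			api_v1.TLSCertKey:       tlsCrt,
			api_v1.TLSPrivateKeyKey: tlsKey,
		},
	}
	out, err := json.MarshalIndent(secret, "", "  ")
	if err != nil {
		log.Fatalf("%v", err)
	}
	fmt.Println(string(out))
}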
@@ -20,8 +20,9 @@ import (
 "fmt"

 compute "google.golang.org/api/compute/v1"
-"k8s.io/ingress/controllers/gce/utils"
 netset "k8s.io/kubernetes/pkg/util/net/sets"

+"k8s.io/ingress/controllers/gce/utils"
 )

 type fakeFirewallRules struct {

@@ -17,13 +17,15 @@ limitations under the License.
 package firewalls

 import (
-"github.com/golang/glog"
 "strconv"

+"github.com/golang/glog"

 compute "google.golang.org/api/compute/v1"
-"k8s.io/ingress/controllers/gce/utils"
+"k8s.io/apimachinery/pkg/util/sets"
 netset "k8s.io/kubernetes/pkg/util/net/sets"
-"k8s.io/kubernetes/pkg/util/sets"

+"k8s.io/ingress/controllers/gce/utils"
 )

 // Src ranges from which the GCE L7 performs health checks.

@@ -20,6 +20,7 @@ import (
 "fmt"

 compute "google.golang.org/api/compute/v1"

 "k8s.io/ingress/controllers/gce/utils"
 )

@@ -17,11 +17,12 @@ limitations under the License.
 package healthchecks

 import (
-compute "google.golang.org/api/compute/v1"
+"net/http"

 "github.com/golang/glog"
+compute "google.golang.org/api/compute/v1"

 "k8s.io/ingress/controllers/gce/utils"
-"net/http"
 )

 // HealthChecks manages health checks.

@@ -20,8 +20,9 @@ import (
 "fmt"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"
 )

 // NewFakeInstanceGroups creates a new FakeInstanceGroups.

@@ -21,12 +21,13 @@ import (
 "net/http"
 "strings"

+"github.com/golang/glog"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/storage"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"

-"github.com/golang/glog"
 )

 const (

@@ -19,7 +19,7 @@ package instances
 import (
 "testing"

-"k8s.io/kubernetes/pkg/util/sets"
+"k8s.io/apimachinery/pkg/util/sets"
 )

 const defaultZone = "default-zone"

@@ -21,8 +21,9 @@ import (
 "testing"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"
 )

 var testIPManager = testIP{}

@@ -25,13 +25,14 @@ import (
 "reflect"
 "strings"

+"github.com/golang/glog"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/backends"
 "k8s.io/ingress/controllers/gce/storage"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"

-"github.com/golang/glog"
 )

 const (

@@ -21,11 +21,12 @@ import (
 "testing"

 compute "google.golang.org/api/compute/v1"
+"k8s.io/apimachinery/pkg/util/sets"

 "k8s.io/ingress/controllers/gce/backends"
 "k8s.io/ingress/controllers/gce/healthchecks"
 "k8s.io/ingress/controllers/gce/instances"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/util/sets"
 )

 const (

@@ -26,20 +26,23 @@ import (
 "syscall"
 "time"

+"github.com/golang/glog"

+"github.com/prometheus/client_golang/prometheus/promhttp"
 flag "github.com/spf13/pflag"
+meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/labels"
+"k8s.io/apimachinery/pkg/util/wait"
+"k8s.io/client-go/kubernetes"
+"k8s.io/client-go/pkg/api"
+api_v1 "k8s.io/client-go/pkg/api/v1"
+"k8s.io/client-go/rest"
+"k8s.io/client-go/tools/clientcmd"

 "k8s.io/ingress/controllers/gce/controller"
 "k8s.io/ingress/controllers/gce/loadbalancers"
 "k8s.io/ingress/controllers/gce/storage"
 "k8s.io/ingress/controllers/gce/utils"
-"k8s.io/kubernetes/pkg/api"
-client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-"k8s.io/kubernetes/pkg/client/restclient"
-kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-"k8s.io/kubernetes/pkg/labels"
-"k8s.io/kubernetes/pkg/util/wait"

-"github.com/golang/glog"
-"github.com/prometheus/client_golang/prometheus/promhttp"
 )

 // Entrypoint of GLBC. Example invocation:

@@ -50,9 +53,9 @@
 // $ glbc --proxy="http://localhost:proxyport"

 const (
-// lbApiPort is the port on which the loadbalancer controller serves a
+// lbAPIPort is the port on which the loadbalancer controller serves a
 // minimal api (/healthz, /delete-all-and-quit etc).
-lbApiPort = 8081
+lbAPIPort = 8081

 // A delimiter used for clarity in naming GCE resources.
 clusterNameDelimiter = "--"

@@ -119,7 +122,7 @@
 `Path to a file containing the gce config. If left unspecified this
 controller only works with default zones.`)

-healthzPort = flags.Int("healthz-port", lbApiPort,
+healthzPort = flags.Int("healthz-port", lbAPIPort,
 `Port to run healthz server. Must match the health check port in yaml.`)
 )

@@ -171,7 +174,7 @@ func main() {
 // We only really need a binary switch from light, v(2) logging to
 // heavier debug style V(4) logging, which we use --verbose for.
 flags.Parse(os.Args)
-clientConfig := kubectl_util.DefaultClientConfig(flags)
+//clientConfig := kubectl_util.DefaultClientConfig(flags)

 // Set glog verbosity levels, unconditionally set --alsologtostderr.
 go_flag.Lookup("logtostderr").Value.Set("true")

@@ -183,20 +186,23 @@ func main() {
 glog.Fatalf("Please specify --default-backend")
 }

-var config *restclient.Config
+var config *rest.Config
 // Create kubeclient
 if *inCluster {
-if config, err = restclient.InClusterConfig(); err != nil {
+if config, err = rest.InClusterConfig(); err != nil {
 glog.Fatalf("error creating client configuration: %v", err)
 }
 } else {
-config, err = clientConfig.ClientConfig()
+config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+&clientcmd.ClientConfigLoadingRules{},
+&clientcmd.ConfigOverrides{}).ClientConfig()
+
 if err != nil {
 glog.Fatalf("error creating client configuration: %v", err)
 }
 }

-kubeClient, err := client.NewForConfig(config)
+kubeClient, err := kubernetes.NewForConfig(config)
 if err != nil {
 glog.Fatalf("Failed to create client: %v.", err)
 }

@@ -247,7 +253,7 @@ func main() {
 }
 }

-func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) {
+func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string) (*utils.Namer, error) {
 name, err := getClusterUID(kubeClient, clusterName)
 if err != nil {
 return nil, err

@@ -329,7 +335,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n
 // backwards compatibility, the firewall name will default to the cluster UID.
 // Use getFlagOrLookupVault to obtain a stored or overridden value for the firewall name.
 // else, use the cluster UID as a backup (this retains backwards compatibility).
-func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (string, error) {
+func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) {
 cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
 return "", err

@@ -347,7 +353,7 @@ func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (str
 // else, check if there are any working Ingresses
 // - remember that "" is the cluster uid
 // else, allocate a new uid
-func getClusterUID(kubeClient client.Interface, name string) (string, error) {
+func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) {
 cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
 return "", err

@@ -356,7 +362,9 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 }

 // Check if the cluster has an Ingress with ip
-ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
+ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{
+LabelSelector: labels.Everything().String(),
+})
 if err != nil {
 return "", err
 }

@@ -387,11 +395,11 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 }

 // getNodePort waits for the Service, and returns it's first node port.
-func getNodePort(client client.Interface, ns, name string) (nodePort int64, err error) {
-var svc *api.Service
+func getNodePort(client kubernetes.Interface, ns, name string) (nodePort int64, err error) {
+var svc *api_v1.Service
 glog.V(3).Infof("Waiting for %v/%v", ns, name)
 wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-svc, err = client.Core().Services(ns).Get(name)
+svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
 if err != nil {
 return false, nil
 }

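Note: main.go above now builds its *rest.Config either from rest.InClusterConfig or from clientcmd loading rules, then creates a client-go clientset. A compact sketch of that bootstrap, assuming clientcmd's default kubeconfig discovery; newKubeClient is an illustrative wrapper, not a function from this repository.

package main

import (
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// newKubeClient returns a clientset built from the in-cluster service account
// when running as a pod, otherwise from the local kubeconfig.
func newKubeClient(inCluster bool) (kubernetes.Interface, error) {
	var config *rest.Config
	var err error
	if inCluster {
		config, err = rest.InClusterConfig()
	} else {
		config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
			clientcmd.NewDefaultClientConfigLoadingRules(),
			&clientcmd.ConfigOverrides{}).ClientConfig()
	}
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}

func main() {
	client, err := newKubeClient(false)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	_ = client
}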
@@ -22,10 +22,12 @@ import (
 "sync"

 "github.com/golang/glog"
-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/api/errors"
-"k8s.io/kubernetes/pkg/client/cache"
-client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

+"k8s.io/apimachinery/pkg/api/errors"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/client-go/kubernetes"
+api "k8s.io/client-go/pkg/api/v1"
+"k8s.io/client-go/tools/cache"
 )

 const (

@@ -72,7 +74,7 @@ func (c *ConfigMapVault) Put(key, val string) error {
 c.storeLock.Lock()
 defer c.storeLock.Unlock()
 apiObj := &api.ConfigMap{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: c.name,
 Namespace: c.namespace,
 },

@@ -121,7 +123,7 @@ func (c *ConfigMapVault) Delete() error {
 // NewConfigMapVault creates a config map client.
 // This client is essentially meant to abstract out the details of
 // configmaps and the API, and just store/retrieve a single value, the cluster uid.
-func NewConfigMapVault(c client.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
+func NewConfigMapVault(c kubernetes.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
 return &ConfigMapVault{
 ConfigMapStore: NewConfigMapStore(c),
 namespace: uidNs,

@@ -148,7 +150,7 @@ type ConfigMapStore interface {
 // through cache.
 type APIServerConfigMapStore struct {
 ConfigMapStore
-client client.Interface
+client kubernetes.Interface
 }

 // Add adds the given config map to the apiserver's store.

@@ -168,7 +170,7 @@ func (a *APIServerConfigMapStore) Update(obj interface{}) error {
 // Delete deletes the existing config map object.
 func (a *APIServerConfigMapStore) Delete(obj interface{}) error {
 cfg := obj.(*api.ConfigMap)
-return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &api.DeleteOptions{})
+return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{})
 }

 // GetByKey returns the config map for a given key.

@@ -179,7 +181,7 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists
 return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key)
 }
 ns, name := nsName[0], nsName[1]
-cfg, err := a.client.Core().ConfigMaps(ns).Get(name)
+cfg, err := a.client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{})
 if err != nil {
 // Translate not found errors to found=false, err=nil
 if errors.IsNotFound(err) {

@@ -192,6 +194,6 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists

 // NewConfigMapStore returns a config map store capable of persisting updates
 // to apiserver.
-func NewConfigMapStore(c client.Interface) ConfigMapStore {
+func NewConfigMapStore(c kubernetes.Interface) ConfigMapStore {
 return &APIServerConfigMapStore{ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc), client: c}
 }

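Note: the ConfigMap vault above now goes through typed client-go calls with metav1 option structs. A hedged sketch of the same get-or-create/update flow; upsertConfigMapValue and its arguments are placeholders, not the vault's actual API.

package example

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
)

// upsertConfigMapValue writes a single key/value into a ConfigMap, creating
// the ConfigMap when it does not exist yet.
func upsertConfigMapValue(client kubernetes.Interface, ns, name, key, val string) error {
	cms := client.Core().ConfigMaps(ns)
	cfg, err := cms.Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		_, err = cms.Create(&api.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
			Data:       map[string]string{key: val},
		})
		return err
	}
	if err != nil {
		return err
	}
	if cfg.Data == nil {
		cfg.Data = map[string]string{}
	}
	cfg.Data[key] = val
	_, err = cms.Update(cfg)
	return err
}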
@@ -19,7 +19,7 @@ package storage
 import (
 "testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/client-go/pkg/api"
 )

 func TestConfigMapUID(t *testing.T) {

@@ -21,8 +21,9 @@ import (
 "time"

 "github.com/golang/glog"
-"k8s.io/kubernetes/pkg/client/cache"
-"k8s.io/kubernetes/pkg/util/wait"

+"k8s.io/apimachinery/pkg/util/wait"
+"k8s.io/client-go/tools/cache"
 )

 // Snapshotter is an interface capable of providing a consistent snapshot of

@@ -18,6 +18,7 @@ package utils

 import (
 "fmt"
+"regexp"
 "strconv"
 "strings"
 "sync"

@@ -25,7 +26,6 @@ import (
 "github.com/golang/glog"
 compute "google.golang.org/api/compute/v1"
 "google.golang.org/api/googleapi"
-"regexp"
 )

 const (