Merge pull request #1771 from bprashanth/ing_0.8.0

Automatic merge from submit-queue

Bump glbc to 0.8.0

I don't think this hits the bar for 1.4.0, but hopefully it can make 1.4.1. The version bump picks up the godep update that fixes an issue with the throttling workqueue (https://github.com/kubernetes/kubernetes/pull/31396). I should've done this sooner but dropped it.

Also fixes https://github.com/kubernetes/contrib/issues/1776 and https://github.com/kubernetes/contrib/issues/1783
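
For context on the workqueue fix: controllers like glbc drive retries through a rate-limited workqueue, and the godep bump pulls in the upstream fix to that package's throttling behavior. A minimal sketch of the usage pattern follows; this is not glbc's actual sync loop, the sync function is a placeholder, and the import path shown is the modern k8s.io/client-go one (the 2016 code vendored k8s.io/kubernetes/pkg/util/workqueue).

package main

import (
    "fmt"

    "k8s.io/client-go/util/workqueue"
)

// sync is a placeholder for the controller's real reconcile logic.
func sync(key string) error {
    return nil
}

func main() {
    // A rate-limited queue: failed items are requeued with backoff
    // instead of being retried in a hot loop. This throttling path
    // is what the upstream workqueue fix touched.
    queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
    queue.Add("default/my-ingress")

    key, shutdown := queue.Get()
    if shutdown {
        return
    }
    if err := sync(key.(string)); err != nil {
        queue.AddRateLimited(key) // retry later, with backoff
    } else {
        queue.Forget(key) // clear the item's backoff history
    }
    queue.Done(key)
    fmt.Println("processed", key)
}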
Kubernetes Submit Queue, 2016-09-27 14:39:04 -07:00, committed by GitHub
commit d6e8d89108
6 changed files with 89 additions and 17 deletions


@@ -327,7 +327,7 @@ So simply delete the replication controller:
 $ kubectl get rc glbc
 CONTROLLER   CONTAINER(S)           IMAGE(S)                                       SELECTOR                    REPLICAS   AGE
 glbc         default-http-backend   gcr.io/google_containers/defaultbackend:1.0   k8s-app=glbc,version=v0.5   1          2m
-             l7-lb-controller       gcr.io/google_containers/glbc:0.6.0
+             l7-lb-controller       gcr.io/google_containers/glbc:0.8.0
 $ kubectl delete rc glbc
 replicationcontroller "glbc" deleted


@@ -19,12 +19,18 @@ package controller
 import (
     "fmt"
     "testing"
+    "time"

     "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/util/sets"
 )

+// Pods created in loops start from this time, for routines that
+// sort on timestamp.
+var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
+
 func TestZoneListing(t *testing.T) {
     cm := NewFakeClusterManager(DefaultClusterUID)
     lbc := newLoadBalancerController(t, cm, "")
@@ -92,7 +98,7 @@ func TestProbeGetter(t *testing.T) {
         3001: "/healthz",
         3002: "/foo",
     }
-    addPods(lbc, nodePortToHealthCheck)
+    addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
     for p, exp := range nodePortToHealthCheck {
         got, err := lbc.tr.HealthCheck(p)
         if err != nil {
@@ -103,7 +109,58 @@ func TestProbeGetter(t *testing.T) {
     }
 }

-func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
+func TestProbeGetterCrossNamespace(t *testing.T) {
+    cm := NewFakeClusterManager(DefaultClusterUID)
+    lbc := newLoadBalancerController(t, cm, "")
+    firstPod := &api.Pod{
+        ObjectMeta: api.ObjectMeta{
+            // labels match those added by "addPods", but ns and health check
+            // path is different. If this pod was created in the same ns, it
+            // would become the health check.
+            Labels:            map[string]string{"app-3001": "test"},
+            Name:              fmt.Sprintf("test-pod-new-ns"),
+            Namespace:         "new-ns",
+            CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+        },
+        Spec: api.PodSpec{
+            Containers: []api.Container{
+                {
+                    Ports: []api.ContainerPort{{ContainerPort: 80}},
+                    ReadinessProbe: &api.Probe{
+                        Handler: api.Handler{
+                            HTTPGet: &api.HTTPGetAction{
+                                Scheme: api.URISchemeHTTP,
+                                Path:   "/badpath",
+                                Port: intstr.IntOrString{
+                                    Type:   intstr.Int,
+                                    IntVal: 80,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+    lbc.podLister.Indexer.Add(firstPod)
+    nodePortToHealthCheck := map[int64]string{
+        3001: "/healthz",
+    }
+    addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+    for p, exp := range nodePortToHealthCheck {
+        got, err := lbc.tr.HealthCheck(p)
+        if err != nil {
+            t.Errorf("Failed to get health check for node port %v: %v", p, err)
+        } else if got.RequestPath != exp {
+            t.Errorf("Wrong health check for node port %v, got %v expected %v", p, got.RequestPath, exp)
+        }
+    }
+}
+
+func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string, ns string) {
+    delay := time.Minute
     for np, u := range nodePortToHealthCheck {
         l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
         svc := &api.Service{
@@ -121,12 +178,15 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
             },
         }
         svc.Name = fmt.Sprintf("%d", np)
+        svc.Namespace = ns
         lbc.svcLister.Store.Add(svc)

         pod := &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Labels: l,
                 Name:   fmt.Sprintf("%d", np),
+                Namespace:         ns,
+                CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
             },
             Spec: api.PodSpec{
                 Containers: []api.Container{
@@ -149,6 +209,7 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
             },
         }
         lbc.podLister.Indexer.Add(pod)
+        delay = 2 * delay
     }
 }
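
A note on the staggered CreationTimestamps above: the translator sorts candidate pods oldest-first and takes the readiness probe from the first match, which is why addPods doubles the delay per pod and why TestProbeGetterCrossNamespace backdates its decoy pod by a full hour. A standalone sketch of that ordering, with a stub type instead of api.Pod (names here are illustrative assumptions, not the controller's real PodsByCreationTimestamp):

package main

import (
    "fmt"
    "sort"
    "time"
)

// podStub stands in for api.Pod; only the fields needed to show
// the ordering are included.
type podStub struct {
    Name    string
    Created time.Time
}

// byCreation mirrors the idea behind PodsByCreationTimestamp:
// oldest pod sorts first.
type byCreation []podStub

func (p byCreation) Len() int           { return len(p) }
func (p byCreation) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byCreation) Less(i, j int) bool { return p[i].Created.Before(p[j].Created) }

func main() {
    base := time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)
    pods := []podStub{
        {"pod-3002", base.Add(2 * time.Minute)},
        {"pod-3001", base.Add(time.Minute)},
        {"decoy-new-ns", base.Add(-time.Hour)}, // oldest; would win without the namespace filter
    }
    sort.Sort(byCreation(pods))
    fmt.Println(pods[0].Name) // prints "decoy-new-ns"
}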


@@ -405,7 +405,9 @@ func isPortEqual(port, targetPort intstr.IntOrString) bool {

 // geHTTPProbe returns the http readiness probe from the first container
 // that matches targetPort, from the set of pods matching the given labels.
-func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
+func (t *GCETranslator) getHTTPProbe(svc api.Service, targetPort intstr.IntOrString) (*api.Probe, error) {
+    l := svc.Spec.Selector
     // Lookup any container with a matching targetPort from the set of pods
     // with a matching label selector.
     pl, err := t.podLister.List(labels.SelectorFromSet(labels.Set(l)))
@@ -417,6 +419,9 @@ func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
     sort.Sort(PodsByCreationTimestamp(pl))
     for _, pod := range pl {
+        if pod.Namespace != svc.Namespace {
+            continue
+        }
         logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
         for _, c := range pod.Spec.Containers {
             if !isSimpleHTTPProbe(c.ReadinessProbe) {
@@ -460,7 +465,7 @@ func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
     for _, s := range sl.Items {
         for _, p := range s.Spec.Ports {
             if int32(port) == p.NodePort {
-                rp, err := t.getHTTPProbe(s.Spec.Selector, p.TargetPort)
+                rp, err := t.getHTTPProbe(s, p.TargetPort)
                 if err != nil {
                     return nil, err
                 }


@@ -64,13 +64,13 @@ func (h *HealthChecks) Add(port int64) error {
             return err
         }
     } else if wantHC.RequestPath != hc.RequestPath {
-        // TODO: also compare headers interval etc.
-        glog.Infof("Updating health check %v, path %v -> %v", name, hc.RequestPath, wantHC.RequestPath)
-        if err := h.cloud.UpdateHttpHealthCheck(wantHC); err != nil {
-            return err
-        }
+        // TODO: reconcile health checks, and compare headers interval etc.
+        // Currently Ingress doesn't expose all the health check params
+        // natively, so some users prefer to hand modify the check.
+        glog.Infof("Unexpected request path on health check %v, has %v want %v, NOT reconciling",
+            name, hc.RequestPath, wantHC.RequestPath)
     } else {
-        glog.Infof("Health check %v already exists", hc.Name)
+        glog.Infof("Health check %v already exists and has the expected path %v", hc.Name, hc.RequestPath)
     }
     return nil
 }


@@ -60,7 +60,7 @@ const (
     alphaNumericChar = "0"

     // Current docker image version. Only used in debug logging.
-    imageVersion = "glbc:0.7.1"
+    imageVersion = "glbc:0.8.0"

     // Key used to persist UIDs to configmaps.
     uidConfigMapName = "ingress-uid"
@@ -162,12 +162,18 @@ func main() {
     var kubeClient *client.Client
     var err error
     var clusterManager *controller.ClusterManager
+
+    // TODO: We can simply parse all go flags with
+    // flags.AddGoFlagSet(go_flag.CommandLine)
+    // but that pollutes --help output with a ton of standard go flags.
+    // We only really need a binary switch from light, v(2) logging to
+    // heavier debug style V(4) logging, which we use --verbose for.
     flags.Parse(os.Args)
     clientConfig := kubectl_util.DefaultClientConfig(flags)

-    // Set glog verbosity levels
-    if *verbose {
-        go_flag.Lookup("logtostderr").Value.Set("true")
+    // Set glog verbosity levels, unconditionally set --alsologtostderr.
+    go_flag.Lookup("logtostderr").Value.Set("true")
+    if *verbose {
         go_flag.Set("v", "4")
     }
     glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)


@@ -61,7 +61,7 @@ spec:
           requests:
             cpu: 10m
             memory: 20Mi
-      - image: gcr.io/google_containers/glbc:0.6.2
+      - image: gcr.io/google_containers/glbc:0.8.0
         livenessProbe:
           httpGet:
             path: /healthz