Merge pull request #1771 from bprashanth/ing_0.8.0
Automatic merge from submit-queue

Bump glbc to 0.8.0

Don't think this hits the bar for 1.4.0, but hopefully it can make 1.4.1. The version bump is for the godep update that fixes an issue with the throttling workqueue (https://github.com/kubernetes/kubernetes/pull/31396); I should've done this sooner but dropped it. Also fixes https://github.com/kubernetes/contrib/issues/1776 and https://github.com/kubernetes/contrib/issues/1783.
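The throttling workqueue the description refers to is the rate-limited queue the controller drains between syncs; the godep bump pulls in the upstream fix. A minimal sketch of that queue's usage pattern, assuming the k8s.io/kubernetes/pkg/util/workqueue API of this era; the key and process function are illustrative, not code from this PR:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func process(key string) error {
	fmt.Println("syncing", key)
	return nil
}

func main() {
	// Rate-limited ("throttling") queue: failed keys are retried with
	// exponential backoff instead of hot-looping.
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("default/my-ingress") // enqueue a key for processing

	key, shutdown := q.Get() // blocks until an item arrives or the queue shuts down
	if shutdown {
		return
	}
	defer q.Done(key) // mark processing finished so the key can be re-queued

	if err := process(key.(string)); err != nil {
		q.AddRateLimited(key) // retry after a backoff delay
		return
	}
	q.Forget(key) // reset the key's backoff history on success
}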
commit d6e8d89108
6 changed files with 89 additions and 17 deletions
@@ -327,7 +327,7 @@ So simply delete the replication controller:
 $ kubectl get rc glbc
 CONTROLLER         CONTAINER(S)           IMAGE(S)                                      SELECTOR                    REPLICAS   AGE
 glbc               default-http-backend   gcr.io/google_containers/defaultbackend:1.0   k8s-app=glbc,version=v0.5   1          2m
-                   l7-lb-controller       gcr.io/google_containers/glbc:0.6.0
+                   l7-lb-controller       gcr.io/google_containers/glbc:0.8.0
 
 $ kubectl delete rc glbc
 replicationcontroller "glbc" deleted
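The docs change above only refreshes the image tag in sample output; per the surrounding upgrade instructions, the controller is recreated from its manifest after the delete. A hypothetical follow-up in the same transcript style (the manifest filename is illustrative, not from the doc):

$ kubectl create -f glbc-rc.yaml
replicationcontroller "glbc" created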
@@ -19,12 +19,18 @@ package controller
 import (
+	"fmt"
 	"testing"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
+// Pods created in loops start from this time, for routines that
+// sort on timestamp.
+var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
+
 func TestZoneListing(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID)
 	lbc := newLoadBalancerController(t, cm, "")
@@ -92,7 +98,7 @@ func TestProbeGetter(t *testing.T) {
 		3001: "/healthz",
 		3002: "/foo",
 	}
-	addPods(lbc, nodePortToHealthCheck)
+	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
 		if err != nil {
@@ -103,7 +109,58 @@ func TestProbeGetter(t *testing.T) {
 		}
 	}
 }
 
-func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
+func TestProbeGetterCrossNamespace(t *testing.T) {
+	cm := NewFakeClusterManager(DefaultClusterUID)
+	lbc := newLoadBalancerController(t, cm, "")
+
+	firstPod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			// labels match those added by "addPods", but ns and health check
+			// path are different. If this pod was created in the same ns, it
+			// would become the health check.
+			Labels:            map[string]string{"app-3001": "test"},
+			Name:              fmt.Sprintf("test-pod-new-ns"),
+			Namespace:         "new-ns",
+			CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Ports: []api.ContainerPort{{ContainerPort: 80}},
+					ReadinessProbe: &api.Probe{
+						Handler: api.Handler{
+							HTTPGet: &api.HTTPGetAction{
+								Scheme: api.URISchemeHTTP,
+								Path:   "/badpath",
+								Port: intstr.IntOrString{
+									Type:   intstr.Int,
+									IntVal: 80,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	lbc.podLister.Indexer.Add(firstPod)
+
+	nodePortToHealthCheck := map[int64]string{
+		3001: "/healthz",
+	}
+	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+
+	for p, exp := range nodePortToHealthCheck {
+		got, err := lbc.tr.HealthCheck(p)
+		if err != nil {
+			t.Errorf("Failed to get health check for node port %v: %v", p, err)
+		} else if got.RequestPath != exp {
+			t.Errorf("Wrong health check for node port %v, got %v expected %v", p, got.RequestPath, exp)
+		}
+	}
+}
+
+func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string, ns string) {
 	delay := time.Minute
 	for np, u := range nodePortToHealthCheck {
 		l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
 		svc := &api.Service{

@@ -121,12 +178,15 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
 			},
 		}
 		svc.Name = fmt.Sprintf("%d", np)
+		svc.Namespace = ns
 		lbc.svcLister.Store.Add(svc)
 
 		pod := &api.Pod{
 			ObjectMeta: api.ObjectMeta{
-				Labels: l,
-				Name:   fmt.Sprintf("%d", np),
+				Labels:            l,
+				Name:              fmt.Sprintf("%d", np),
+				Namespace:         ns,
+				CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
 			},
 			Spec: api.PodSpec{
 				Containers: []api.Container{

@@ -149,6 +209,7 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
 			},
 		}
 		lbc.podLister.Indexer.Add(pod)
+		delay = 2 * delay
 	}
 }
 
@@ -405,7 +405,9 @@ func isPortEqual(port, targetPort intstr.IntOrString) bool {
 
 // getHTTPProbe returns the http readiness probe from the first container
 // that matches targetPort, from the set of pods matching the given labels.
-func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
+func (t *GCETranslator) getHTTPProbe(svc api.Service, targetPort intstr.IntOrString) (*api.Probe, error) {
+	l := svc.Spec.Selector
+
 	// Lookup any container with a matching targetPort from the set of pods
 	// with a matching label selector.
 	pl, err := t.podLister.List(labels.SelectorFromSet(labels.Set(l)))

@@ -417,6 +419,9 @@ func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
 	sort.Sort(PodsByCreationTimestamp(pl))
 
 	for _, pod := range pl {
+		if pod.Namespace != svc.Namespace {
+			continue
+		}
 		logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
 		for _, c := range pod.Spec.Containers {
 			if !isSimpleHTTPProbe(c.ReadinessProbe) {
@@ -460,7 +465,7 @@ func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
 	for _, s := range sl.Items {
 		for _, p := range s.Spec.Ports {
 			if int32(port) == p.NodePort {
-				rp, err := t.getHTTPProbe(s.Spec.Selector, p.TargetPort)
+				rp, err := t.getHTTPProbe(s, p.TargetPort)
 				if err != nil {
 					return nil, err
 				}
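Why the new svc parameter matters: the controller's pod lister spans every namespace, and getHTTPProbe picks the oldest matching pod (note the PodsByCreationTimestamp sort), so a backdated pod in an unrelated namespace that merely shares labels could supply the health-check path. That is exactly the scenario TestProbeGetterCrossNamespace constructs. A self-contained sketch of the failure mode and the guard, with stand-in types (none of these names come from the repo):

package main

import "fmt"

// pod is a stripped-down stand-in for api.Pod, for illustration only.
type pod struct {
	name      string
	namespace string
	labels    map[string]string
}

// matches reports whether p carries every label in selector.
func matches(p pod, selector map[string]string) bool {
	for k, v := range selector {
		if p.labels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"app-3001": "test"}
	svcNamespace := "default"
	// Sorted oldest-first, mimicking PodsByCreationTimestamp: the backdated
	// pod from the wrong namespace comes before the right one.
	pods := []pod{
		{name: "test-pod-new-ns", namespace: "new-ns", labels: map[string]string{"app-3001": "test"}},
		{name: "3001", namespace: "default", labels: map[string]string{"app-3001": "test"}},
	}
	for _, p := range pods {
		if !matches(p, selector) {
			continue
		}
		// Without this guard the first (wrong-namespace) pod would win,
		// because label selectors alone are not namespace-scoped.
		if p.namespace != svcNamespace {
			continue
		}
		fmt.Println("health check source pod:", p.name)
		break
	}
}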
@@ -64,13 +64,13 @@ func (h *HealthChecks) Add(port int64) error {
 			return err
 		}
 	} else if wantHC.RequestPath != hc.RequestPath {
-		// TODO: also compare headers interval etc.
-		glog.Infof("Updating health check %v, path %v -> %v", name, hc.RequestPath, wantHC.RequestPath)
-		if err := h.cloud.UpdateHttpHealthCheck(wantHC); err != nil {
-			return err
-		}
+		// TODO: reconcile health checks, and compare headers interval etc.
+		// Currently Ingress doesn't expose all the health check params
+		// natively, so some users prefer to hand modify the check.
+		glog.Infof("Unexpected request path on health check %v, has %v want %v, NOT reconciling",
+			name, hc.RequestPath, wantHC.RequestPath)
 	} else {
-		glog.Infof("Health check %v already exists", hc.Name)
+		glog.Infof("Health check %v already exists and has the expected path %v", hc.Name, hc.RequestPath)
 	}
 	return nil
 }
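The behavioral change in this file: a RequestPath mismatch used to trigger h.cloud.UpdateHttpHealthCheck(wantHC), silently clobbering hand-tuned checks; now the controller only logs the mismatch. A compact sketch of the new policy with stand-in types (names here are illustrative, not the controller's API):

package main

import "fmt"

// healthCheck is a stand-in for the compute API type (illustrative only).
type healthCheck struct{ Name, RequestPath string }

// ensure sketches the post-change policy: create when missing, warn on a
// path mismatch, and never overwrite a possibly hand-modified check.
func ensure(existing *healthCheck, want healthCheck) (*healthCheck, string) {
	switch {
	case existing == nil:
		return &want, "created"
	case existing.RequestPath != want.RequestPath:
		return existing, "unexpected request path, NOT reconciling"
	default:
		return existing, "already exists with the expected path"
	}
}

func main() {
	hand := &healthCheck{Name: "k8s-be-3001", RequestPath: "/custom"}
	_, verdict := ensure(hand, healthCheck{Name: "k8s-be-3001", RequestPath: "/healthz"})
	fmt.Println(verdict) // unexpected request path, NOT reconciling
}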
@@ -60,7 +60,7 @@ const (
 	alphaNumericChar = "0"
 
 	// Current docker image version. Only used in debug logging.
-	imageVersion = "glbc:0.7.1"
+	imageVersion = "glbc:0.8.0"
 
 	// Key used to persist UIDs to configmaps.
 	uidConfigMapName = "ingress-uid"
@@ -162,12 +162,18 @@ func main() {
 	var kubeClient *client.Client
 	var err error
+	var clusterManager *controller.ClusterManager
 
+	// TODO: We can simply parse all go flags with
+	// flags.AddGoFlagSet(go_flag.CommandLine)
+	// but that pollutes --help output with a ton of standard go flags.
+	// We only really need a binary switch from light, v(2) logging to
+	// heavier debug style V(4) logging, which we use --verbose for.
 	flags.Parse(os.Args)
 	clientConfig := kubectl_util.DefaultClientConfig(flags)
 
-	// Set glog verbosity levels
+	// Set glog verbosity levels, unconditionally set --alsologtostderr.
+	go_flag.Lookup("logtostderr").Value.Set("true")
 	if *verbose {
-		go_flag.Lookup("logtostderr").Value.Set("true")
 		go_flag.Set("v", "4")
 	}
 	glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)
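The TODO added here records a tradeoff: flags.AddGoFlagSet(go_flag.CommandLine) would wire glog's standard-library flags into the pflag set automatically, at the cost of a cluttered --help. A sketch of that rejected alternative, assuming the spf13/pflag API:

package main

import (
	go_flag "flag"
	"fmt"
	"os"

	flag "github.com/spf13/pflag"
)

func main() {
	// Merge every flag registered on the standard library's FlagSet
	// (glog's -v, -logtostderr, ...) into the pflag set. The cost, as
	// the TODO notes, is a much noisier --help listing.
	flag.CommandLine.AddGoFlagSet(go_flag.CommandLine)
	flag.CommandLine.Parse(os.Args[1:])
	fmt.Println("flags parsed")
}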
@@ -61,7 +61,7 @@ spec:
         requests:
           cpu: 10m
           memory: 20Mi
-      - image: gcr.io/google_containers/glbc:0.6.2
+      - image: gcr.io/google_containers/glbc:0.8.0
         livenessProbe:
           httpGet:
             path: /healthz