Don't consider pods cross namespace for health checks
parent 3c703356ee
commit d99efea804
2 changed files with 72 additions and 6 deletions
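The behavioral change is easiest to see in isolation: getHTTPProbe now receives the whole Service (not just its selector) so it can skip pods from other namespaces when picking the readiness probe that backs the GCE health check. The sketch below illustrates that selection rule only; the pod, service, and probePathFor names and the flattened fields are simplified stand-ins, not the controller's api.Pod/api.Service plumbing.

package main

// Minimal sketch of the namespace-aware probe selection this commit introduces.
// Types are simplified stand-ins for the Kubernetes API objects.

import (
	"fmt"
	"sort"
	"time"
)

type pod struct {
	Name      string
	Namespace string
	Labels    map[string]string
	Created   time.Time
	ProbePath string // readiness-probe HTTP path, "" if none
}

type service struct {
	Namespace string
	Selector  map[string]string
}

// matches reports whether the pod labels satisfy the service selector.
func matches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

// probePathFor returns the readiness-probe path of the oldest pod that matches
// the service selector AND lives in the service's namespace.
func probePathFor(svc service, pods []pod) (string, bool) {
	sort.Slice(pods, func(i, j int) bool { return pods[i].Created.Before(pods[j].Created) })
	for _, p := range pods {
		if p.Namespace != svc.Namespace { // the check this commit adds
			continue
		}
		if matches(svc.Selector, p.Labels) && p.ProbePath != "" {
			return p.ProbePath, true
		}
	}
	return "", false
}

func main() {
	svc := service{Namespace: "default", Selector: map[string]string{"app-3001": "test"}}
	pods := []pod{
		// Older pod in another namespace with the same labels: must be ignored.
		{Name: "test-pod-new-ns", Namespace: "new-ns", Labels: map[string]string{"app-3001": "test"}, Created: time.Now().Add(-time.Hour), ProbePath: "/badpath"},
		{Name: "3001", Namespace: "default", Labels: map[string]string{"app-3001": "test"}, Created: time.Now(), ProbePath: "/healthz"},
	}
	path, _ := probePathFor(svc, pods)
	fmt.Println(path) // prints "/healthz", not "/badpath"
}

Without the namespace guard, the older cross-namespace pod would win on creation time and its /badpath probe would leak into the other namespace's health check, which is what the new TestProbeGetterCrossNamespace in the diff below asserts against.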
@@ -19,12 +19,18 @@ package controller
 import (
 	"fmt"
 	"testing"
+	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
+// Pods created in loops start from this time, for routines that
+// sort on timestamp.
+var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
+
 func TestZoneListing(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID)
 	lbc := newLoadBalancerController(t, cm, "")
@@ -92,7 +98,7 @@ func TestProbeGetter(t *testing.T) {
 		3001: "/healthz",
 		3002: "/foo",
 	}
-	addPods(lbc, nodePortToHealthCheck)
+	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
 		if err != nil {
@@ -103,7 +109,58 @@ func TestProbeGetter(t *testing.T) {
 	}
 }
 
-func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
+func TestProbeGetterCrossNamespace(t *testing.T) {
+	cm := NewFakeClusterManager(DefaultClusterUID)
+	lbc := newLoadBalancerController(t, cm, "")
+
+	firstPod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			// labels match those added by "addPods", but ns and health check
+			// path is different. If this pod was created in the same ns, it
+			// would become the health check.
+			Labels:            map[string]string{"app-3001": "test"},
+			Name:              fmt.Sprintf("test-pod-new-ns"),
+			Namespace:         "new-ns",
+			CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Ports: []api.ContainerPort{{ContainerPort: 80}},
+					ReadinessProbe: &api.Probe{
+						Handler: api.Handler{
+							HTTPGet: &api.HTTPGetAction{
+								Scheme: api.URISchemeHTTP,
+								Path:   "/badpath",
+								Port: intstr.IntOrString{
+									Type:   intstr.Int,
+									IntVal: 80,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	lbc.podLister.Indexer.Add(firstPod)
+	nodePortToHealthCheck := map[int64]string{
+		3001: "/healthz",
+	}
+	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+
+	for p, exp := range nodePortToHealthCheck {
+		got, err := lbc.tr.HealthCheck(p)
+		if err != nil {
+			t.Errorf("Failed to get health check for node port %v: %v", p, err)
+		} else if got.RequestPath != exp {
+			t.Errorf("Wrong health check for node port %v, got %v expected %v", p, got.RequestPath, exp)
+		}
+	}
+}
+
+func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string, ns string) {
+	delay := time.Minute
 	for np, u := range nodePortToHealthCheck {
 		l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
 		svc := &api.Service{
@@ -121,12 +178,15 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 			},
 		}
 		svc.Name = fmt.Sprintf("%d", np)
+		svc.Namespace = ns
 		lbc.svcLister.Store.Add(svc)
 
 		pod := &api.Pod{
 			ObjectMeta: api.ObjectMeta{
 				Labels: l,
 				Name: fmt.Sprintf("%d", np),
+				Namespace: ns,
+				CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
 			},
 			Spec: api.PodSpec{
 				Containers: []api.Container{
@@ -149,6 +209,7 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 			},
 		}
 		lbc.podLister.Indexer.Add(pod)
+		delay = 2 * delay
 	}
 }
 

@@ -405,7 +405,9 @@ func isPortEqual(port, targetPort intstr.IntOrString) bool {
 
 // geHTTPProbe returns the http readiness probe from the first container
 // that matches targetPort, from the set of pods matching the given labels.
-func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntOrString) (*api.Probe, error) {
+func (t *GCETranslator) getHTTPProbe(svc api.Service, targetPort intstr.IntOrString) (*api.Probe, error) {
+	l := svc.Spec.Selector
+
 	// Lookup any container with a matching targetPort from the set of pods
 	// with a matching label selector.
 	pl, err := t.podLister.List(labels.SelectorFromSet(labels.Set(l)))
@@ -417,6 +419,9 @@ func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntO
 	sort.Sort(PodsByCreationTimestamp(pl))
 
 	for _, pod := range pl {
+		if pod.Namespace != svc.Namespace {
+			continue
+		}
 		logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
 		for _, c := range pod.Spec.Containers {
 			if !isSimpleHTTPProbe(c.ReadinessProbe) {
@@ -460,7 +465,7 @@ func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error
 	for _, s := range sl.Items {
 		for _, p := range s.Spec.Ports {
 			if int32(port) == p.NodePort {
-				rp, err := t.getHTTPProbe(s.Spec.Selector, p.TargetPort)
+				rp, err := t.getHTTPProbe(s, p.TargetPort)
 				if err != nil {
 					return nil, err
 				}