From 64791c35f02130614c37b6980d02c06abc57c9dc Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Tue, 12 Apr 2016 22:13:55 -0300 Subject: [PATCH 01/16] Add complete TLS example in nginx Ingress controller --- controllers/nginx/examples/tls/README.md | 101 +++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/controllers/nginx/examples/tls/README.md b/controllers/nginx/examples/tls/README.md index e69de29bb..bc34b63a6 100644 --- a/controllers/nginx/examples/tls/README.md +++ b/controllers/nginx/examples/tls/README.md @@ -0,0 +1,101 @@ +This is an example to use a TLS Ingress rule to use SSL in NGINX + +*First expose the `echoheaders` service:* + +``` +kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replicas=1 --port=8080 +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +``` + +*Next create a SSL certificate for `foo.bar.com` host:* + +``` +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com" +``` + +*Now store the SSL certificate in a secret:* + +``` +echo " +apiVersion: v1 +kind: Secret +metadata: + name: foo-secret +data: + tls.crt: `base64 /tmp/tls.crt` + tls.key: `base64 /tmp/tls.key` +" | kubectl create -f - +``` + +*Finally create a tls Ingress rule:* + +``` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: foo + namespace: default +spec: + tls: + - hosts: + - foo.bar.com + secretName: foo-secret + rules: + - host: foo.bar.com + http: + paths: + - backend: + serviceName: echoheaders-x + servicePort: 80 + path: / +" | kubectl create -f - +``` + +``` +TODO: +- show logs +- curl +``` + + +##### Another example: + +This shows a more complex example that creates the servers `foo.bar.com` and `bar.baz.com` where only `foo.bar.com` uses SSL + +``` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: complex-foo + namespace: default +spec: + tls: + - hosts: + - foo.bar.com + secretName: foo-tls + - hosts: + - bar.baz.com + secretName: foo-tls + rules: + - host: foo.bar.com + http: + paths: + - backend: + serviceName: echoheaders-x + servicePort: 80 + path: / + - host: bar.baz.com + http: + paths: + - backend: + serviceName: echoheaders-y + servicePort: 80 + path: / +``` + + +``` +TODO: +- show logs +- curl +``` From 724a829eaec3c3f9dc955684c4e1d4350f37c307 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Tue, 12 Apr 2016 23:19:08 -0300 Subject: [PATCH 02/16] Detect path collisions in Ingress rules --- controllers/nginx/controller.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 85f478a9c..522b02937 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -458,9 +458,9 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng } server := servers[rule.Host] - locations := []*nginx.Location{} for _, path := range rule.HTTP.Paths { + upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue()) ups := upstreams[upsName] @@ -490,18 +490,23 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng } } - for _, ups := range upstreams { - if upsName == ups.Name { - loc := &nginx.Location{Path: path.Path} - loc.Upstream = *ups - locations = append(locations, loc) + // Validate that there is no another previuous rule + // for the same host and path. 
+ skipLoc := false + for _, loc := range server.Locations { + if loc.Path == path.Path { + lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "Path '%v' already defined in another Ingress rule", path) + skipLoc = true break } } - } - for _, loc := range locations { - server.Locations = append(server.Locations, loc) + if skipLoc == false { + server.Locations = append(server.Locations, &nginx.Location{ + Path: path.Path, + Upstream: *ups, + }) + } } } } From 87297ade3206cda095d3a841f7330b170774c1ce Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Tue, 12 Apr 2016 23:19:39 -0300 Subject: [PATCH 03/16] Check for valid PEM content --- controllers/nginx/nginx/ssl.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/controllers/nginx/nginx/ssl.go b/controllers/nginx/nginx/ssl.go index 65ad0608c..82d192303 100644 --- a/controllers/nginx/nginx/ssl.go +++ b/controllers/nginx/nginx/ssl.go @@ -53,8 +53,10 @@ func (nginx *Manager) CheckSSLCertificate(pemFileName string) ([]string, error) return []string{}, err } - var block *pem.Block - block, _ = pem.Decode(pemCerts) + block, _ := pem.Decode(pemCerts) + if block == nil { + return []string{}, fmt.Errorf("No valid PEM formatted block found") + } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { From f05eec6781bf53a0126198d43bc6548885b8e046 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Wed, 13 Apr 2016 14:37:08 -0300 Subject: [PATCH 04/16] Fix https port --- controllers/nginx/README.md | 2 + controllers/nginx/examples/README.md | 8 ++ .../rc-custom-configuration.yaml | 2 +- .../examples/daemonset/as-daemonset.yaml | 2 +- .../nginx/examples/default/rc-default.yaml | 2 +- controllers/nginx/examples/full/rc-full.yaml | 2 +- controllers/nginx/examples/tcp/rc-tcp.yaml | 2 +- controllers/nginx/examples/tls/README.md | 81 ++++++++----------- controllers/nginx/examples/tls/rc-ssl.yaml | 2 +- controllers/nginx/examples/udp/rc-udp.yaml | 2 +- 10 files changed, 52 insertions(+), 53 deletions(-) create mode 100644 controllers/nginx/examples/README.md diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index 8a527e37f..def7affb5 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -241,3 +241,5 @@ The previous behavior can be restored using `retry-non-idempotent=true` in the c ## Limitations - Ingress rules for TLS require the definition of the field `host` +- The IP address in the status of loadBalancer could contain old values + diff --git a/controllers/nginx/examples/README.md b/controllers/nginx/examples/README.md new file mode 100644 index 000000000..30348b639 --- /dev/null +++ b/controllers/nginx/examples/README.md @@ -0,0 +1,8 @@ + +All the examples references the services `echoheaders-x` and `echoheaders-y` + +``` +kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replicas=1 --port=8080 +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +``` diff --git a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml index 7e43da8b9..45c8be2cf 100644 --- a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml +++ b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + 
hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/daemonset/as-daemonset.yaml b/controllers/nginx/examples/daemonset/as-daemonset.yaml index 9cc493969..3c6aa14e4 100644 --- a/controllers/nginx/examples/daemonset/as-daemonset.yaml +++ b/controllers/nginx/examples/daemonset/as-daemonset.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/default/rc-default.yaml b/controllers/nginx/examples/default/rc-default.yaml index 842a372cc..48227cfa6 100644 --- a/controllers/nginx/examples/default/rc-default.yaml +++ b/controllers/nginx/examples/default/rc-default.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/full/rc-full.yaml b/controllers/nginx/examples/full/rc-full.yaml index d54fe4dbb..1bb68dded 100644 --- a/controllers/nginx/examples/full/rc-full.yaml +++ b/controllers/nginx/examples/full/rc-full.yaml @@ -45,7 +45,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 - containerPort: 8080 hostPort: 9000 volumeMounts: diff --git a/controllers/nginx/examples/tcp/rc-tcp.yaml b/controllers/nginx/examples/tcp/rc-tcp.yaml index ef64d30b7..f083e2faa 100644 --- a/controllers/nginx/examples/tcp/rc-tcp.yaml +++ b/controllers/nginx/examples/tcp/rc-tcp.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 # we expose 8080 to access nginx stats in url /nginx-status # this is optional - containerPort: 8080 diff --git a/controllers/nginx/examples/tls/README.md b/controllers/nginx/examples/tls/README.md index bc34b63a6..39ea28406 100644 --- a/controllers/nginx/examples/tls/README.md +++ b/controllers/nginx/examples/tls/README.md @@ -1,11 +1,12 @@ This is an example to use a TLS Ingress rule to use SSL in NGINX -*First expose the `echoheaders` service:* +# TLS certificate termination -``` -kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replicas=1 --port=8080 -kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x -``` +This examples uses 2 different certificates to terminate SSL for 2 hostnames. + +1. Deploy the controller by creating the rc in the parent dir +2. Create tls secret for foo.bar.com +3. 
Create rc-ssl.yaml *Next create a SSL certificate for `foo.bar.com` host:* @@ -30,6 +31,7 @@ data: *Finally create a tls Ingress rule:* ``` +echo " apiVersion: extensions/v1beta1 kind: Ingress metadata: @@ -51,51 +53,38 @@ spec: " | kubectl create -f - ``` +You should be able to reach your nginx service or echoheaders service using a hostname: ``` -TODO: -- show logs -- curl +$ kubectl get ing +NAME RULE BACKEND ADDRESS +foo - 10.4.0.3 + foo.bar.com + / echoheaders-x:80 ``` - -##### Another example: - -This shows a more complex example that creates the servers `foo.bar.com` and `bar.baz.com` where only `foo.bar.com` uses SSL - -``` -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: complex-foo - namespace: default -spec: - tls: - - hosts: - - foo.bar.com - secretName: foo-tls - - hosts: - - bar.baz.com - secretName: foo-tls - rules: - - host: foo.bar.com - http: - paths: - - backend: - serviceName: echoheaders-x - servicePort: 80 - path: / - - host: bar.baz.com - http: - paths: - - backend: - serviceName: echoheaders-y - servicePort: 80 - path: / ``` +$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k +old-mbp:contrib aledbf$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k +CLIENT VALUES: +client_address=10.2.48.4 +command=GET +real path=/ +query=nil +request_version=1.1 +request_uri=http://foo.bar.com:8080/ +SERVER VALUES: +server_version=nginx: 1.9.7 - lua: 9019 -``` -TODO: -- show logs -- curl +HEADERS RECEIVED: +accept=*/* +connection=close +host=foo.bar.com +user-agent=curl/7.43.0 +x-forwarded-for=10.2.48.1 +x-forwarded-host=foo.bar.com +x-forwarded-proto=https +x-real-ip=10.2.48.1 +BODY: +-no body in request- ``` diff --git a/controllers/nginx/examples/tls/rc-ssl.yaml b/controllers/nginx/examples/tls/rc-ssl.yaml index f98a71902..2bf1aa3e3 100644 --- a/controllers/nginx/examples/tls/rc-ssl.yaml +++ b/controllers/nginx/examples/tls/rc-ssl.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 - containerPort: 8080 hostPort: 9000 args: diff --git a/controllers/nginx/examples/udp/rc-udp.yaml b/controllers/nginx/examples/udp/rc-udp.yaml index 283c2211b..22d167f1c 100644 --- a/controllers/nginx/examples/udp/rc-udp.yaml +++ b/controllers/nginx/examples/udp/rc-udp.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 # we expose 8080 to access nginx stats in url /nginx-status # this is optional - containerPort: 8080 From 16b4af504bbea99fd0475f19cd8beddd10157b26 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Thu, 14 Apr 2016 20:42:37 -0300 Subject: [PATCH 05/16] Fix issues with named ports --- controllers/nginx/controller.go | 148 +++++++++++++++++++++++++++++--- 1 file changed, 138 insertions(+), 10 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 522b02937..52f64dcd6 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package main import ( + "encoding/json" "fmt" "reflect" "sort" @@ -28,11 +29,13 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + podutil "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/watch" @@ -41,14 +44,35 @@ import ( ) const ( - defUpstreamName = "upstream-default-backend" - defServerName = "_" + defUpstreamName = "upstream-default-backend" + defServerName = "_" + namedPortAnnotation = "kubernetes.io/ingress-named-ports" ) var ( keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc ) +type namedPortMapping map[string]string + +func (npm namedPortMapping) getPort(name string) (string, bool) { + val, ok := npm.getMappings()[name] + return val, ok +} + +func (npm namedPortMapping) getMappings() map[string]string { + data := npm[namedPortAnnotation] + var mapping map[string]string + if data == "" { + return mapping + } + if err := json.Unmarshal([]byte(data), &mapping); err != nil { + glog.Errorf("unexpected error reading annotations: %v", err) + } + + return mapping +} + // loadBalancerController watches the kubernetes api and adds/removes services // from the loadbalancer type loadBalancerController struct { @@ -74,6 +98,10 @@ type loadBalancerController struct { // this avoids a sync execution in the ResourceEventHandlerFuncs ingQueue *taskQueue + // used to update the annotation that matches a service using one or + // more named ports to an endpoint port + svcEpQueue *taskQueue + // stopLock is used to enforce only a single call to Stop is active. // Needed because we allow stopping through an http endpoint and // allowing concurrent stoppers leads to stack traces. 
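To make the new `kubernetes.io/ingress-named-ports` annotation concrete, the sketch below (not part of the patch) shows the round trip the controller performs: a named `targetPort` is resolved against a pod, stored as JSON in the service annotation, and decoded again when endpoints are looked up. The port name `http` and the value `8080` are illustrative; only the annotation key and the JSON encoding come from the code in this diff.

```
package main

import (
	"encoding/json"
	"fmt"
)

const namedPortAnnotation = "kubernetes.io/ingress-named-ports"

func main() {
	// Resolution phase: the controller maps a service port's named
	// targetPort to the numeric port found on one of its pods.
	mapping := map[string]string{"http": "8080"} // illustrative values
	data, _ := json.Marshal(mapping)
	annotations := map[string]string{namedPortAnnotation: string(data)}

	// Lookup phase: when building upstreams the annotation is decoded to
	// translate the port name back into the numeric endpoint port.
	var decoded map[string]string
	if err := json.Unmarshal([]byte(annotations[namedPortAnnotation]), &decoded); err == nil {
		fmt.Println(decoded["http"]) // prints 8080
	}
}
```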
@@ -104,6 +132,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura lbc.syncQueue = NewTaskQueue(lbc.sync) lbc.ingQueue = NewTaskQueue(lbc.updateIngressStatus) + lbc.svcEpQueue = NewTaskQueue(lbc.updateEpNamedPorts) ingEventHandler := framework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -141,6 +170,17 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura }, } + svcEventHandler := framework.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + lbc.svcEpQueue.enqueue(obj) + }, + UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + lbc.svcEpQueue.enqueue(cur) + } + }, + } + lbc.ingLister.Store, lbc.ingController = framework.NewInformer( &cache.ListWatch{ ListFunc: ingressListFunc(lbc.client, namespace), @@ -160,7 +200,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura ListFunc: serviceListFunc(lbc.client, namespace), WatchFunc: serviceWatchFunc(lbc.client, namespace), }, - &api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{}) + &api.Service{}, resyncPeriod, svcEventHandler) return &lbc, nil } @@ -217,6 +257,75 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.Config return lbc.client.ConfigMaps(ns).Get(name) } +func (lbc *loadBalancerController) updateEpNamedPorts(key string) { + if !lbc.controllersInSync() { + lbc.svcEpQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) + return + } + + svcObj, svcExists, err := lbc.svcLister.GetByKey(key) + if err != nil { + glog.Warningf("error getting service %v: %v", key, err) + return + } + + if !svcExists { + glog.Warningf("service %v not found", key) + return + } + + svc := svcObj.(*api.Service) + if svc.Spec.Selector == nil { + return + } + + pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ + LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), + }) + if err != nil { + glog.Errorf("error searching service pods %q: %v", key, err) + return + } + + namedPorts := map[string]string{} + + for i := range pods.Items { + pod := &pods.Items[i] + + for i := range svc.Spec.Ports { + servicePort := &svc.Spec.Ports[i] + + _, err := strconv.Atoi(servicePort.TargetPort.StrVal) + if err != nil { + portNum, err := podutil.FindPort(pod, servicePort) + if err != nil { + glog.V(4).Infof("Failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) + continue + } + + if servicePort.TargetPort.StrVal == "" { + continue + } + + namedPorts[servicePort.TargetPort.StrVal] = fmt.Sprintf("%v", portNum) + } + } + } + + if !reflect.DeepEqual(svc.ObjectMeta.Annotations, namedPorts) { + data, _ := json.Marshal(namedPorts) + if svc.ObjectMeta.Annotations == nil { + svc.ObjectMeta.Annotations = map[string]string{} + } + svc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) + glog.Infof("updating service %v with new named port mappings", svc.Name) + _, err := lbc.client.Services(svc.Namespace).Update(svc) + if err != nil { + glog.Errorf("Error syncing service %q: %v", key, err) + } + } +} + func (lbc *loadBalancerController) sync(key string) { if !lbc.controllersInSync() { lbc.syncQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) @@ -373,7 +482,12 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api var endps []nginx.UpstreamServer targetPort, err := strconv.Atoi(svcPort[1]) if err != nil { - endps = lbc.getEndpoints(svc, 
intstr.FromString(svcPort[1]), proto) + for _, sp := range svc.Spec.Ports { + if sp.Name == svcPort[1] { + endps = lbc.getEndpoints(svc, sp.TargetPort, proto) + break + } + } } else { // we need to use the TargetPort (where the endpoints are running) for _, sp := range svc.Spec.Ports { @@ -461,7 +575,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng for _, path := range rule.HTTP.Paths { - upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue()) + upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal) ups := upstreams[upsName] svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) @@ -479,8 +593,13 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng svc := svcObj.(*api.Service) for _, servicePort := range svc.Spec.Ports { - if servicePort.Port == path.Backend.ServicePort.IntValue() { - endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP) + port := servicePort.TargetPort + if servicePort.Name != "" { + port = intstr.FromString(servicePort.Name) + } + + if port == path.Backend.ServicePort { + endps := lbc.getEndpoints(svc, port, api.ProtocolTCP) if len(endps) == 0 { glog.Warningf("service %v does no have any active endpoints", svcKey) } @@ -546,7 +665,7 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin } for _, path := range rule.HTTP.Paths { - name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue()) + name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal) if _, ok := upstreams[name]; !ok { upstreams[name] = nginx.NewUpstream(name) } @@ -666,8 +785,16 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints targetPort = epPort.Port } case intstr.String: - if epPort.Name == servicePort.StrVal { - targetPort = epPort.Port + if val, ok := namedPortMapping(s.ObjectMeta.Annotations).getPort(servicePort.StrVal); ok { + port, err := strconv.Atoi(val) + if err != nil { + glog.Warningf("%v is not valid as a port", val) + continue + } + + if epPort.Protocol == proto { + targetPort = port + } } } @@ -754,6 +881,7 @@ func (lbc *loadBalancerController) Run() { go lbc.syncQueue.run(time.Second, lbc.stopCh) go lbc.ingQueue.run(time.Second, lbc.stopCh) + go lbc.svcEpQueue.run(time.Second, lbc.stopCh) <-lbc.stopCh glog.Infof("shutting down NGINX loadbalancer controller") From 102c056b67fc64ee4098a01bdb0e294f7c6c7a85 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Thu, 14 Apr 2016 20:43:07 -0300 Subject: [PATCH 06/16] Add header X-Forwarded-Port --- controllers/nginx/nginx.tmpl | 1 + 1 file changed, 1 insertion(+) diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index facbb766e..86fb102a0 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -182,6 +182,7 @@ http { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host; + proxy_set_header X­-Forwarded­-Port $server_port; proxy_set_header X-Forwarded-Proto $pass_access_scheme; proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s; From 5663c725beeac3b5b3f832b8c67b4873d587142f Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Fri, 15 Apr 2016 09:35:39 -0300 Subject: [PATCH 07/16] Make optional redirect to SSL --- controllers/nginx/README.md | 3 +++ 
controllers/nginx/nginx.tmpl | 8 +++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index def7affb5..a975a5cfd 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -120,6 +120,9 @@ Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/exampl Check the [example](examples/tls/README.md) +### Force HTTPS + +By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. To disable this behavior use `use-hts=false` in the NGINX ConfigMap. #### Optimizing TLS Time To First Byte (TTTFB) diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index 86fb102a0..046ffc7cd 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -80,10 +80,6 @@ http { '' $scheme; } - map $pass_access_scheme $sts { - 'https' 'max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload'; - } - # Map a response error watching the header Content-Type map $http_accept $httpAccept { default html; @@ -163,10 +159,12 @@ http { server_name {{ $server.Name }}; - {{ if $server.SSL }} + {{ if (and $server.SSL $cfg.UseHTS) }} if ($scheme = http) { return 301 https://$host$request_uri; } + + more_set_headers "Strict-Transport-Security: max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload"; {{ end }} {{ range $location := $server.Locations }} From 107bf1837b8bd512c695cbd852adb023d3889167 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Fri, 15 Apr 2016 11:39:53 -0300 Subject: [PATCH 08/16] Bump version --- controllers/nginx/Makefile | 2 +- .../examples/custom-configuration/rc-custom-configuration.yaml | 2 +- controllers/nginx/examples/daemonset/as-daemonset.yaml | 2 +- controllers/nginx/examples/default/rc-default.yaml | 2 +- controllers/nginx/examples/full/rc-full.yaml | 2 +- controllers/nginx/examples/tcp/rc-tcp.yaml | 2 +- controllers/nginx/examples/tls/rc-ssl.yaml | 2 +- controllers/nginx/examples/udp/rc-udp.yaml | 2 +- controllers/nginx/main.go | 2 +- controllers/nginx/rc.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/controllers/nginx/Makefile b/controllers/nginx/Makefile index ce0668a9d..8556bf2ed 100644 --- a/controllers/nginx/Makefile +++ b/controllers/nginx/Makefile @@ -1,7 +1,7 @@ all: push # 0.0 shouldn't clobber any release builds -TAG = 0.5 +TAG = 0.6 PREFIX = gcr.io/google_containers/nginx-ingress-controller REPO_INFO=$(shell git config --get remote.origin.url) diff --git a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml index 45c8be2cf..10ae3fa8e 100644 --- a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml +++ b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/daemonset/as-daemonset.yaml b/controllers/nginx/examples/daemonset/as-daemonset.yaml index 3c6aa14e4..bf46f89ac 100644 --- a/controllers/nginx/examples/daemonset/as-daemonset.yaml +++ b/controllers/nginx/examples/daemonset/as-daemonset.yaml @@ -10,7 +10,7 @@ spec: spec: 
terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/default/rc-default.yaml b/controllers/nginx/examples/default/rc-default.yaml index 48227cfa6..ea8e2d924 100644 --- a/controllers/nginx/examples/default/rc-default.yaml +++ b/controllers/nginx/examples/default/rc-default.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/full/rc-full.yaml b/controllers/nginx/examples/full/rc-full.yaml index 1bb68dded..9623ba6af 100644 --- a/controllers/nginx/examples/full/rc-full.yaml +++ b/controllers/nginx/examples/full/rc-full.yaml @@ -21,7 +21,7 @@ spec: secret: secretName: dhparam-example containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/tcp/rc-tcp.yaml b/controllers/nginx/examples/tcp/rc-tcp.yaml index f083e2faa..3a40c755b 100644 --- a/controllers/nginx/examples/tcp/rc-tcp.yaml +++ b/controllers/nginx/examples/tcp/rc-tcp.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/tls/rc-ssl.yaml b/controllers/nginx/examples/tls/rc-ssl.yaml index 2bf1aa3e3..8195f931e 100644 --- a/controllers/nginx/examples/tls/rc-ssl.yaml +++ b/controllers/nginx/examples/tls/rc-ssl.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/examples/udp/rc-udp.yaml b/controllers/nginx/examples/udp/rc-udp.yaml index 22d167f1c..108a0a3fc 100644 --- a/controllers/nginx/examples/udp/rc-udp.yaml +++ b/controllers/nginx/examples/udp/rc-udp.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: diff --git a/controllers/nginx/main.go b/controllers/nginx/main.go index a5b9afca9..becd220df 100644 --- a/controllers/nginx/main.go +++ b/controllers/nginx/main.go @@ -43,7 +43,7 @@ const ( var ( // value overwritten during build. This can be used to resolve issues. 
- version = "0.5" + version = "0.6" gitRepo = "https://github.com/kubernetes/contrib" flags = pflag.NewFlagSet("", pflag.ExitOnError) diff --git a/controllers/nginx/rc.yaml b/controllers/nginx/rc.yaml index 527ef42e5..ea76dd084 100644 --- a/controllers/nginx/rc.yaml +++ b/controllers/nginx/rc.yaml @@ -68,7 +68,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: From 996e19cdb8e1ddd0fc0824aad7bda1a8257c2bf9 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Fri, 15 Apr 2016 11:59:02 -0300 Subject: [PATCH 09/16] Use Ingress creation and update events instead services to reduce pod queries --- controllers/nginx/controller.go | 76 +++++++++++++++++++++------------ 1 file changed, 49 insertions(+), 27 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 52f64dcd6..0124f06fe 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -116,7 +116,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(kubeClient.Events("")) + eventBroadcaster.StartRecordingToSink(kubeClient.Events(namespace)) lbc := loadBalancerController{ client: kubeClient, @@ -139,6 +139,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura addIng := obj.(*extensions.Ingress) lbc.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name)) lbc.ingQueue.enqueue(obj) + lbc.svcEpQueue.enqueue(obj) lbc.syncQueue.enqueue(obj) }, DeleteFunc: func(obj interface{}) { @@ -151,6 +152,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura upIng := cur.(*extensions.Ingress) lbc.recorder.Eventf(upIng, api.EventTypeNormal, "UPDATE", fmt.Sprintf("%s/%s", upIng.Namespace, upIng.Name)) lbc.ingQueue.enqueue(cur) + lbc.svcEpQueue.enqueue(cur) lbc.syncQueue.enqueue(cur) } }, @@ -170,17 +172,6 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura }, } - svcEventHandler := framework.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - lbc.svcEpQueue.enqueue(obj) - }, - UpdateFunc: func(old, cur interface{}) { - if !reflect.DeepEqual(old, cur) { - lbc.svcEpQueue.enqueue(cur) - } - }, - } - lbc.ingLister.Store, lbc.ingController = framework.NewInformer( &cache.ListWatch{ ListFunc: ingressListFunc(lbc.client, namespace), @@ -200,7 +191,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura ListFunc: serviceListFunc(lbc.client, namespace), WatchFunc: serviceWatchFunc(lbc.client, namespace), }, - &api.Service{}, resyncPeriod, svcEventHandler) + &api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{}) return &lbc, nil } @@ -263,27 +254,54 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { return } - svcObj, svcExists, err := lbc.svcLister.GetByKey(key) + glog.V(4).Infof("checking if service %v uses named ports to update annotation %v", key, namedPortAnnotation) + + ingObj, ingExists, err := lbc.ingLister.Store.GetByKey(key) if err != nil { glog.Warningf("error getting service %v: %v", key, err) return } - if !svcExists { + if !ingExists { glog.Warningf("service %v not found", key) return } - svc := 
svcObj.(*api.Service) - if svc.Spec.Selector == nil { - return - } + ing := ingObj.(*extensions.Ingress) + for _, rule := range ing.Spec.Rules { + if rule.IngressRuleValue.HTTP == nil { + continue + } + for _, path := range rule.HTTP.Paths { + svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) + if err != nil { + glog.Infof("error getting service %v from the cache: %v", svcKey, err) + continue + } + + if !svcExists { + glog.Warningf("service %v does no exists", svcKey) + continue + } + + svc := svcObj.(*api.Service) + if svc.Spec.Selector == nil { + return + } + + lbc.checkSvcForUpdate(svc) + } + } +} + +func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) { pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), }) if err != nil { - glog.Errorf("error searching service pods %q: %v", key, err) + glog.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) return } @@ -291,7 +309,7 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { for i := range pods.Items { pod := &pods.Items[i] - + glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name) for i := range svc.Spec.Ports { servicePort := &svc.Spec.Ports[i] @@ -299,7 +317,7 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { if err != nil { portNum, err := podutil.FindPort(pod, servicePort) if err != nil { - glog.V(4).Infof("Failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) + glog.V(4).Infof("failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) continue } @@ -312,16 +330,18 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { } } - if !reflect.DeepEqual(svc.ObjectMeta.Annotations, namedPorts) { + if svc.ObjectMeta.Annotations == nil { + svc.ObjectMeta.Annotations = map[string]string{} + } + + curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation] + if !reflect.DeepEqual(curNamedPort, namedPorts) { data, _ := json.Marshal(namedPorts) - if svc.ObjectMeta.Annotations == nil { - svc.ObjectMeta.Annotations = map[string]string{} - } svc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) glog.Infof("updating service %v with new named port mappings", svc.Name) _, err := lbc.client.Services(svc.Namespace).Update(svc) if err != nil { - glog.Errorf("Error syncing service %q: %v", key, err) + glog.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) } } } @@ -879,6 +899,8 @@ func (lbc *loadBalancerController) Run() { go lbc.endpController.Run(lbc.stopCh) go lbc.svcController.Run(lbc.stopCh) + time.Sleep(1 * time.Second) + go lbc.syncQueue.run(time.Second, lbc.stopCh) go lbc.ingQueue.run(time.Second, lbc.stopCh) go lbc.svcEpQueue.run(time.Second, lbc.stopCh) From 102c2eeaa4ce0fb793df793e112bfe1fc1296a8c Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Fri, 15 Apr 2016 12:29:12 -0300 Subject: [PATCH 10/16] Avoid iteration in pods during update of service annotations --- controllers/nginx/controller.go | 74 +++++++++++++++++++++------------ controllers/nginx/nginx.tmpl | 2 +- 2 files changed, 49 insertions(+), 27 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 0124f06fe..107f9f36f 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -291,42 +291,52 @@ func (lbc *loadBalancerController) 
updateEpNamedPorts(key string) { return } - lbc.checkSvcForUpdate(svc) + err = lbc.checkSvcForUpdate(svc) + if err != nil { + lbc.svcEpQueue.requeue(key, err) + return + } } } } -func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) { +// checkSvcForUpdate verifies if one of the running pods for a service contains +// named port. If the annotation in the service does not exists or is not equals +// to the port mapping obtained from the pod the service must be updated to reflect +// the current state +func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { + // get the pods associated with the service pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), }) if err != nil { - glog.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) - return + return fmt.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) + } + + if len(pods.Items) == 0 { + return nil } namedPorts := map[string]string{} + // we need to check only one pod searching for named ports + pod := &pods.Items[0] + glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name) + for i := range svc.Spec.Ports { + servicePort := &svc.Spec.Ports[i] - for i := range pods.Items { - pod := &pods.Items[i] - glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name) - for i := range svc.Spec.Ports { - servicePort := &svc.Spec.Ports[i] - - _, err := strconv.Atoi(servicePort.TargetPort.StrVal) + _, err := strconv.Atoi(servicePort.TargetPort.StrVal) + if err != nil { + portNum, err := podutil.FindPort(pod, servicePort) if err != nil { - portNum, err := podutil.FindPort(pod, servicePort) - if err != nil { - glog.V(4).Infof("failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) - continue - } - - if servicePort.TargetPort.StrVal == "" { - continue - } - - namedPorts[servicePort.TargetPort.StrVal] = fmt.Sprintf("%v", portNum) + glog.V(4).Infof("failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) + continue } + + if servicePort.TargetPort.StrVal == "" { + continue + } + + namedPorts[servicePort.TargetPort.StrVal] = fmt.Sprintf("%v", portNum) } } @@ -337,13 +347,25 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) { curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation] if !reflect.DeepEqual(curNamedPort, namedPorts) { data, _ := json.Marshal(namedPorts) - svc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) - glog.Infof("updating service %v with new named port mappings", svc.Name) - _, err := lbc.client.Services(svc.Namespace).Update(svc) + + newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name) if err != nil { - glog.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) + return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) + } + + if svc.ObjectMeta.Annotations == nil { + svc.ObjectMeta.Annotations = map[string]string{} + } + + newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) + glog.Infof("updating service %v with new named port mappings", svc.Name) + _, err = lbc.client.Services(svc.Namespace).Update(svc) + if err != nil { + return fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) } } + + return nil } func (lbc *loadBalancerController) sync(key string) { diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index 
046ffc7cd..02499dd74 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -180,7 +180,7 @@ http { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host; - proxy_set_header X­-Forwarded­-Port $server_port; + proxy_set_header X-Forwarded­-Port $server_port; proxy_set_header X-Forwarded-Proto $pass_access_scheme; proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s; From a86a6824293cc901a67e20abdb57405d15b25ee1 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Sat, 16 Apr 2016 19:36:45 -0300 Subject: [PATCH 11/16] Fix HSTS --- controllers/nginx/Dockerfile | 2 +- controllers/nginx/README.md | 31 +++-- controllers/nginx/controller.go | 146 +++++++++++++-------- controllers/nginx/examples/tcp/rc-tcp.yaml | 4 - controllers/nginx/examples/tls/rc-ssl.yaml | 2 - controllers/nginx/examples/udp/rc-udp.yaml | 4 - controllers/nginx/nginx.tmpl | 20 +-- controllers/nginx/nginx/main.go | 23 ++-- controllers/nginx/rc.yaml | 4 + 9 files changed, 126 insertions(+), 110 deletions(-) diff --git a/controllers/nginx/Dockerfile b/controllers/nginx/Dockerfile index 853dbe364..64beee83c 100644 --- a/controllers/nginx/Dockerfile +++ b/controllers/nginx/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/google_containers/nginx-slim:0.5 +FROM gcr.io/google_containers/nginx-slim:0.7 RUN apt-get update && apt-get install -y \ diffutils \ diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index a975a5cfd..f2ed1e63d 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -11,7 +11,6 @@ This is a nginx Ingress controller that uses [ConfigMap](https://github.com/kube - custom ssl_dhparam (optional). Just mount a secret with a file named `dhparam.pem`. - support for TCP services (flag `--tcp-services-configmap`) - custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md) -- custom error pages. Using the flag `--custom-error-service` is possible to use a custom compatible [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) image ## Requirements @@ -120,10 +119,13 @@ Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/exampl Check the [example](examples/tls/README.md) -### Force HTTPS +### HTTP Strict Transport Security -By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. To disable this behavior use `use-hts=false` in the NGINX ConfigMap. +HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. +By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. + +To disable this behavior use `hsts=false` in the NGINX ConfigMap. #### Optimizing TLS Time To First Byte (TTTFB) @@ -190,25 +192,22 @@ Please check the example `example/rc-default.yaml` To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json` + +### Custom errors + +In case of an error in a request the body of the response is obtained from the `default backend`. 
Each request to the default backend includes two headers: +- `X-Code` indicates the HTTP code +- `X-Format` the value of the `Accept` header + +Using this two headers is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspect each request and returns a custom error page with the format expected by the client. This images handles `html` and `json` responses. + + ## Troubleshooting Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md): * make setup-files.sh file in hypercube does not provide 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather then 10.0.0.1 -> this results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong ip), to verify this add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC, and docker will log this error before kubernetes kills the container. * To fix the above, setup-files.sh must be patched before the cluster is inited (refer to https://github.com/kubernetes/kubernetes/pull/21504) -### Custom errors - -The default backend provides a way to customize the default 404 page. This helps but sometimes is not enough. -Using the flag `--custom-error-service` is possible to use an image that must be 404 compatible and provide the route /error -[Here](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) there is an example of the the image - -The route `/error` expects two arguments: code and format -* code defines the wich error code is expected to be returned (502,503,etc.) -* format the format that should be returned For instance /error?code=504&format=json or /error?code=502&format=html - -Using a volume pointing to `/var/www/html` directory is possible to use a custom error - - ### Debug Using the flag `--v=XX` it is possible to increase the level of logging. 
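The custom-errors note in the README above names only the two headers the controller forwards. As a rough illustration of a compatible backend (this is not the linked nginx-error-server image; the handler logic, listen port, and response bodies are assumptions for demonstration only), such a service could be sketched as:

```
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

// errorHandler returns an error page in the representation NGINX asked for
// through the X-Code and X-Format headers described in the README above.
func errorHandler(w http.ResponseWriter, r *http.Request) {
	code, err := strconv.Atoi(r.Header.Get("X-Code"))
	if err != nil {
		code = http.StatusNotFound
	}
	format := r.Header.Get("X-Format")

	if strings.Contains(format, "json") {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		fmt.Fprintf(w, `{"code": %d, "message": %q}`, code, http.StatusText(code))
		return
	}

	w.Header().Set("Content-Type", "text/html")
	w.WriteHeader(code)
	fmt.Fprintf(w, "<html><body><h1>%d %s</h1></body></html>", code, http.StatusText(code))
}

func main() {
	http.HandleFunc("/", errorHandler)
	http.ListenAndServe(":8080", nil) // listen port is illustrative
}
```

A pod running something like this, exposed behind the service passed to `--default-backend-service`, is enough for the controller: error responses are proxied to it and the backend chooses the representation from `X-Format`.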
diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 107f9f36f..04ded6aa1 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -44,9 +44,10 @@ import ( ) const ( - defUpstreamName = "upstream-default-backend" - defServerName = "_" - namedPortAnnotation = "kubernetes.io/ingress-named-ports" + defUpstreamName = "upstream-default-backend" + defServerName = "_" + namedPortAnnotation = "kubernetes.io/ingress-named-ports" + podStoreSyncedPollPeriod = 1 * time.Second ) var ( @@ -127,7 +128,9 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura tcpConfigMap: tcpConfigMapName, udpConfigMap: udpConfigMapName, defaultSvc: defaultSvc, - recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "loadbalancer-controller"}), + recorder: eventBroadcaster.NewRecorder(api.EventSource{ + Component: "nginx-ingress-controller", + }), } lbc.syncQueue = NewTaskQueue(lbc.sync) @@ -250,6 +253,7 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.Config func (lbc *loadBalancerController) updateEpNamedPorts(key string) { if !lbc.controllersInSync() { + time.Sleep(podStoreSyncedPollPeriod) lbc.svcEpQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) return } @@ -291,6 +295,12 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { return } + // check to avoid a call to checkSvcForUpdate if the port is not a string + _, err = strconv.Atoi(path.Backend.ServicePort.StrVal) + if err == nil { + continue + } + err = lbc.checkSvcForUpdate(svc) if err != nil { lbc.svcEpQueue.requeue(key, err) @@ -306,6 +316,7 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) { // the current state func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { // get the pods associated with the service + // TODO: switch this to a watch pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), }) @@ -345,7 +356,7 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { } curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation] - if !reflect.DeepEqual(curNamedPort, namedPorts) { + if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) { data, _ := json.Marshal(namedPorts) newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name) @@ -353,8 +364,8 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) } - if svc.ObjectMeta.Annotations == nil { - svc.ObjectMeta.Annotations = map[string]string{} + if newSvc.ObjectMeta.Annotations == nil { + newSvc.ObjectMeta.Annotations = map[string]string{} } newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) @@ -370,6 +381,7 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { func (lbc *loadBalancerController) sync(key string) { if !lbc.controllersInSync() { + time.Sleep(podStoreSyncedPollPeriod) lbc.syncQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) return } @@ -396,6 +408,7 @@ func (lbc *loadBalancerController) sync(key string) { func (lbc *loadBalancerController) updateIngressStatus(key string) { if !lbc.controllersInSync() { + time.Sleep(podStoreSyncedPollPeriod) lbc.ingQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) return } @@ -462,7 +475,7 @@ func 
(lbc *loadBalancerController) getTCPServices() []*nginx.Location { return []*nginx.Location{} } - return lbc.getServices(tcpMap.Data, api.ProtocolTCP) + return lbc.getStreamServices(tcpMap.Data, api.ProtocolTCP) } func (lbc *loadBalancerController) getUDPServices() []*nginx.Location { @@ -482,10 +495,10 @@ func (lbc *loadBalancerController) getUDPServices() []*nginx.Location { return []*nginx.Location{} } - return lbc.getServices(tcpMap.Data, api.ProtocolUDP) + return lbc.getStreamServices(tcpMap.Data, api.ProtocolUDP) } -func (lbc *loadBalancerController) getServices(data map[string]string, proto api.Protocol) []*nginx.Location { +func (lbc *loadBalancerController) getStreamServices(data map[string]string, proto api.Protocol) []*nginx.Location { var svcs []*nginx.Location // k -> port to expose in nginx // v -> /: @@ -496,36 +509,45 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api continue } - svcPort := strings.Split(v, ":") - if len(svcPort) != 2 { + // this ports are required for NGINX + if k == "80" || k == "443" || k == "8181" { + glog.Warningf("port %v cannot be used for TCP or UDP services. Is reserved for NGINX", k) + continue + } + + nsSvcPort := strings.Split(v, ":") + if len(nsSvcPort) != 2 { glog.Warningf("invalid format (namespace/name:port) '%v'", k) continue } - svcNs, svcName, err := parseNsName(svcPort[0]) + nsName := nsSvcPort[0] + svcPort := nsSvcPort[1] + + svcNs, svcName, err := parseNsName(nsName) if err != nil { glog.Warningf("%v", err) continue } - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcPort[0]) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(nsName) if err != nil { - glog.Warningf("error getting service %v: %v", svcPort[0], err) + glog.Warningf("error getting service %v: %v", nsName, err) continue } if !svcExists { - glog.Warningf("service %v was not found", svcPort[0]) + glog.Warningf("service %v was not found", nsName) continue } svc := svcObj.(*api.Service) var endps []nginx.UpstreamServer - targetPort, err := strconv.Atoi(svcPort[1]) + targetPort, err := strconv.Atoi(svcPort) if err != nil { for _, sp := range svc.Spec.Ports { - if sp.Name == svcPort[1] { + if sp.Name == svcPort { endps = lbc.getEndpoints(svc, sp.TargetPort, proto) break } @@ -616,55 +638,32 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng server := servers[rule.Host] for _, path := range rule.HTTP.Paths { - - upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal) + upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) ups := upstreams[upsName] - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) - if err != nil { - glog.Infof("error getting service %v from the cache: %v", svcKey, err) - continue - } - - if !svcExists { - glog.Warningf("service %v does no exists", svcKey) - continue - } - - svc := svcObj.(*api.Service) - - for _, servicePort := range svc.Spec.Ports { - port := servicePort.TargetPort - if servicePort.Name != "" { - port = intstr.FromString(servicePort.Name) - } - - if port == path.Backend.ServicePort { - endps := lbc.getEndpoints(svc, port, api.ProtocolTCP) - if len(endps) == 0 { - glog.Warningf("service %v does no have any active endpoints", svcKey) - } - - ups.Backends = append(ups.Backends, endps...) 
- break - } + nginxPath := path.Path + // if there's no path defined we assume / + if nginxPath == "" { + lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", + "Ingress rule '%v/%v' contains no path definition. Assuming /", ing.GetNamespace(), ing.GetName()) + nginxPath = "/" } // Validate that there is no another previuous rule // for the same host and path. - skipLoc := false + addLoc := true for _, loc := range server.Locations { - if loc.Path == path.Path { - lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "Path '%v' already defined in another Ingress rule", path) - skipLoc = true + if loc.Path == nginxPath { + lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", + "Path '%v' already defined in another Ingress rule", nginxPath) + addLoc = false break } } - if skipLoc == false { + if addLoc { server.Locations = append(server.Locations, &nginx.Location{ - Path: path.Path, + Path: nginxPath, Upstream: *ups, }) } @@ -707,9 +706,39 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin } for _, path := range rule.HTTP.Paths { - name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal) + name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) if _, ok := upstreams[name]; !ok { upstreams[name] = nginx.NewUpstream(name) + + svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) + if err != nil { + glog.Infof("error getting service %v from the cache: %v", svcKey, err) + continue + } + + if !svcExists { + glog.Warningf("service %v does no exists", svcKey) + continue + } + + svc := svcObj.(*api.Service) + for _, servicePort := range svc.Spec.Ports { + port := servicePort.TargetPort + if servicePort.Name != "" { + port = intstr.FromString(servicePort.Name) + } + + if port == path.Backend.ServicePort { + endps := lbc.getEndpoints(svc, port, api.ProtocolTCP) + if len(endps) == 0 { + glog.Warningf("service %v does no have any active endpoints", svcKey) + } + + upstreams[name].Backends = append(upstreams[name].Backends, endps...) 
+ break + } + } } } } @@ -877,6 +906,9 @@ func (lbc *loadBalancerController) Stop() error { return fmt.Errorf("shutdown already in progress") } +// removeFromIngress removes the IP address of the node where the Ingres +// controller is running before shutdown to avoid incorrect status +// information in Ingress rules func (lbc *loadBalancerController) removeFromIngress() { ings := lbc.ingLister.Store.List() glog.Infof("updating %v Ingress rule/s", len(ings)) @@ -921,8 +953,6 @@ func (lbc *loadBalancerController) Run() { go lbc.endpController.Run(lbc.stopCh) go lbc.svcController.Run(lbc.stopCh) - time.Sleep(1 * time.Second) - go lbc.syncQueue.run(time.Second, lbc.stopCh) go lbc.ingQueue.run(time.Second, lbc.stopCh) go lbc.svcEpQueue.run(time.Second, lbc.stopCh) diff --git a/controllers/nginx/examples/tcp/rc-tcp.yaml b/controllers/nginx/examples/tcp/rc-tcp.yaml index 3a40c755b..36420c831 100644 --- a/controllers/nginx/examples/tcp/rc-tcp.yaml +++ b/controllers/nginx/examples/tcp/rc-tcp.yaml @@ -41,10 +41,6 @@ spec: hostPort: 80 - containerPort: 443 hostPort: 443 - # we expose 8080 to access nginx stats in url /nginx-status - # this is optional - - containerPort: 8080 - hostPort: 8081 # service echoheaders as TCP service default/echoheaders:9000 # 9000 indicates the port used to expose the service - containerPort: 9000 diff --git a/controllers/nginx/examples/tls/rc-ssl.yaml b/controllers/nginx/examples/tls/rc-ssl.yaml index 8195f931e..ea8e2d924 100644 --- a/controllers/nginx/examples/tls/rc-ssl.yaml +++ b/controllers/nginx/examples/tls/rc-ssl.yaml @@ -41,8 +41,6 @@ spec: hostPort: 80 - containerPort: 443 hostPort: 443 - - containerPort: 8080 - hostPort: 9000 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/udp/rc-udp.yaml b/controllers/nginx/examples/udp/rc-udp.yaml index 108a0a3fc..01eafd613 100644 --- a/controllers/nginx/examples/udp/rc-udp.yaml +++ b/controllers/nginx/examples/udp/rc-udp.yaml @@ -41,10 +41,6 @@ spec: hostPort: 80 - containerPort: 443 hostPort: 443 - # we expose 8080 to access nginx stats in url /nginx-status - # this is optional - - containerPort: 8080 - hostPort: 8081 - containerPort: 53 hostPort: 53 args: diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index 02499dd74..3b413b7ac 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -149,8 +149,8 @@ http { {{ range $server := .servers }} server { - listen 80; - {{ if $server.SSL }}listen 443 ssl http2; + listen 80{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }}; + {{ if $server.SSL }}listen 443{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} ssl http2; ssl_certificate {{ $server.SSLCertificate }}; ssl_certificate_key {{ $server.SSLCertificateKey }};{{ end }} {{ if $cfg.enableVtsStatus }} @@ -159,12 +159,12 @@ http { server_name {{ $server.Name }}; - {{ if (and $server.SSL $cfg.UseHTS) }} + {{ if (and $server.SSL $cfg.hsts) }} if ($scheme = http) { return 301 https://$host$request_uri; } - more_set_headers "Strict-Transport-Security: max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload"; + more_set_headers "Strict-Transport-Security: max-age={{ $cfg.hstsMaxAge }}{{ if $cfg.hstsIncludeSubdomains }}; includeSubDomains{{ end }}; preload"; {{ end }} {{ range $location := $server.Locations }} @@ -180,7 +180,7 @@ http { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host; - proxy_set_header 
X-Forwarded­-Port $server_port; + proxy_set_header X-Forwarded-Port $server_port; proxy_set_header X-Forwarded-Proto $pass_access_scheme; proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s; @@ -213,18 +213,13 @@ http { # default server, including healthcheck server { - listen 8080 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} reuseport; + listen 8080 default_server reuseport; location /healthz { access_log off; return 200; } - location /health-check { - access_log off; - proxy_pass http://127.0.0.1:10249/healthz; - } - location /nginx_status { {{ if $cfg.enableVtsStatus }} vhost_traffic_status_display; @@ -253,9 +248,7 @@ http { } } - stream { - # TCP services {{ range $i, $tcpServer := .tcpUpstreams }} upstream tcp-{{ $tcpServer.Upstream.Name }} { @@ -285,7 +278,6 @@ stream { proxy_pass udp-{{ $udpServer.Upstream.Name }}; } {{ end }} - } {{/* definition of templates to avoid repetitions */}} diff --git a/controllers/nginx/nginx/main.go b/controllers/nginx/nginx/main.go index 66c702c89..8dfc892dc 100644 --- a/controllers/nginx/nginx/main.go +++ b/controllers/nginx/nginx/main.go @@ -49,7 +49,7 @@ const ( // that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. // https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security // max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. - htsMaxAge = "15724800" + hstsMaxAge = "15724800" // If UseProxyProtocol is enabled defIPCIDR defines the default the IP/network address of your external load balancer defIPCIDR = "0.0.0.0/0" @@ -105,18 +105,19 @@ type nginxConfiguration struct { // Log levels above are listed in the order of increasing severity ErrorLogLevel string `structs:"error-log-level,omitempty"` - // Enables or disables the header HTS in servers running SSL - UseHTS bool `structs:"use-hts,omitempty"` + // Enables or disables the header HSTS in servers running SSL + HSTS bool `structs:"hsts,omitempty"` - // Enables or disables the use of HTS in all the subdomains of the servername - HTSIncludeSubdomains bool `structs:"hts-include-subdomains,omitempty"` + // Enables or disables the use of HSTS in all the subdomains of the servername + // Default: true + HSTSIncludeSubdomains bool `structs:"hsts-include-subdomains,omitempty"` // HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) // that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. // https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security // max-age is the time, in seconds, that the browser should remember that this site is only to be // accessed using HTTPS. - HTSMaxAge string `structs:"hts-max-age,omitempty"` + HSTSMaxAge string `structs:"hsts-max-age,omitempty"` // Time during which a keep-alive client connection will stay open on the server side. 
// The zero value disables keep-alive client connections @@ -239,11 +240,11 @@ type Manager struct { // in the file default-conf.json func newDefaultNginxCfg() nginxConfiguration { cfg := nginxConfiguration{ - BodySize: bodySize, - ErrorLogLevel: errorLevel, - UseHTS: true, - HTSIncludeSubdomains: true, - HTSMaxAge: htsMaxAge, + BodySize: bodySize, + ErrorLogLevel: errorLevel, + HSTS: true, + HSTSIncludeSubdomains: true, + HSTSMaxAge: hstsMaxAge, GzipTypes: gzipTypes, KeepAlive: 75, MaxWorkerConnections: 16384, diff --git a/controllers/nginx/rc.yaml b/controllers/nginx/rc.yaml index ea76dd084..a6f2bc760 100644 --- a/controllers/nginx/rc.yaml +++ b/controllers/nginx/rc.yaml @@ -93,6 +93,10 @@ spec: hostPort: 80 - containerPort: 443 hostPort: 443 + # we expose 8080 to access nginx stats in url /nginx-status + # this is optional + - containerPort: 8080 + hostPort: 8080 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend From 8bf7007c4093e8fb9ab2f9c0507cb50131785110 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Thu, 28 Apr 2016 01:03:59 -0300 Subject: [PATCH 12/16] Add support for sticky sessions --- controllers/nginx/controller.go | 2 ++ controllers/nginx/nginx.tmpl | 11 ++++++----- controllers/nginx/nginx/main.go | 5 +++++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 04ded6aa1..40f835442 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -694,6 +694,8 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng return aUpstreams, aServers } +// createUpstreams creates the NGINX upstreams for each service referenced in +// Ingress rules. The servers inside the upstream are endpoints. 
func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream { upstreams := make(map[string]*nginx.Upstream) diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index 3b413b7ac..77ff2ac21 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -141,9 +141,13 @@ http { {{range $name, $upstream := .upstreams}} upstream {{$upstream.Name}} { + {{ if $cfg.enableStickySessions }} + sticky hash=sha1 httponly; + {{ else }} least_conn; - {{range $server := $upstream.Backends}}server {{$server.Address}}:{{$server.Port}}; - {{end}} + {{ end }} + {{ range $server := $upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }}; + {{ end }} } {{end}} @@ -153,9 +157,6 @@ http { {{ if $server.SSL }}listen 443{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} ssl http2; ssl_certificate {{ $server.SSLCertificate }}; ssl_certificate_key {{ $server.SSLCertificateKey }};{{ end }} - {{ if $cfg.enableVtsStatus }} - vhost_traffic_status_filter_by_set_key {{ $server.Name }} application::*; - {{ end }} server_name {{ $server.Name }}; diff --git a/controllers/nginx/nginx/main.go b/controllers/nginx/nginx/main.go index 8dfc892dc..28d1f2156 100644 --- a/controllers/nginx/nginx/main.go +++ b/controllers/nginx/nginx/main.go @@ -89,6 +89,11 @@ type nginxConfiguration struct { // Sets the maximum allowed size of the client request body BodySize string `structs:"body-size,omitempty"` + // EnableStickySessions enabled sticky sessions using cookies + // https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng + // By default this is disabled + EnableStickySessions bool `structs:"enable-sticky-sessions,omitempty"` + // EnableVtsStatus allows the replacement of the default status page with a third party module named // nginx-module-vts - https://github.com/vozlt/nginx-module-vts // By default this is disabled From 996c769080e08985c04c7e20b3feda9d52fd044d Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Thu, 28 Apr 2016 20:04:41 -0300 Subject: [PATCH 13/16] Simplify port mapping of endpoints --- controllers/nginx/Dockerfile | 2 +- controllers/nginx/controller.go | 80 +++++++++++++++++++------------- controllers/nginx/nginx/nginx.go | 5 +- 3 files changed, 52 insertions(+), 35 deletions(-) diff --git a/controllers/nginx/Dockerfile b/controllers/nginx/Dockerfile index 64beee83c..cb285d9b3 100644 --- a/controllers/nginx/Dockerfile +++ b/controllers/nginx/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM gcr.io/google_containers/nginx-slim:0.7 +FROM gcr.io/google_containers/nginx-slim:0.6 RUN apt-get update && apt-get install -y \ diffutils \ diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 40f835442..0a3e3838a 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -48,6 +48,7 @@ const ( defServerName = "_" namedPortAnnotation = "kubernetes.io/ingress-named-ports" podStoreSyncedPollPeriod = 1 * time.Second + rootLocation = "/" ) var ( @@ -621,8 +622,9 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng servers[defServerName] = &nginx.Server{ Name: defServerName, Locations: []*nginx.Location{{ - Path: "/", - Upstream: *lbc.getDefaultUpstream(), + Path: rootLocation, + IsDefBackend: true, + Upstream: *lbc.getDefaultUpstream(), }, }, } @@ -646,13 +648,19 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng if nginxPath == "" { lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "Ingress rule '%v/%v' contains no path definition. Assuming /", ing.GetNamespace(), ing.GetName()) - nginxPath = "/" + nginxPath = rootLocation } - // Validate that there is no another previuous rule - // for the same host and path. + // Validate that there is no another previuous + // rule for the same host and path. addLoc := true for _, loc := range server.Locations { + if loc.Path == rootLocation && nginxPath == rootLocation && loc.IsDefBackend { + loc.Upstream = *ups + addLoc = false + continue + } + if loc.Path == nginxPath { lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "Path '%v' already defined in another Ingress rule", nginxPath) @@ -677,6 +685,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng aUpstreams := make([]*nginx.Upstream, 0, len(upstreams)) for _, value := range upstreams { if len(value.Backends) == 0 { + glog.Warningf("upstream %v does no have any active endpoints. 
Using default backend", value.Name) value.Backends = append(value.Backends, nginx.NewDefaultServer()) } sort.Sort(nginx.UpstreamServerByAddrPort(value.Backends)) @@ -709,37 +718,38 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin for _, path := range rule.HTTP.Paths { name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) - if _, ok := upstreams[name]; !ok { - upstreams[name] = nginx.NewUpstream(name) + if _, ok := upstreams[name]; ok { + continue + } - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) - if err != nil { - glog.Infof("error getting service %v from the cache: %v", svcKey, err) - continue - } + glog.V(3).Infof("creating upstream %v", name) + upstreams[name] = nginx.NewUpstream(name) - if !svcExists { - glog.Warningf("service %v does no exists", svcKey) - continue - } + svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) + if err != nil { + glog.Infof("error getting service %v from the cache: %v", svcKey, err) + continue + } - svc := svcObj.(*api.Service) - for _, servicePort := range svc.Spec.Ports { - port := servicePort.TargetPort - if servicePort.Name != "" { - port = intstr.FromString(servicePort.Name) + if !svcExists { + glog.Warningf("service %v does no exists", svcKey) + continue + } + + svc := svcObj.(*api.Service) + glog.V(3).Infof("obtaining port information for service %v", svcKey) + bp := path.Backend.ServicePort.String() + for _, servicePort := range svc.Spec.Ports { + // targetPort could be a string, use the name or the port (int) + if strconv.Itoa(servicePort.Port) == bp || servicePort.TargetPort.String() == bp || servicePort.Name == bp { + endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP) + if len(endps) == 0 { + glog.Warningf("service %v does no have any active endpoints", svcKey) } - if port == path.Backend.ServicePort { - endps := lbc.getEndpoints(svc, port, api.ProtocolTCP) - if len(endps) == 0 { - glog.Warningf("service %v does no have any active endpoints", svcKey) - } - - upstreams[name].Backends = append(upstreams[name].Backends, endps...) - break - } + upstreams[name].Backends = append(upstreams[name].Backends, endps...) 
+ break } } } @@ -759,7 +769,13 @@ func (lbc *loadBalancerController) createServers(data []interface{}) map[string] for _, rule := range ing.Spec.Rules { if _, ok := servers[rule.Host]; !ok { - servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: []*nginx.Location{}} + locs := []*nginx.Location{} + locs = append(locs, &nginx.Location{ + Path: rootLocation, + IsDefBackend: true, + Upstream: *lbc.getDefaultUpstream(), + }) + servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: locs} } if pemFile, ok := pems[rule.Host]; ok { diff --git a/controllers/nginx/nginx/nginx.go b/controllers/nginx/nginx/nginx.go index 7f05650a6..0a670a3b8 100644 --- a/controllers/nginx/nginx/nginx.go +++ b/controllers/nginx/nginx/nginx.go @@ -82,8 +82,9 @@ func (c ServerByName) Less(i, j int) bool { // Location describes an NGINX location type Location struct { - Path string - Upstream Upstream + Path string + IsDefBackend bool + Upstream Upstream } // LocationByPath sorts location by path From b086a686dd78c64ad3f0b11d5d9bc799b4ffef0b Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Sat, 30 Apr 2016 12:34:33 -0300 Subject: [PATCH 14/16] Allow custom nginx templates --- controllers/nginx/Dockerfile | 2 +- controllers/nginx/README.md | 9 +++ .../nginx/examples/custom-template/README.md | 9 +++ .../custom-template/custom-template.yaml | 57 +++++++++++++++++++ controllers/nginx/nginx/template.go | 3 +- 5 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 controllers/nginx/examples/custom-template/README.md create mode 100644 controllers/nginx/examples/custom-template/custom-template.yaml diff --git a/controllers/nginx/Dockerfile b/controllers/nginx/Dockerfile index cb285d9b3..28e44a7d9 100644 --- a/controllers/nginx/Dockerfile +++ b/controllers/nginx/Dockerfile @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y \ && rm -rf /var/lib/apt/lists/* COPY nginx-ingress-controller / -COPY nginx.tmpl / +COPY nginx.tmpl /etc/nginx/template/nginx.tmpl COPY default.conf /etc/nginx/nginx.conf COPY lua /etc/nginx/lua/ diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index f2ed1e63d..c82d71c81 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -180,6 +180,15 @@ Using a ConfigMap it is possible to customize the defaults in nginx. Please check the [tcp services](examples/custom-configuration/README.md) example +## Custom NGINX template + +The NGINX template is located in the file `/etc/nginx/template/nginx.tmpl`. Mounting a volume is possible to use a custom version. +Use the [custom-template](examples/custom-template/README.md) example as a guide + +**Please note the template is tied to the go code. Be sure to no change names in the variable `$cfg`** + + + ### NGINX status page The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`. 
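
A minimal sketch of checking the status page described above — assuming the controller is deployed with port 8080 exposed as a hostPort (as in the rc.yaml from this series) and with `<node-ip>` replaced by the address of a node running the controller:

```
# the default server in nginx.tmpl listens on 8080 and serves /nginx_status,
# so the status page is reachable through the hostPort mapping
curl http://<node-ip>:8080/nginx_status
```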
diff --git a/controllers/nginx/examples/custom-template/README.md b/controllers/nginx/examples/custom-template/README.md new file mode 100644 index 000000000..6d6375d0b --- /dev/null +++ b/controllers/nginx/examples/custom-template/README.md @@ -0,0 +1,9 @@ + +This example shows how is possible to use a custom template + +First create a configmap with a template inside running: +``` +kubectl create configmap nginx-template --from-file=nginx.tmpl=../../nginx.tmpl +``` + +Next create the rc `kubectl create -f custom-template.yaml` diff --git a/controllers/nginx/examples/custom-template/custom-template.yaml b/controllers/nginx/examples/custom-template/custom-template.yaml new file mode 100644 index 000000000..ac7991033 --- /dev/null +++ b/controllers/nginx/examples/custom-template/custom-template.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-lb +spec: + replicas: 1 + selector: + k8s-app: nginx-ingress-lb + template: + metadata: + labels: + k8s-app: nginx-ingress-lb + name: nginx-ingress-lb + spec: + terminationGracePeriodSeconds: 60 + containers: + - image: aledbf/nginx-third-party:0.15 + name: nginx-ingress-lb + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 10249 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + # use downward API + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 4430 + args: + - /nginx-ingress-controller + - --default-backend-service=default/default-http-backend + volumeMounts: + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + volumes: + - name: nginx-template-volume + configMap: + name: nginx-template + items: + - key: nginx.tmpl + path: nginx.tmpl diff --git a/controllers/nginx/nginx/template.go b/controllers/nginx/nginx/template.go index fc67cfd1c..8a5b1626f 100644 --- a/controllers/nginx/nginx/template.go +++ b/controllers/nginx/nginx/template.go @@ -29,6 +29,7 @@ import ( var ( camelRegexp = regexp.MustCompile("[0-9A-Za-z]+") + tmplPath = "/etc/nginx/template/nginx.tmpl" funcMap = template.FuncMap{ "empty": func(input interface{}) bool { @@ -43,7 +44,7 @@ var ( ) func (ngx *Manager) loadTemplate() { - tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles("./nginx.tmpl") + tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles(tmplPath) ngx.template = tmpl } From 4d25306b52a3f7663f6d8e21d54442111d28b74b Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Sun, 1 May 2016 19:07:31 -0300 Subject: [PATCH 15/16] Allow custom default server when host is empty in ingress rule --- controllers/nginx/controller.go | 43 +++++++++++++------ .../custom-template/custom-template.yaml | 2 +- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 0a3e3838a..bbf016170 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -618,15 +618,18 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng upstreams[defUpstreamName] = lbc.getDefaultUpstream() servers := lbc.createServers(data) - // default server - no servername. 
- servers[defServerName] = &nginx.Server{ - Name: defServerName, - Locations: []*nginx.Location{{ - Path: rootLocation, - IsDefBackend: true, - Upstream: *lbc.getDefaultUpstream(), - }, - }, + if _, ok := servers[defServerName]; !ok { + // default server - no servername. + // there is no rule with default backend + servers[defServerName] = &nginx.Server{ + Name: defServerName, + Locations: []*nginx.Location{{ + Path: rootLocation, + IsDefBackend: true, + Upstream: *lbc.getDefaultUpstream(), + }, + }, + } } for _, ingIf := range data { @@ -637,7 +640,14 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng continue } - server := servers[rule.Host] + host := rule.Host + if host == "" { + host = defServerName + } + server := servers[host] + if server == nil { + server = servers["_"] + } for _, path := range rule.HTTP.Paths { upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) @@ -768,18 +778,23 @@ func (lbc *loadBalancerController) createServers(data []interface{}) map[string] ing := ingIf.(*extensions.Ingress) for _, rule := range ing.Spec.Rules { - if _, ok := servers[rule.Host]; !ok { + host := rule.Host + if host == "" { + host = defServerName + } + + if _, ok := servers[host]; !ok { locs := []*nginx.Location{} locs = append(locs, &nginx.Location{ Path: rootLocation, IsDefBackend: true, Upstream: *lbc.getDefaultUpstream(), }) - servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: locs} + servers[host] = &nginx.Server{Name: host, Locations: locs} } - if pemFile, ok := pems[rule.Host]; ok { - server := servers[rule.Host] + if pemFile, ok := pems[host]; ok { + server := servers[host] server.SSL = true server.SSLCertificate = pemFile server.SSLCertificateKey = pemFile diff --git a/controllers/nginx/examples/custom-template/custom-template.yaml b/controllers/nginx/examples/custom-template/custom-template.yaml index ac7991033..65ef90e86 100644 --- a/controllers/nginx/examples/custom-template/custom-template.yaml +++ b/controllers/nginx/examples/custom-template/custom-template.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: aledbf/nginx-third-party:0.15 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: From 96a66aa6fa57dce9dc45458c4af0cefc6bc8e20b Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Sun, 1 May 2016 23:34:00 -0300 Subject: [PATCH 16/16] Only update service annotations if it contains named ports --- controllers/nginx/controller.go | 113 ++++++------------ .../custom-template/custom-template.yaml | 2 +- 2 files changed, 35 insertions(+), 80 deletions(-) diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index bbf016170..f2cb6173f 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -57,12 +57,15 @@ var ( type namedPortMapping map[string]string +// getPort returns the port defined in a named port func (npm namedPortMapping) getPort(name string) (string, bool) { - val, ok := npm.getMappings()[name] + val, ok := npm.getPortMappings()[name] return val, ok } -func (npm namedPortMapping) getMappings() map[string]string { +// getPortMappings returns the map containing the +// mapping of named port names and the port number +func (npm namedPortMapping) getPortMappings() map[string]string { data := npm[namedPortAnnotation] var mapping map[string]string if data == "" { @@ -100,10 +103,6 @@ type 
loadBalancerController struct { // this avoids a sync execution in the ResourceEventHandlerFuncs ingQueue *taskQueue - // used to update the annotation that matches a service using one or - // more named ports to an endpoint port - svcEpQueue *taskQueue - // stopLock is used to enforce only a single call to Stop is active. // Needed because we allow stopping through an http endpoint and // allowing concurrent stoppers leads to stack traces. @@ -136,14 +135,12 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura lbc.syncQueue = NewTaskQueue(lbc.sync) lbc.ingQueue = NewTaskQueue(lbc.updateIngressStatus) - lbc.svcEpQueue = NewTaskQueue(lbc.updateEpNamedPorts) ingEventHandler := framework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { addIng := obj.(*extensions.Ingress) lbc.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name)) lbc.ingQueue.enqueue(obj) - lbc.svcEpQueue.enqueue(obj) lbc.syncQueue.enqueue(obj) }, DeleteFunc: func(obj interface{}) { @@ -156,7 +153,6 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura upIng := cur.(*extensions.Ingress) lbc.recorder.Eventf(upIng, api.EventTypeNormal, "UPDATE", fmt.Sprintf("%s/%s", upIng.Namespace, upIng.Name)) lbc.ingQueue.enqueue(cur) - lbc.svcEpQueue.enqueue(cur) lbc.syncQueue.enqueue(cur) } }, @@ -252,84 +248,26 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.Config return lbc.client.ConfigMaps(ns).Get(name) } -func (lbc *loadBalancerController) updateEpNamedPorts(key string) { - if !lbc.controllersInSync() { - time.Sleep(podStoreSyncedPollPeriod) - lbc.svcEpQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) - return - } - - glog.V(4).Infof("checking if service %v uses named ports to update annotation %v", key, namedPortAnnotation) - - ingObj, ingExists, err := lbc.ingLister.Store.GetByKey(key) - if err != nil { - glog.Warningf("error getting service %v: %v", key, err) - return - } - - if !ingExists { - glog.Warningf("service %v not found", key) - return - } - - ing := ingObj.(*extensions.Ingress) - for _, rule := range ing.Spec.Rules { - if rule.IngressRuleValue.HTTP == nil { - continue - } - - for _, path := range rule.HTTP.Paths { - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) - if err != nil { - glog.Infof("error getting service %v from the cache: %v", svcKey, err) - continue - } - - if !svcExists { - glog.Warningf("service %v does no exists", svcKey) - continue - } - - svc := svcObj.(*api.Service) - if svc.Spec.Selector == nil { - return - } - - // check to avoid a call to checkSvcForUpdate if the port is not a string - _, err = strconv.Atoi(path.Backend.ServicePort.StrVal) - if err == nil { - continue - } - - err = lbc.checkSvcForUpdate(svc) - if err != nil { - lbc.svcEpQueue.requeue(key, err) - return - } - } - } -} - // checkSvcForUpdate verifies if one of the running pods for a service contains // named port. 
If the annotation in the service does not exists or is not equals // to the port mapping obtained from the pod the service must be updated to reflect // the current state -func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { +func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) (map[string]string, error) { // get the pods associated with the service // TODO: switch this to a watch pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), }) + + namedPorts := map[string]string{} if err != nil { - return fmt.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) + return namedPorts, fmt.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) } if len(pods.Items) == 0 { - return nil + return namedPorts, nil } - namedPorts := map[string]string{} // we need to check only one pod searching for named ports pod := &pods.Items[0] glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name) @@ -362,7 +300,7 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name) if err != nil { - return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) + return namedPorts, fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) } if newSvc.ObjectMeta.Annotations == nil { @@ -371,13 +309,15 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error { newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) glog.Infof("updating service %v with new named port mappings", svc.Name) - _, err = lbc.client.Services(svc.Namespace).Update(svc) + _, err = lbc.client.Services(svc.Namespace).Update(newSvc) if err != nil { - return fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) + return namedPorts, fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) } + + return newSvc.ObjectMeta.Annotations, nil } - return nil + return namedPorts, nil } func (lbc *loadBalancerController) sync(key string) { @@ -889,14 +829,30 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints targetPort = epPort.Port } case intstr.String: - if val, ok := namedPortMapping(s.ObjectMeta.Annotations).getPort(servicePort.StrVal); ok { + namedPorts := s.ObjectMeta.Annotations + val, ok := namedPortMapping(namedPorts).getPort(servicePort.StrVal) + if ok { port, err := strconv.Atoi(val) if err != nil { glog.Warningf("%v is not valid as a port", val) continue } - if epPort.Protocol == proto { + targetPort = port + } else { + newnp, err := lbc.checkSvcForUpdate(s) + if err != nil { + glog.Warningf("error mapping service ports: %v", err) + continue + } + val, ok := namedPortMapping(newnp).getPort(servicePort.StrVal) + if ok { + port, err := strconv.Atoi(val) + if err != nil { + glog.Warningf("%v is not valid as a port", val) + continue + } + targetPort = port } } @@ -988,7 +944,6 @@ func (lbc *loadBalancerController) Run() { go lbc.syncQueue.run(time.Second, lbc.stopCh) go lbc.ingQueue.run(time.Second, lbc.stopCh) - go lbc.svcEpQueue.run(time.Second, lbc.stopCh) <-lbc.stopCh glog.Infof("shutting down NGINX loadbalancer controller") diff --git a/controllers/nginx/examples/custom-template/custom-template.yaml b/controllers/nginx/examples/custom-template/custom-template.yaml index 65ef90e86..1b33910e9 100644 --- 
a/controllers/nginx/examples/custom-template/custom-template.yaml +++ b/controllers/nginx/examples/custom-template/custom-template.yaml @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4430 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend
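
With the final patch, the `kubernetes.io/ingress-named-ports` annotation is only written when a backend service actually uses a named `targetPort` and an Ingress rule references that service. A minimal sketch of exercising that path — it assumes the echoheaders pod template declares a container port named `http` (the stock example uses an unnamed port 8080, so the names below are illustrative only):

```
# expose a service whose targetPort is a name rather than a number
kubectl expose deployment echoheaders --port=80 --target-port=http --name=echoheaders-named

# once the controller resolves the named port from a running pod,
# the name-to-port mapping should show up as a service annotation
kubectl get svc echoheaders-named -o yaml | grep ingress-named-ports
```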