diff --git a/controllers/nginx/Dockerfile b/controllers/nginx/Dockerfile index 853dbe364..28e44a7d9 100644 --- a/controllers/nginx/Dockerfile +++ b/controllers/nginx/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/google_containers/nginx-slim:0.5 +FROM gcr.io/google_containers/nginx-slim:0.6 RUN apt-get update && apt-get install -y \ diffutils \ @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y \ && rm -rf /var/lib/apt/lists/* COPY nginx-ingress-controller / -COPY nginx.tmpl / +COPY nginx.tmpl /etc/nginx/template/nginx.tmpl COPY default.conf /etc/nginx/nginx.conf COPY lua /etc/nginx/lua/ diff --git a/controllers/nginx/Makefile b/controllers/nginx/Makefile index ce0668a9d..8556bf2ed 100644 --- a/controllers/nginx/Makefile +++ b/controllers/nginx/Makefile @@ -1,7 +1,7 @@ all: push # 0.0 shouldn't clobber any release builds -TAG = 0.5 +TAG = 0.6 PREFIX = gcr.io/google_containers/nginx-ingress-controller REPO_INFO=$(shell git config --get remote.origin.url) diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index 8a527e37f..c82d71c81 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -11,7 +11,6 @@ This is a nginx Ingress controller that uses [ConfigMap](https://github.com/kube - custom ssl_dhparam (optional). Just mount a secret with a file named `dhparam.pem`. - support for TCP services (flag `--tcp-services-configmap`) - custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md) -- custom error pages. 
Using the flag `--custom-error-service` is possible to use a custom compatible [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) image ## Requirements @@ -120,7 +119,13 @@ Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/exampl Check the [example](examples/tls/README.md) +### HTTP Strict Transport Security +HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. + +By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. + +To disable this behavior use `hsts=false` in the NGINX ConfigMap. #### Optimizing TLS Time To First Byte (TTTFB) @@ -175,6 +180,15 @@ Using a ConfigMap it is possible to customize the defaults in nginx. Please check the [tcp services](examples/custom-configuration/README.md) example +## Custom NGINX template + +The NGINX template is located in the file `/etc/nginx/template/nginx.tmpl`. Mounting a volume is possible to use a custom version. +Use the [custom-template](examples/custom-template/README.md) example as a guide + +**Please note the template is tied to the go code. Be sure to no change names in the variable `$cfg`** + + + ### NGINX status page The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`. @@ -187,25 +201,22 @@ Please check the example `example/rc-default.yaml` To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json` + +### Custom errors + +In case of an error in a request the body of the response is obtained from the `default backend`. 
Each request to the default backend includes two headers: +- `X-Code` indicates the HTTP code +- `X-Format` the value of the `Accept` header + +Using this two headers is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspect each request and returns a custom error page with the format expected by the client. This images handles `html` and `json` responses. + + ## Troubleshooting Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md): * make setup-files.sh file in hypercube does not provide 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather then 10.0.0.1 -> this results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong ip), to verify this add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC, and docker will log this error before kubernetes kills the container. * To fix the above, setup-files.sh must be patched before the cluster is inited (refer to https://github.com/kubernetes/kubernetes/pull/21504) -### Custom errors - -The default backend provides a way to customize the default 404 page. This helps but sometimes is not enough. -Using the flag `--custom-error-service` is possible to use an image that must be 404 compatible and provide the route /error -[Here](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) there is an example of the the image - -The route `/error` expects two arguments: code and format -* code defines the wich error code is expected to be returned (502,503,etc.) 
-* format the format that should be returned For instance /error?code=504&format=json or /error?code=502&format=html - -Using a volume pointing to `/var/www/html` directory is possible to use a custom error - - ### Debug Using the flag `--v=XX` it is possible to increase the level of logging. @@ -241,3 +252,5 @@ The previous behavior can be restored using `retry-non-idempotent=true` in the c ## Limitations - Ingress rules for TLS require the definition of the field `host` +- The IP address in the status of loadBalancer could contain old values + diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 85f478a9c..f2cb6173f 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "encoding/json" "fmt" "reflect" "sort" @@ -28,11 +29,13 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + podutil "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/record" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/watch" @@ -41,14 +44,40 @@ import ( ) const ( - defUpstreamName = "upstream-default-backend" - defServerName = "_" + defUpstreamName = "upstream-default-backend" + defServerName = "_" + namedPortAnnotation = "kubernetes.io/ingress-named-ports" + podStoreSyncedPollPeriod = 1 * time.Second + rootLocation = "/" ) var ( keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc ) +type namedPortMapping map[string]string + +// getPort returns the port defined in a named port +func (npm namedPortMapping) getPort(name string) (string, bool) { + val, ok := npm.getPortMappings()[name] + return val, ok +} + +// getPortMappings returns the map containing the +// mapping of named port names 
and the port number +func (npm namedPortMapping) getPortMappings() map[string]string { + data := npm[namedPortAnnotation] + var mapping map[string]string + if data == "" { + return mapping + } + if err := json.Unmarshal([]byte(data), &mapping); err != nil { + glog.Errorf("unexpected error reading annotations: %v", err) + } + + return mapping +} + // loadBalancerController watches the kubernetes api and adds/removes services // from the loadbalancer type loadBalancerController struct { @@ -88,7 +117,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(kubeClient.Events("")) + eventBroadcaster.StartRecordingToSink(kubeClient.Events(namespace)) lbc := loadBalancerController{ client: kubeClient, @@ -99,7 +128,9 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura tcpConfigMap: tcpConfigMapName, udpConfigMap: udpConfigMapName, defaultSvc: defaultSvc, - recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "loadbalancer-controller"}), + recorder: eventBroadcaster.NewRecorder(api.EventSource{ + Component: "nginx-ingress-controller", + }), } lbc.syncQueue = NewTaskQueue(lbc.sync) @@ -217,8 +248,81 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.Config return lbc.client.ConfigMaps(ns).Get(name) } +// checkSvcForUpdate verifies if one of the running pods for a service contains +// named port. 
If the annotation in the service does not exists or is not equals +// to the port mapping obtained from the pod the service must be updated to reflect +// the current state +func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) (map[string]string, error) { + // get the pods associated with the service + // TODO: switch this to a watch + pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{ + LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(), + }) + + namedPorts := map[string]string{} + if err != nil { + return namedPorts, fmt.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err) + } + + if len(pods.Items) == 0 { + return namedPorts, nil + } + + // we need to check only one pod searching for named ports + pod := &pods.Items[0] + glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name) + for i := range svc.Spec.Ports { + servicePort := &svc.Spec.Ports[i] + + _, err := strconv.Atoi(servicePort.TargetPort.StrVal) + if err != nil { + portNum, err := podutil.FindPort(pod, servicePort) + if err != nil { + glog.V(4).Infof("failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err) + continue + } + + if servicePort.TargetPort.StrVal == "" { + continue + } + + namedPorts[servicePort.TargetPort.StrVal] = fmt.Sprintf("%v", portNum) + } + } + + if svc.ObjectMeta.Annotations == nil { + svc.ObjectMeta.Annotations = map[string]string{} + } + + curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation] + if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) { + data, _ := json.Marshal(namedPorts) + + newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name) + if err != nil { + return namedPorts, fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err) + } + + if newSvc.ObjectMeta.Annotations == nil { + newSvc.ObjectMeta.Annotations = map[string]string{} + } + + newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data) + 
glog.Infof("updating service %v with new named port mappings", svc.Name) + _, err = lbc.client.Services(svc.Namespace).Update(newSvc) + if err != nil { + return namedPorts, fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err) + } + + return newSvc.ObjectMeta.Annotations, nil + } + + return namedPorts, nil +} + func (lbc *loadBalancerController) sync(key string) { if !lbc.controllersInSync() { + time.Sleep(podStoreSyncedPollPeriod) lbc.syncQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) return } @@ -245,6 +349,7 @@ func (lbc *loadBalancerController) sync(key string) { func (lbc *loadBalancerController) updateIngressStatus(key string) { if !lbc.controllersInSync() { + time.Sleep(podStoreSyncedPollPeriod) lbc.ingQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced")) return } @@ -311,7 +416,7 @@ func (lbc *loadBalancerController) getTCPServices() []*nginx.Location { return []*nginx.Location{} } - return lbc.getServices(tcpMap.Data, api.ProtocolTCP) + return lbc.getStreamServices(tcpMap.Data, api.ProtocolTCP) } func (lbc *loadBalancerController) getUDPServices() []*nginx.Location { @@ -331,10 +436,10 @@ func (lbc *loadBalancerController) getUDPServices() []*nginx.Location { return []*nginx.Location{} } - return lbc.getServices(tcpMap.Data, api.ProtocolUDP) + return lbc.getStreamServices(tcpMap.Data, api.ProtocolUDP) } -func (lbc *loadBalancerController) getServices(data map[string]string, proto api.Protocol) []*nginx.Location { +func (lbc *loadBalancerController) getStreamServices(data map[string]string, proto api.Protocol) []*nginx.Location { var svcs []*nginx.Location // k -> port to expose in nginx // v -> /: @@ -345,35 +450,49 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api continue } - svcPort := strings.Split(v, ":") - if len(svcPort) != 2 { + // this ports are required for NGINX + if k == "80" || k == "443" || k == "8181" { + 
glog.Warningf("port %v cannot be used for TCP or UDP services. Is reserved for NGINX", k) + continue + } + + nsSvcPort := strings.Split(v, ":") + if len(nsSvcPort) != 2 { glog.Warningf("invalid format (namespace/name:port) '%v'", k) continue } - svcNs, svcName, err := parseNsName(svcPort[0]) + nsName := nsSvcPort[0] + svcPort := nsSvcPort[1] + + svcNs, svcName, err := parseNsName(nsName) if err != nil { glog.Warningf("%v", err) continue } - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcPort[0]) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(nsName) if err != nil { - glog.Warningf("error getting service %v: %v", svcPort[0], err) + glog.Warningf("error getting service %v: %v", nsName, err) continue } if !svcExists { - glog.Warningf("service %v was not found", svcPort[0]) + glog.Warningf("service %v was not found", nsName) continue } svc := svcObj.(*api.Service) var endps []nginx.UpstreamServer - targetPort, err := strconv.Atoi(svcPort[1]) + targetPort, err := strconv.Atoi(svcPort) if err != nil { - endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]), proto) + for _, sp := range svc.Spec.Ports { + if sp.Name == svcPort { + endps = lbc.getEndpoints(svc, sp.TargetPort, proto) + break + } + } } else { // we need to use the TargetPort (where the endpoints are running) for _, sp := range svc.Spec.Ports { @@ -439,14 +558,18 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng upstreams[defUpstreamName] = lbc.getDefaultUpstream() servers := lbc.createServers(data) - // default server - no servername. - servers[defServerName] = &nginx.Server{ - Name: defServerName, - Locations: []*nginx.Location{{ - Path: "/", - Upstream: *lbc.getDefaultUpstream(), - }, - }, + if _, ok := servers[defServerName]; !ok { + // default server - no servername. 
+ // there is no rule with default backend + servers[defServerName] = &nginx.Server{ + Name: defServerName, + Locations: []*nginx.Location{{ + Path: rootLocation, + IsDefBackend: true, + Upstream: *lbc.getDefaultUpstream(), + }, + }, + } } for _, ingIf := range data { @@ -457,51 +580,51 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng continue } - server := servers[rule.Host] - locations := []*nginx.Location{} - - for _, path := range rule.HTTP.Paths { - upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue()) - ups := upstreams[upsName] - - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) - svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) - if err != nil { - glog.Infof("error getting service %v from the cache: %v", svcKey, err) - continue - } - - if !svcExists { - glog.Warningf("service %v does no exists", svcKey) - continue - } - - svc := svcObj.(*api.Service) - - for _, servicePort := range svc.Spec.Ports { - if servicePort.Port == path.Backend.ServicePort.IntValue() { - endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP) - if len(endps) == 0 { - glog.Warningf("service %v does no have any active endpoints", svcKey) - } - - ups.Backends = append(ups.Backends, endps...) 
- break - } - } - - for _, ups := range upstreams { - if upsName == ups.Name { - loc := &nginx.Location{Path: path.Path} - loc.Upstream = *ups - locations = append(locations, loc) - break - } - } + host := rule.Host + if host == "" { + host = defServerName + } + server := servers[host] + if server == nil { + server = servers["_"] } - for _, loc := range locations { - server.Locations = append(server.Locations, loc) + for _, path := range rule.HTTP.Paths { + upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) + ups := upstreams[upsName] + + nginxPath := path.Path + // if there's no path defined we assume / + if nginxPath == "" { + lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", + "Ingress rule '%v/%v' contains no path definition. Assuming /", ing.GetNamespace(), ing.GetName()) + nginxPath = rootLocation + } + + // Validate that there is no another previuous + // rule for the same host and path. + addLoc := true + for _, loc := range server.Locations { + if loc.Path == rootLocation && nginxPath == rootLocation && loc.IsDefBackend { + loc.Upstream = *ups + addLoc = false + continue + } + + if loc.Path == nginxPath { + lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", + "Path '%v' already defined in another Ingress rule", nginxPath) + addLoc = false + break + } + } + + if addLoc { + server.Locations = append(server.Locations, &nginx.Location{ + Path: nginxPath, + Upstream: *ups, + }) + } } } } @@ -512,6 +635,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng aUpstreams := make([]*nginx.Upstream, 0, len(upstreams)) for _, value := range upstreams { if len(value.Backends) == 0 { + glog.Warningf("upstream %v does no have any active endpoints. 
Using default backend", value.Name) value.Backends = append(value.Backends, nginx.NewDefaultServer()) } sort.Sort(nginx.UpstreamServerByAddrPort(value.Backends)) @@ -529,6 +653,8 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng return aUpstreams, aServers } +// createUpstreams creates the NGINX upstreams for each service referenced in +// Ingress rules. The servers inside the upstream are endpoints. func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream { upstreams := make(map[string]*nginx.Upstream) @@ -541,9 +667,40 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin } for _, path := range rule.HTTP.Paths { - name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue()) - if _, ok := upstreams[name]; !ok { - upstreams[name] = nginx.NewUpstream(name) + name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String()) + if _, ok := upstreams[name]; ok { + continue + } + + glog.V(3).Infof("creating upstream %v", name) + upstreams[name] = nginx.NewUpstream(name) + + svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey) + if err != nil { + glog.Infof("error getting service %v from the cache: %v", svcKey, err) + continue + } + + if !svcExists { + glog.Warningf("service %v does no exists", svcKey) + continue + } + + svc := svcObj.(*api.Service) + glog.V(3).Infof("obtaining port information for service %v", svcKey) + bp := path.Backend.ServicePort.String() + for _, servicePort := range svc.Spec.Ports { + // targetPort could be a string, use the name or the port (int) + if strconv.Itoa(servicePort.Port) == bp || servicePort.TargetPort.String() == bp || servicePort.Name == bp { + endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP) + if len(endps) == 0 { + 
glog.Warningf("service %v does no have any active endpoints", svcKey) + } + + upstreams[name].Backends = append(upstreams[name].Backends, endps...) + break + } } } } @@ -561,12 +718,23 @@ func (lbc *loadBalancerController) createServers(data []interface{}) map[string] ing := ingIf.(*extensions.Ingress) for _, rule := range ing.Spec.Rules { - if _, ok := servers[rule.Host]; !ok { - servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: []*nginx.Location{}} + host := rule.Host + if host == "" { + host = defServerName } - if pemFile, ok := pems[rule.Host]; ok { - server := servers[rule.Host] + if _, ok := servers[host]; !ok { + locs := []*nginx.Location{} + locs = append(locs, &nginx.Location{ + Path: rootLocation, + IsDefBackend: true, + Upstream: *lbc.getDefaultUpstream(), + }) + servers[host] = &nginx.Server{Name: host, Locations: locs} + } + + if pemFile, ok := pems[host]; ok { + server := servers[host] server.SSL = true server.SSLCertificate = pemFile server.SSLCertificateKey = pemFile @@ -661,8 +829,32 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints targetPort = epPort.Port } case intstr.String: - if epPort.Name == servicePort.StrVal { - targetPort = epPort.Port + namedPorts := s.ObjectMeta.Annotations + val, ok := namedPortMapping(namedPorts).getPort(servicePort.StrVal) + if ok { + port, err := strconv.Atoi(val) + if err != nil { + glog.Warningf("%v is not valid as a port", val) + continue + } + + targetPort = port + } else { + newnp, err := lbc.checkSvcForUpdate(s) + if err != nil { + glog.Warningf("error mapping service ports: %v", err) + continue + } + val, ok := namedPortMapping(newnp).getPort(servicePort.StrVal) + if ok { + port, err := strconv.Atoi(val) + if err != nil { + glog.Warningf("%v is not valid as a port", val) + continue + } + + targetPort = port + } } } @@ -703,6 +895,9 @@ func (lbc *loadBalancerController) Stop() error { return fmt.Errorf("shutdown already in progress") } +// removeFromIngress 
removes the IP address of the node where the Ingres +// controller is running before shutdown to avoid incorrect status +// information in Ingress rules func (lbc *loadBalancerController) removeFromIngress() { ings := lbc.ingLister.Store.List() glog.Infof("updating %v Ingress rule/s", len(ings)) diff --git a/controllers/nginx/examples/README.md b/controllers/nginx/examples/README.md new file mode 100644 index 000000000..30348b639 --- /dev/null +++ b/controllers/nginx/examples/README.md @@ -0,0 +1,8 @@ + +All the examples references the services `echoheaders-x` and `echoheaders-y` + +``` +kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replicas=1 --port=8080 +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +``` diff --git a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml index 7e43da8b9..10ae3fa8e 100644 --- a/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml +++ b/controllers/nginx/examples/custom-configuration/rc-custom-configuration.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/custom-template/README.md b/controllers/nginx/examples/custom-template/README.md new file mode 100644 index 000000000..6d6375d0b --- /dev/null +++ b/controllers/nginx/examples/custom-template/README.md @@ -0,0 +1,9 @@ + +This example shows how 
is possible to use a custom template + +First create a configmap with a template inside running: +``` +kubectl create configmap nginx-template --from-file=nginx.tmpl=../../nginx.tmpl +``` + +Next create the rc `kubectl create -f custom-template.yaml` diff --git a/controllers/nginx/examples/custom-template/custom-template.yaml b/controllers/nginx/examples/custom-template/custom-template.yaml new file mode 100644 index 000000000..1b33910e9 --- /dev/null +++ b/controllers/nginx/examples/custom-template/custom-template.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-lb +spec: + replicas: 1 + selector: + k8s-app: nginx-ingress-lb + template: + metadata: + labels: + k8s-app: nginx-ingress-lb + name: nginx-ingress-lb + spec: + terminationGracePeriodSeconds: 60 + containers: + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 + name: nginx-ingress-lb + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 10249 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + # use downward API + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 443 + args: + - /nginx-ingress-controller + - --default-backend-service=default/default-http-backend + volumeMounts: + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + volumes: + - name: nginx-template-volume + configMap: + name: nginx-template + items: + - key: nginx.tmpl + path: nginx.tmpl diff --git a/controllers/nginx/examples/daemonset/as-daemonset.yaml b/controllers/nginx/examples/daemonset/as-daemonset.yaml index 9cc493969..bf46f89ac 100644 --- a/controllers/nginx/examples/daemonset/as-daemonset.yaml +++ b/controllers/nginx/examples/daemonset/as-daemonset.yaml @@ -10,7 +10,7 @@ 
spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -34,7 +34,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/default/rc-default.yaml b/controllers/nginx/examples/default/rc-default.yaml index 842a372cc..ea8e2d924 100644 --- a/controllers/nginx/examples/default/rc-default.yaml +++ b/controllers/nginx/examples/default/rc-default.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -40,7 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/full/rc-full.yaml b/controllers/nginx/examples/full/rc-full.yaml index d54fe4dbb..9623ba6af 100644 --- a/controllers/nginx/examples/full/rc-full.yaml +++ b/controllers/nginx/examples/full/rc-full.yaml @@ -21,7 +21,7 @@ spec: secret: secretName: dhparam-example containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -45,7 +45,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 + hostPort: 443 - containerPort: 8080 hostPort: 9000 volumeMounts: diff --git a/controllers/nginx/examples/tcp/rc-tcp.yaml b/controllers/nginx/examples/tcp/rc-tcp.yaml index ef64d30b7..36420c831 100644 --- 
a/controllers/nginx/examples/tcp/rc-tcp.yaml +++ b/controllers/nginx/examples/tcp/rc-tcp.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -40,11 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 - # we expose 8080 to access nginx stats in url /nginx-status - # this is optional - - containerPort: 8080 - hostPort: 8081 + hostPort: 443 # service echoheaders as TCP service default/echoheaders:9000 # 9000 indicates the port used to expose the service - containerPort: 9000 diff --git a/controllers/nginx/examples/tls/README.md b/controllers/nginx/examples/tls/README.md index e69de29bb..39ea28406 100644 --- a/controllers/nginx/examples/tls/README.md +++ b/controllers/nginx/examples/tls/README.md @@ -0,0 +1,90 @@ +This is an example to use a TLS Ingress rule to use SSL in NGINX + +# TLS certificate termination + +This examples uses 2 different certificates to terminate SSL for 2 hostnames. + +1. Deploy the controller by creating the rc in the parent dir +2. Create tls secret for foo.bar.com +3. 
Create rc-ssl.yaml + +*Next create a SSL certificate for `foo.bar.com` host:* + +``` +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com" +``` + +*Now store the SSL certificate in a secret:* + +``` +echo " +apiVersion: v1 +kind: Secret +metadata: + name: foo-secret +data: + tls.crt: `base64 /tmp/tls.crt` + tls.key: `base64 /tmp/tls.key` +" | kubectl create -f - +``` + +*Finally create a tls Ingress rule:* + +``` +echo " +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: foo + namespace: default +spec: + tls: + - hosts: + - foo.bar.com + secretName: foo-secret + rules: + - host: foo.bar.com + http: + paths: + - backend: + serviceName: echoheaders-x + servicePort: 80 + path: / +" | kubectl create -f - +``` + +You should be able to reach your nginx service or echoheaders service using a hostname: +``` +$ kubectl get ing +NAME RULE BACKEND ADDRESS +foo - 10.4.0.3 + foo.bar.com + / echoheaders-x:80 +``` + +``` +$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k +old-mbp:contrib aledbf$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k +CLIENT VALUES: +client_address=10.2.48.4 +command=GET +real path=/ +query=nil +request_version=1.1 +request_uri=http://foo.bar.com:8080/ + +SERVER VALUES: +server_version=nginx: 1.9.7 - lua: 9019 + +HEADERS RECEIVED: +accept=*/* +connection=close +host=foo.bar.com +user-agent=curl/7.43.0 +x-forwarded-for=10.2.48.1 +x-forwarded-host=foo.bar.com +x-forwarded-proto=https +x-real-ip=10.2.48.1 +BODY: +-no body in request- +``` diff --git a/controllers/nginx/examples/tls/rc-ssl.yaml b/controllers/nginx/examples/tls/rc-ssl.yaml index f98a71902..ea8e2d924 100644 --- a/controllers/nginx/examples/tls/rc-ssl.yaml +++ b/controllers/nginx/examples/tls/rc-ssl.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: 
nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -40,9 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 - - containerPort: 8080 - hostPort: 9000 + hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend diff --git a/controllers/nginx/examples/udp/rc-udp.yaml b/controllers/nginx/examples/udp/rc-udp.yaml index 283c2211b..01eafd613 100644 --- a/controllers/nginx/examples/udp/rc-udp.yaml +++ b/controllers/nginx/examples/udp/rc-udp.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -40,11 +40,7 @@ spec: - containerPort: 80 hostPort: 80 - containerPort: 443 - hostPort: 4444 - # we expose 8080 to access nginx stats in url /nginx-status - # this is optional - - containerPort: 8080 - hostPort: 8081 + hostPort: 443 - containerPort: 53 hostPort: 53 args: diff --git a/controllers/nginx/main.go b/controllers/nginx/main.go index bc19f8188..75b6f07a8 100644 --- a/controllers/nginx/main.go +++ b/controllers/nginx/main.go @@ -43,7 +43,7 @@ const ( var ( // value overwritten during build. This can be used to resolve issues. 
- version = "0.5" + version = "0.6" gitRepo = "https://github.com/kubernetes/contrib" flags = pflag.NewFlagSet("", pflag.ExitOnError) diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index facbb766e..77ff2ac21 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -80,10 +80,6 @@ http { '' $scheme; } - map $pass_access_scheme $sts { - 'https' 'max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload'; - } - # Map a response error watching the header Content-Type map $http_accept $httpAccept { default html; @@ -145,28 +141,31 @@ http { {{range $name, $upstream := .upstreams}} upstream {{$upstream.Name}} { + {{ if $cfg.enableStickySessions }} + sticky hash=sha1 httponly; + {{ else }} least_conn; - {{range $server := $upstream.Backends}}server {{$server.Address}}:{{$server.Port}}; - {{end}} + {{ end }} + {{ range $server := $upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }}; + {{ end }} } {{end}} {{ range $server := .servers }} server { - listen 80; - {{ if $server.SSL }}listen 443 ssl http2; + listen 80{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }}; + {{ if $server.SSL }}listen 443{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} ssl http2; ssl_certificate {{ $server.SSLCertificate }}; ssl_certificate_key {{ $server.SSLCertificateKey }};{{ end }} - {{ if $cfg.enableVtsStatus }} - vhost_traffic_status_filter_by_set_key {{ $server.Name }} application::*; - {{ end }} server_name {{ $server.Name }}; - {{ if $server.SSL }} + {{ if (and $server.SSL $cfg.hsts) }} if ($scheme = http) { return 301 https://$host$request_uri; } + + more_set_headers "Strict-Transport-Security: max-age={{ $cfg.hstsMaxAge }}{{ if $cfg.hstsIncludeSubdomains }}; includeSubDomains{{ end }}; preload"; {{ end }} {{ range $location := $server.Locations }} @@ -182,6 +181,7 @@ http { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host; 
+ proxy_set_header X-Forwarded-Port $server_port; proxy_set_header X-Forwarded-Proto $pass_access_scheme; proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s; @@ -214,18 +214,13 @@ http { # default server, including healthcheck server { - listen 8080 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} reuseport; + listen 8080 default_server reuseport; location /healthz { access_log off; return 200; } - location /health-check { - access_log off; - proxy_pass http://127.0.0.1:10249/healthz; - } - location /nginx_status { {{ if $cfg.enableVtsStatus }} vhost_traffic_status_display; @@ -254,9 +249,7 @@ http { } } - stream { - # TCP services {{ range $i, $tcpServer := .tcpUpstreams }} upstream tcp-{{ $tcpServer.Upstream.Name }} { @@ -286,7 +279,6 @@ stream { proxy_pass udp-{{ $udpServer.Upstream.Name }}; } {{ end }} - } {{/* definition of templates to avoid repetitions */}} diff --git a/controllers/nginx/nginx/main.go b/controllers/nginx/nginx/main.go index 5c6deb1d9..5c7d5a526 100644 --- a/controllers/nginx/nginx/main.go +++ b/controllers/nginx/nginx/main.go @@ -49,7 +49,7 @@ const ( // that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. // https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security // max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. 
- htsMaxAge = "15724800" + hstsMaxAge = "15724800" // If UseProxyProtocol is enabled defIPCIDR defines the default the IP/network address of your external load balancer defIPCIDR = "0.0.0.0/0" @@ -89,6 +89,11 @@ type nginxConfiguration struct { // Sets the maximum allowed size of the client request body BodySize string `structs:"body-size,omitempty"` + // EnableStickySessions enables sticky sessions using cookies + // https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng + // By default this is disabled + EnableStickySessions bool `structs:"enable-sticky-sessions,omitempty"` + // EnableVtsStatus allows the replacement of the default status page with a third party module named // nginx-module-vts - https://github.com/vozlt/nginx-module-vts // By default this is disabled @@ -105,18 +110,19 @@ type nginxConfiguration struct { // Log levels above are listed in the order of increasing severity ErrorLogLevel string `structs:"error-log-level,omitempty"` - // Enables or disables the header HTS in servers running SSL - UseHTS bool `structs:"use-hts,omitempty"` + // Enables or disables the header HSTS in servers running SSL + HSTS bool `structs:"hsts,omitempty"` - // Enables or disables the use of HTS in all the subdomains of the servername - HTSIncludeSubdomains bool `structs:"hts-include-subdomains,omitempty"` + // Enables or disables the use of HSTS in all the subdomains of the servername + // Default: true + HSTSIncludeSubdomains bool `structs:"hsts-include-subdomains,omitempty"` + // HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) // that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. // https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security // max-age is the time, in seconds, that the browser should remember that this site is only to be // accessed using HTTPS. 
- HTSMaxAge string `structs:"hts-max-age,omitempty"` + HSTSMaxAge string `structs:"hsts-max-age,omitempty"` // Time during which a keep-alive client connection will stay open on the server side. // The zero value disables keep-alive client connections @@ -239,11 +245,11 @@ type Manager struct { // in the file default-conf.json func newDefaultNginxCfg() nginxConfiguration { cfg := nginxConfiguration{ - BodySize: bodySize, - ErrorLogLevel: errorLevel, - UseHTS: true, - HTSIncludeSubdomains: true, - HTSMaxAge: htsMaxAge, + BodySize: bodySize, + ErrorLogLevel: errorLevel, + HSTS: true, + HSTSIncludeSubdomains: true, + HSTSMaxAge: hstsMaxAge, GzipTypes: gzipTypes, KeepAlive: 75, MaxWorkerConnections: 16384, diff --git a/controllers/nginx/nginx/nginx.go b/controllers/nginx/nginx/nginx.go index 7f05650a6..0a670a3b8 100644 --- a/controllers/nginx/nginx/nginx.go +++ b/controllers/nginx/nginx/nginx.go @@ -82,8 +82,9 @@ func (c ServerByName) Less(i, j int) bool { // Location describes an NGINX location type Location struct { - Path string - Upstream Upstream + Path string + IsDefBackend bool + Upstream Upstream } // LocationByPath sorts location by path diff --git a/controllers/nginx/nginx/ssl.go b/controllers/nginx/nginx/ssl.go index 65ad0608c..82d192303 100644 --- a/controllers/nginx/nginx/ssl.go +++ b/controllers/nginx/nginx/ssl.go @@ -53,8 +53,10 @@ func (nginx *Manager) CheckSSLCertificate(pemFileName string) ([]string, error) return []string{}, err } - var block *pem.Block - block, _ = pem.Decode(pemCerts) + block, _ := pem.Decode(pemCerts) + if block == nil { + return []string{}, fmt.Errorf("No valid PEM formatted block found") + } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { diff --git a/controllers/nginx/nginx/template.go b/controllers/nginx/nginx/template.go index fc67cfd1c..8a5b1626f 100644 --- a/controllers/nginx/nginx/template.go +++ b/controllers/nginx/nginx/template.go @@ -29,6 +29,7 @@ import ( var ( camelRegexp = 
regexp.MustCompile("[0-9A-Za-z]+") + tmplPath = "/etc/nginx/template/nginx.tmpl" funcMap = template.FuncMap{ "empty": func(input interface{}) bool { @@ -43,7 +44,7 @@ var ( ) func (ngx *Manager) loadTemplate() { - tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles("./nginx.tmpl") + tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles(tmplPath) ngx.template = tmpl } diff --git a/controllers/nginx/rc.yaml b/controllers/nginx/rc.yaml index 527ef42e5..a6f2bc760 100644 --- a/controllers/nginx/rc.yaml +++ b/controllers/nginx/rc.yaml @@ -68,7 +68,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.5 + - image: gcr.io/google_containers/nginx-ingress-controller:0.6 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: @@ -93,6 +93,10 @@ spec: hostPort: 80 - containerPort: 443 hostPort: 443 + # we expose 8080 to access nginx stats in url /nginx-status + # this is optional + - containerPort: 8080 + hostPort: 8080 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend