diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index 9e27f61d6..0e1d0779a 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -36,8 +36,8 @@ kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replic Now we expose the same application in two different services (so we can create different Ingress rules) ``` -kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-x -kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-y +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y ``` Next we create a couple of Ingress rules @@ -215,6 +215,13 @@ I0316 12:24:37.610073 1 command.go:69] change in configuration detected. R - `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html) + +### Retries in non-idempotent methods + +Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error. 
+The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap + + ## Limitations TODO diff --git a/controllers/nginx/controller.go b/controllers/nginx/controller.go index 31dda25bc..8f09eafea 100644 --- a/controllers/nginx/controller.go +++ b/controllers/nginx/controller.go @@ -198,11 +198,11 @@ func (lbc *loadBalancerController) sync(key string) { } ngxConfig := lbc.nginx.ReadConfig(cfg) - tcpServices := lbc.getTCPServices() lbc.nginx.CheckAndReload(ngxConfig, nginx.IngressConfig{ Upstreams: upstreams, Servers: servers, - TCPUpstreams: tcpServices, + TCPUpstreams: lbc.getTCPServices(), + UDPUpstreams: lbc.getUDPServices(), }) } @@ -285,12 +285,12 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api var endps []nginx.UpstreamServer targetPort, err := strconv.Atoi(svcPort[1]) if err != nil { - endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1])) + endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]), proto) } else { // we need to use the TargetPort (where the endpoints are running) for _, sp := range svc.Spec.Ports { if sp.Port == targetPort { - endps = lbc.getEndpoints(svc, sp.TargetPort) + endps = lbc.getEndpoints(svc, sp.TargetPort, proto) break } } @@ -335,7 +335,7 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream { svc := svcObj.(*api.Service) - endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort) + endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP) if len(endps) == 0 { glog.Warningf("service %v does no have any active endpoints", svcKey) upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer()) @@ -383,7 +383,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng for _, servicePort := range svc.Spec.Ports { if servicePort.Port == path.Backend.ServicePort.IntValue() { - endps := lbc.getEndpoints(svc, servicePort.TargetPort) + endps := lbc.getEndpoints(svc, 
servicePort.TargetPort, api.ProtocolTCP) if len(endps) == 0 { glog.Warningf("service %v does no have any active endpoints", svcKey) } @@ -526,7 +526,7 @@ func (lbc *loadBalancerController) getPemsFromIngress(data []interface{}) map[st } // getEndpoints returns a list of : for a given service/target port combination. -func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString) []nginx.UpstreamServer { +func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol) []nginx.UpstreamServer { glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String()) ep, err := lbc.endpLister.GetServiceEndpoints(s) if err != nil { @@ -538,6 +538,11 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints for _, ss := range ep.Subsets { for _, epPort := range ss.Ports { + + if !reflect.DeepEqual(epPort.Protocol, proto) { + continue + } + var targetPort int switch servicePort.Type { case intstr.Int: diff --git a/controllers/nginx/examples/udp/README.md b/controllers/nginx/examples/udp/README.md new file mode 100644 index 000000000..454873706 --- /dev/null +++ b/controllers/nginx/examples/udp/README.md @@ -0,0 +1,13 @@ + +To configure which services and ports will be exposed +``` +kubectl create -f udp-configmap-example.yaml +``` + +The file `udp-configmap-example.yaml` uses a ConfigMap where the key is the external port to use and the value is +`<namespace>/<service name>:<port>` +It is possible to use a number or the name of the port. 
+ +``` +kubectl create -f rc-udp.yaml +``` diff --git a/controllers/nginx/examples/udp/rc-udp.yaml b/controllers/nginx/examples/udp/rc-udp.yaml new file mode 100644 index 000000000..4a057fc5c --- /dev/null +++ b/controllers/nginx/examples/udp/rc-udp.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-lb +spec: + replicas: 1 + selector: + k8s-app: nginx-ingress-lb + template: + metadata: + labels: + k8s-app: nginx-ingress-lb + name: nginx-ingress-lb + spec: + containers: + - image: aledbf/nginx-third-party:0.9 + name: nginx-ingress-lb + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 10249 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + # use downward API + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 4444 + # we expose 8080 to access nginx stats in url /nginx-status + # this is optional + - containerPort: 8080 + hostPort: 8081 + - containerPort: 53 + hostPort: 53 + args: + - /nginx-ingress-controller + - --default-backend-service=default/default-http-backend + - --udp-services-configmap=default/udp-configmap-example diff --git a/controllers/nginx/examples/udp/udp-configmap-example.yaml b/controllers/nginx/examples/udp/udp-configmap-example.yaml new file mode 100644 index 000000000..bf368cb49 --- /dev/null +++ b/controllers/nginx/examples/udp/udp-configmap-example.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: udp-configmap-example +data: + "53": "kube-system/kube-dns:53" diff --git a/controllers/nginx/naxsi/basic.rules b/controllers/nginx/naxsi/basic.rules deleted file mode 100644 index a35777d81..000000000 --- a/controllers/nginx/naxsi/basic.rules 
+++ /dev/null @@ -1,8 +0,0 @@ -SecRulesEnabled; -DeniedUrl "/RequestDenied"; -## check rules -CheckRule "$SQL >= 8" BLOCK; -CheckRule "$RFI >= 8" BLOCK; -CheckRule "$TRAVERSAL >= 4" BLOCK; -CheckRule "$EVADE >= 4" BLOCK; -CheckRule "$XSS >= 8" BLOCK; \ No newline at end of file diff --git a/controllers/nginx/nginx.tmpl b/controllers/nginx/nginx.tmpl index 8b9328f1c..6b069028b 100644 --- a/controllers/nginx/nginx.tmpl +++ b/controllers/nginx/nginx.tmpl @@ -24,11 +24,6 @@ http { require("error_page") } - {{ if $cfg.enableWaf}} - # https://github.com/nbs-system/naxsi/wiki/basicsetup - include /etc/nginx/naxsi/*.rules; - {{ end }} - sendfile on; aio threads; tcp_nopush on; @@ -146,7 +141,7 @@ http { error_page 504 = @custom_504; # In case of errors try the next upstream server before returning an error - proxy_next_upstream error timeout invalid_header http_502 http_503 http_504; + proxy_next_upstream error timeout invalid_header http_502 http_503 http_504 {{ if $cfg.retryNonIdempotent }}non_idempotent{{ end }}; server { listen 80 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }}; @@ -219,11 +214,6 @@ http { proxy_pass http://{{ $location.Upstream.Name }}; } {{ end }} - {{ if $cfg.enableWaf}} - location /RequestDenied { - return 418; - } - {{ end }} {{ template "CUSTOM_ERRORS" $cfg }} } {{ end }} @@ -291,15 +281,15 @@ stream { # UDP services {{ range $i, $udpServer := .udpUpstreams }} upstream udp-{{ $udpServer.Upstream.Name }} { - {{ range $server := $tcpServer.Upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }}; + {{ range $server := $udpServer.Upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }}; {{ end }} } server { - listen {{ $tcpServer.Path }} udp; - proxy_timeout 1s; + listen {{ $udpServer.Path }} udp; + proxy_timeout 10s; proxy_responses 1; - proxy_pass udp-{{ $tcpServer.Upstream.Name }}; + proxy_pass udp-{{ $udpServer.Upstream.Name }}; } {{ end }} diff --git a/controllers/nginx/nginx/main.go 
b/controllers/nginx/nginx/main.go index 0c4aff112..b78a1e3a2 100644 --- a/controllers/nginx/nginx/main.go +++ b/controllers/nginx/nginx/main.go @@ -94,6 +94,10 @@ type nginxConfiguration struct { VtsStatusZoneSize string `structs:"vts-status-zone-size,omitempty"` + // RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) + // in case of an error. The previous behavior can be restored using the value true + RetryNonIdempotent bool `structs:"retry-non-idempotent"` + // http://nginx.org/en/docs/ngx_core_module.html#error_log // Configures logging level [debug | info | notice | warn | error | crit | alert | emerg] // Log levels above are listed in the order of increasing severity