From b479f09b9778ddf6502cf57c15ffab8e1e695b3c Mon Sep 17 00:00:00 2001 From: Travis Bot Date: Thu, 30 Aug 2018 19:50:29 +0000 Subject: [PATCH] Deploy GitHub Pages --- 404.html | 8 +- deploy/index.html | 8 +- deploy/rbac/index.html | 8 +- deploy/upgrade/index.html | 10 +- development/index.html | 8 +- examples/PREREQUISITES/index.html | 8 +- examples/affinity/cookie/README/index.html | 8 +- examples/auth/basic/README/index.html | 8 +- examples/auth/client-certs/README/index.html | 12 +- examples/auth/external-auth/README/index.html | 22 +- .../oauth-external-auth/README/index.html | 24 +- .../configuration-snippets/README/index.html | 12 +- .../custom-configuration/README/index.html | 8 +- .../custom-errors/README/index.html | 8 +- .../custom-headers/README/index.html | 8 +- .../custom-upstream-check/README/index.html | 8 +- .../external-auth-headers/README/index.html | 8 +- .../ssl-dh-param/README/index.html | 8 +- .../customization/sysctl/README/index.html | 8 +- examples/docker-registry/README/index.html | 10 +- examples/grpc/README/index.html | 8 +- examples/grpc/ingress.yaml | 2 +- examples/index.html | 8 +- examples/multi-tls/README/index.html | 8 +- examples/rewrite/README/index.html | 8 +- examples/static-ip/README/index.html | 8 +- examples/tls-termination/README/index.html | 8 +- how-it-works/index.html | 8 +- index.html | 8 +- search/search_index.json | 1809 +++++++++++++++++ sitemap.xml | 88 +- troubleshooting/index.html | 1578 ++++++++++++++ user-guide/cli-arguments/index.html | 8 +- user-guide/custom-errors/index.html | 12 +- user-guide/default-backend/index.html | 8 +- .../exposing-tcp-udp-services/index.html | 8 +- user-guide/external-articles/index.html | 8 +- user-guide/miscellaneous/index.html | 8 +- user-guide/monitoring/index.html | 13 +- user-guide/multiple-ingress/index.html | 28 +- .../annotations/index.html | 8 +- .../nginx-configuration/configmap/index.html | 8 +- .../custom-template/index.html | 8 +- 
user-guide/nginx-configuration/index.html | 8 +- .../nginx-configuration/log-format/index.html | 8 +- .../third-party-addons/modsecurity/index.html | 8 +- .../third-party-addons/opentracing/index.html | 8 +- user-guide/tls/index.html | 8 +- 48 files changed, 3639 insertions(+), 261 deletions(-) create mode 100644 search/search_index.json create mode 100644 troubleshooting/index.html diff --git a/404.html b/404.html index 498d234fb..af4d32d3c 100644 --- a/404.html +++ b/404.html @@ -744,8 +744,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -756,8 +756,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/deploy/index.html b/deploy/index.html index c8a3ee17b..bbf12e618 100644 --- a/deploy/index.html +++ b/deploy/index.html @@ -906,8 +906,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -918,8 +918,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/deploy/rbac/index.html b/deploy/rbac/index.html index 2f28ded2c..fb1d4e465 100644 --- a/deploy/rbac/index.html +++ b/deploy/rbac/index.html @@ -831,8 +831,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -843,8 +843,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/deploy/upgrade/index.html b/deploy/upgrade/index.html index 9127b3546..ab75a6d56 100644 --- a/deploy/upgrade/index.html +++ b/deploy/upgrade/index.html @@ -797,8 +797,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -809,8 +809,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1109,9 +1109,9 @@

    Upgrading

    Important

    -

    No matter the method you use for upgrading, if you use template overrides, make sure your templates are compatible with the new version of ingress-nginx.

    +

    Without Helm

    To upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment.

    diff --git a/development/index.html b/development/index.html index 0d058a460..a7bd6bd7b 100644 --- a/development/index.html +++ b/development/index.html @@ -748,8 +748,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -760,8 +760,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/PREREQUISITES/index.html b/examples/PREREQUISITES/index.html index 5faa3324b..6fa8c6cee 100644 --- a/examples/PREREQUISITES/index.html +++ b/examples/PREREQUISITES/index.html @@ -831,8 +831,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -843,8 +843,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/affinity/cookie/README/index.html b/examples/affinity/cookie/README/index.html index 6c5605212..39d55b407 100644 --- a/examples/affinity/cookie/README/index.html +++ b/examples/affinity/cookie/README/index.html @@ -797,8 +797,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -809,8 +809,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/auth/basic/README/index.html b/examples/auth/basic/README/index.html index e98afce57..728472bda 100644 --- a/examples/auth/basic/README/index.html +++ b/examples/auth/basic/README/index.html @@ -763,8 +763,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,8 +775,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/auth/client-certs/README/index.html b/examples/auth/client-certs/README/index.html index f6709d4c7..982198520 100644 --- a/examples/auth/client-certs/README/index.html +++ b/examples/auth/client-certs/README/index.html @@ -792,8 +792,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -804,8 +804,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1144,13 +1144,13 @@ -
    + Skip to content @@ -126,7 +126,7 @@ NGINX Ingress Controller - External authentication + External Basic Authentication @@ -763,11 +763,11 @@ - - External authentication + + External Basic Authentication @@ -804,8 +804,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1094,7 +1094,7 @@ -

    External authentication

    +

    External Basic Authentication

    Example 1:

    Use an external service (Basic Auth) located in https://httpbin.org

    $ kubectl create -f ingress.yaml
    @@ -1275,13 +1275,13 @@ BODY:
               
             
             
    -          
    + Skip to content @@ -126,7 +126,7 @@ NGINX Ingress Controller - External Authentication + External OAUTH Authentication @@ -754,8 +754,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,11 +775,11 @@ - - External Authentication + + External OAUTH Authentication @@ -1148,13 +1148,13 @@ -

    External Authentication

    +

    External OAUTH Authentication

    Overview

    The auth-url and auth-signin annotations allow you to use an external authentication provider to protect your Ingress resources.

    Important

    -

    this annotation requires nginx-ingress-controller v0.9.0 or greater.)

    +

    This annotation requires nginx-ingress-controller v0.9.0 or greater.)

    Key Detail

    This functionality is enabled by deploying multiple Ingress objects for a single host. @@ -1240,7 +1240,7 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g

    diff --git a/examples/customization/configuration-snippets/README/index.html b/examples/customization/configuration-snippets/README/index.html index f739df6e1..921c515bb 100644 --- a/examples/customization/configuration-snippets/README/index.html +++ b/examples/customization/configuration-snippets/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1138,7 +1138,7 @@ diff --git a/examples/customization/custom-configuration/README/index.html b/examples/customization/custom-configuration/README/index.html index b20cad28c..014aa8741 100644 --- a/examples/customization/custom-configuration/README/index.html +++ b/examples/customization/custom-configuration/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/custom-errors/README/index.html b/examples/customization/custom-errors/README/index.html index 6d9fb959e..66d44347d 100644 --- a/examples/customization/custom-errors/README/index.html +++ b/examples/customization/custom-errors/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/custom-headers/README/index.html b/examples/customization/custom-headers/README/index.html index e8d656f9c..0cf2ae395 100644 --- a/examples/customization/custom-headers/README/index.html +++ b/examples/customization/custom-headers/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/custom-upstream-check/README/index.html b/examples/customization/custom-upstream-check/README/index.html index a402a9d7a..c87dec7b6 100644 --- a/examples/customization/custom-upstream-check/README/index.html +++ b/examples/customization/custom-upstream-check/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/external-auth-headers/README/index.html b/examples/customization/external-auth-headers/README/index.html index 2138ef71e..8374801a0 100644 --- a/examples/customization/external-auth-headers/README/index.html +++ b/examples/customization/external-auth-headers/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/ssl-dh-param/README/index.html b/examples/customization/ssl-dh-param/README/index.html index c9213b914..b3d1a5944 100644 --- a/examples/customization/ssl-dh-param/README/index.html +++ b/examples/customization/ssl-dh-param/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/customization/sysctl/README/index.html b/examples/customization/sysctl/README/index.html index fe1b28c34..fa9ecfb6f 100644 --- a/examples/customization/sysctl/README/index.html +++ b/examples/customization/sysctl/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/docker-registry/README/index.html b/examples/docker-registry/README/index.html index 9e70fa06c..b4783c925 100644 --- a/examples/docker-registry/README/index.html +++ b/examples/docker-registry/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1168,9 +1168,9 @@

    Important

    +

    Running a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag.

    Please check deploy a plain http registry

    -

    With TLS

    Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller:

    wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml
    diff --git a/examples/grpc/README/index.html b/examples/grpc/README/index.html
    index 2705adfdc..88535d488 100644
    --- a/examples/grpc/README/index.html
    +++ b/examples/grpc/README/index.html
    @@ -752,8 +752,8 @@
     
     
       
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/grpc/ingress.yaml b/examples/grpc/ingress.yaml index 9f7739f92..02174c2db 100644 --- a/examples/grpc/ingress.yaml +++ b/examples/grpc/ingress.yaml @@ -4,7 +4,7 @@ metadata: annotations: kubernetes.io/ingress.class: "nginx" nginx.ingress.kubernetes.io/ssl-redirect: "true" - nginx.ingress.kubernetes.io/grpc-backend: "true" + nginx.ingress.kubernetes.io/backend-protocol: "GRPC" name: fortune-ingress namespace: default spec: diff --git a/examples/index.html b/examples/index.html index e4c07fb1b..39038039b 100644 --- a/examples/index.html +++ b/examples/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/multi-tls/README/index.html b/examples/multi-tls/README/index.html index dc3c3255e..dfd083d9d 100644 --- a/examples/multi-tls/README/index.html +++ b/examples/multi-tls/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/rewrite/README/index.html b/examples/rewrite/README/index.html index c71d38456..f8e7f0822 100644 --- a/examples/rewrite/README/index.html +++ b/examples/rewrite/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/static-ip/README/index.html b/examples/static-ip/README/index.html index 8c3bd464c..40c742fe0 100644 --- a/examples/static-ip/README/index.html +++ b/examples/static-ip/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/examples/tls-termination/README/index.html b/examples/tls-termination/README/index.html index 6820569ae..3beb0eff0 100644 --- a/examples/tls-termination/README/index.html +++ b/examples/tls-termination/README/index.html @@ -752,8 +752,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -764,8 +764,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/how-it-works/index.html b/how-it-works/index.html index 149853eb6..0e5ed9062 100644 --- a/how-it-works/index.html +++ b/how-it-works/index.html @@ -748,8 +748,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -760,8 +760,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/index.html b/index.html index 8fe1245de..dfe9a3db9 100644 --- a/index.html +++ b/index.html @@ -786,8 +786,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -798,8 +798,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..2a2d821ef --- /dev/null +++ b/search/search_index.json @@ -0,0 +1,1809 @@ +{ + "docs": [ + { + "location": "/", + "text": "Welcome\n\u00b6\n\n\nThis is the documentation for the NGINX Ingress Controller.\n\n\nIt is built around the \nKubernetes Ingress resource\n, using a \nConfigMap\n to store the NGINX configuration.\n\n\nLearn more about using Ingress on \nk8s.io\n.\n\n\nGetting Started\n\u00b6\n\n\nSee \nDeployment\n for a whirlwind tour that will get you started.", + "title": "Welcome" + }, + { + "location": "/#welcome", + "text": "This is the documentation for the NGINX Ingress Controller. It is built around the Kubernetes Ingress resource , using a ConfigMap to store the NGINX configuration. Learn more about using Ingress on k8s.io .", + "title": "Welcome" + }, + { + "location": "/#getting-started", + "text": "See Deployment for a whirlwind tour that will get you started.", + "title": "Getting Started" + }, + { + "location": "/deploy/", + "text": "Installation Guide\n\u00b6\n\n\nContents\n\u00b6\n\n\n\n\nGeneric Deployment\n\n\nMandatory command\n\n\nProvider Specific Steps\n\n\nDocker for Mac\n\n\nminikube\n\n\nAWS\n\n\nGCE - GKE\n\n\nAzure\n\n\nBaremetal\n\n\n\n\n\n\nVerify installation\n\n\nDetect installed version\n\n\nUsing Helm\n\n\n\n\nGeneric Deployment\n\u00b6\n\n\nThe following resources are required for a generic deployment.\n\n\nMandatory command\n\u00b6\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml\n\n\n\n\n\n\nProvider Specific Steps\n\u00b6\n\n\nThere are cloud provider specific yaml files.\n\n\nDocker for Mac\n\u00b6\n\n\nKubernetes is available in Docker for Mac (from \nversion 18.06.0-ce\n)\n\n\nCreate a service\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml\n\n\n\n\n\n\nminikube\n\u00b6\n\n\nFor 
standard usage:\n\n\nminikube addons enable ingress\n\n\n\n\n\n\nFor development:\n\n\n\n\nDisable the ingress addon:\n\n\n\n\n$\n minikube addons disable ingress\n\n\n\n\n\n\n\nExecute \nmake dev-env\n\n\nConfirm the \nnginx-ingress-controller\n deployment exists:\n\n\n\n\n$\n kubectl get pods -n ingress-nginx \n\nNAME READY STATUS RESTARTS AGE\n\n\ndefault-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s\n\n\nnginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s\n\n\n\n\n\n\nAWS\n\u00b6\n\n\nIn AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of \nType=LoadBalancer\n.\nSince Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB)\nPlease check the \nelastic load balancing AWS details page\n\n\nElastic Load Balancer - ELB\n\u00b6\n\n\nThis setup requires to choose in which layer (L4 or L7) we want to configure the ELB:\n\n\n\n\nLayer 4\n: use TCP as the listener protocol for ports 80 and 443.\n\n\nLayer 7\n: use HTTP as the listener protocol for port 80 and terminate TLS in the ELB\n\n\n\n\nFor L4:\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml\n\n\n\n\n\n\nFor L7:\n\n\nChange line of the file \nprovider/aws/service-l7.yaml\n replacing the dummy id with a valid one \n\"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\"\n\nThen execute:\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l7.yaml\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml\n\n\n\n\n\n\nThis example creates an ELB with just two listeners, one in port 80 and another in port 443\n\n\n\n\nNetwork Load Balancer 
(NLB)\n\u00b6\n\n\nThis type of load balancer is supported since v1.10.0 as an ALPHA feature.\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-nlb.yaml\n\n\n\n\n\n\nGCE - GKE\n\u00b6\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml\n\n\n\n\n\n\nImportant Note:\n proxy protocol is not supported in GCE/GKE\n\n\nAzure\n\u00b6\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml\n\n\n\n\n\n\nBaremetal\n\u00b6\n\n\nUsing \nNodePort\n:\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/baremetal/service-nodeport.yaml\n\n\n\n\n\n\nVerify installation\n\u00b6\n\n\nTo check if the ingress controller pods have started, run the following command:\n\n\nkubectl get pods --all-namespaces -l app=ingress-nginx --watch\n\n\n\n\n\n\nOnce the operator pods are running, you can cancel the above command by typing \nCtrl+C\n.\nNow, you are ready to create your first ingress.\n\n\nDetect installed version\n\u00b6\n\n\nTo detect which version of the ingress controller is running, exec into the pod and run \nnginx-ingress-controller version\n command.\n\n\nPOD_NAMESPACE=ingress-nginx\n\n\nPOD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=ingress-nginx -o jsonpath='{.items[0].metadata.name}')\n\n\nkubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version\n\n\n\n\n\n\nUsing Helm\n\u00b6\n\n\nNGINX Ingress controller can be installed via \nHelm\n using the chart \nstable/nginx-ingress\n from the official charts repository. 
\nTo install the chart with the release name \nmy-nginx\n:\n\n\nhelm install stable/nginx-ingress --name my-nginx\n\n\n\n\n\n\nIf the kubernetes cluster has RBAC enabled, then run:\n\n\nhelm install stable/nginx-ingress --name my-nginx --set rbac.create=true\n\n\n\n\n\n\nDetect installed version:\n\n\nPOD_NAME=$(kubectl get pods -l app=nginx-ingress -o jsonpath='{.items[0].metadata.name}')\n\n\nkubectl exec -it $POD_NAME -- /nginx-ingress-controller --version", + "title": "Installation Guide" + }, + { + "location": "/deploy/#installation-guide", + "text": "", + "title": "Installation Guide" + }, + { + "location": "/deploy/#contents", + "text": "Generic Deployment Mandatory command Provider Specific Steps Docker for Mac minikube AWS GCE - GKE Azure Baremetal Verify installation Detect installed version Using Helm", + "title": "Contents" + }, + { + "location": "/deploy/#generic-deployment", + "text": "The following resources are required for a generic deployment.", + "title": "Generic Deployment" + }, + { + "location": "/deploy/#mandatory-command", + "text": "kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml", + "title": "Mandatory command" + }, + { + "location": "/deploy/#provider-specific-steps", + "text": "There are cloud provider specific yaml files.", + "title": "Provider Specific Steps" + }, + { + "location": "/deploy/#docker-for-mac", + "text": "Kubernetes is available in Docker for Mac (from version 18.06.0-ce ) Create a service kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml", + "title": "Docker for Mac" + }, + { + "location": "/deploy/#minikube", + "text": "For standard usage: minikube addons enable ingress For development: Disable the ingress addon: $ minikube addons disable ingress Execute make dev-env Confirm the nginx-ingress-controller deployment exists: $ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE 
default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s", + "title": "minikube" + }, + { + "location": "/deploy/#aws", + "text": "In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer .\nSince Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB)\nPlease check the elastic load balancing AWS details page", + "title": "AWS" + }, + { + "location": "/deploy/#elastic-load-balancer-elb", + "text": "This setup requires to choose in which layer (L4 or L7) we want to configure the ELB: Layer 4 : use TCP as the listener protocol for ports 80 and 443. Layer 7 : use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml For L7: Change line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one \"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\" \nThen execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml This example creates an ELB with just two listeners, one in port 80 and another in port 443", + "title": "Elastic Load Balancer - ELB" + }, + { + "location": "/deploy/#network-load-balancer-nlb", + "text": "This type of load balancer is supported since v1.10.0 as an ALPHA feature. 
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-nlb.yaml", + "title": "Network Load Balancer (NLB)" + }, + { + "location": "/deploy/#gce-gke", + "text": "kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml Important Note: proxy protocol is not supported in GCE/GKE", + "title": "GCE - GKE" + }, + { + "location": "/deploy/#azure", + "text": "kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml", + "title": "Azure" + }, + { + "location": "/deploy/#baremetal", + "text": "Using NodePort : kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/baremetal/service-nodeport.yaml", + "title": "Baremetal" + }, + { + "location": "/deploy/#verify-installation", + "text": "To check if the ingress controller pods have started, run the following command: kubectl get pods --all-namespaces -l app=ingress-nginx --watch Once the operator pods are running, you can cancel the above command by typing Ctrl+C .\nNow, you are ready to create your first ingress.", + "title": "Verify installation" + }, + { + "location": "/deploy/#detect-installed-version", + "text": "To detect which version of the ingress controller is running, exec into the pod and run nginx-ingress-controller version command. POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version", + "title": "Detect installed version" + }, + { + "location": "/deploy/#using-helm", + "text": "NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository. 
\nTo install the chart with the release name my-nginx : helm install stable/nginx-ingress --name my-nginx If the kubernetes cluster has RBAC enabled, then run: helm install stable/nginx-ingress --name my-nginx --set rbac.create=true Detect installed version: POD_NAME=$(kubectl get pods -l app=nginx-ingress -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version", + "title": "Using Helm" + }, + { + "location": "/deploy/rbac/", + "text": "Role Based Access Control (RBAC)\n\u00b6\n\n\nOverview\n\u00b6\n\n\nThis example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled.\n\n\nRole Based Access Control is comprised of four layers:\n\n\n\n\nClusterRole\n - permissions assigned to a role that apply to an entire cluster\n\n\nClusterRoleBinding\n - binding a ClusterRole to a specific account\n\n\nRole\n - permissions assigned to a role that apply to a specific namespace\n\n\nRoleBinding\n - binding a Role to a specific account\n\n\n\n\nIn order for RBAC to be applied to an nginx-ingress-controller, that controller\nshould be assigned to a \nServiceAccount\n. That \nServiceAccount\n should be\nbound to the \nRole\ns and \nClusterRole\ns defined for the nginx-ingress-controller.\n\n\nService Accounts created in this example\n\u00b6\n\n\nOne ServiceAccount is created in this example, \nnginx-ingress-serviceaccount\n.\n\n\nPermissions Granted in this example\n\u00b6\n\n\nThere are two sets of permissions defined in this example. Cluster-wide\npermissions defined by the \nClusterRole\n named \nnginx-ingress-clusterrole\n, and\nnamespace specific permissions defined by the \nRole\n named \nnginx-ingress-role\n.\n\n\nCluster Permissions\n\u00b6\n\n\nThese permissions are granted in order for the nginx-ingress-controller to be\nable to function as an ingress across the cluster. 
These permissions are\ngranted to the ClusterRole named \nnginx-ingress-clusterrole\n\n\n\n\nconfigmaps\n, \nendpoints\n, \nnodes\n, \npods\n, \nsecrets\n: list, watch\n\n\nnodes\n: get\n\n\nservices\n, \ningresses\n: get, list, watch\n\n\nevents\n: create, patch\n\n\ningresses/status\n: update\n\n\n\n\nNamespace Permissions\n\u00b6\n\n\nThese permissions are granted specific to the nginx-ingress namespace. These\npermissions are granted to the Role named \nnginx-ingress-role\n\n\n\n\nconfigmaps\n, \npods\n, \nsecrets\n: get\n\n\nendpoints\n: get\n\n\n\n\nFurthermore to support leader-election, the nginx-ingress-controller needs to\nhave access to a \nconfigmap\n using the resourceName \ningress-controller-leader-nginx\n\n\n\n\nNote that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d\nverb because authorizers only have access to information that can be obtained\nfrom the request URL, method, and headers (resource names in a \u201ccreate\u201d request\nare part of the request body).\n\n\n\n\n\n\nconfigmaps\n: get, update (for resourceName \ningress-controller-leader-nginx\n)\n\n\nconfigmaps\n: create\n\n\n\n\nThis resourceName is the concatenation of the \nelection-id\n and the\n\ningress-class\n as defined by the ingress-controller, which defaults to:\n\n\n\n\nelection-id\n: \ningress-controller-leader\n\n\ningress-class\n: \nnginx\n\n\nresourceName\n : \n-\n\n\n\n\nPlease adapt accordingly if you overwrite either parameter when launching the\nnginx-ingress-controller.\n\n\nBindings\n\u00b6\n\n\nThe ServiceAccount \nnginx-ingress-serviceaccount\n is bound to the Role\n\nnginx-ingress-role\n and the ClusterRole \nnginx-ingress-clusterrole\n.\n\n\nThe serviceAccountName associated with the containers in the deployment must\nmatch the serviceAccount. 
The namespace references in the Deployment metadata, \ncontainer arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.", + "title": "Role Based Access Control (RBAC)" + }, + { + "location": "/deploy/rbac/#role-based-access-control-rbac", + "text": "", + "title": "Role Based Access Control (RBAC)" + }, + { + "location": "/deploy/rbac/#overview", + "text": "This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control is comprised of four layers: ClusterRole - permissions assigned to a role that apply to an entire cluster ClusterRoleBinding - binding a ClusterRole to a specific account Role - permissions assigned to a role that apply to a specific namespace RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller\nshould be assigned to a ServiceAccount . That ServiceAccount should be\nbound to the Role s and ClusterRole s defined for the nginx-ingress-controller.", + "title": "Overview" + }, + { + "location": "/deploy/rbac/#service-accounts-created-in-this-example", + "text": "One ServiceAccount is created in this example, nginx-ingress-serviceaccount .", + "title": "Service Accounts created in this example" + }, + { + "location": "/deploy/rbac/#permissions-granted-in-this-example", + "text": "There are two sets of permissions defined in this example. Cluster-wide\npermissions defined by the ClusterRole named nginx-ingress-clusterrole , and\nnamespace specific permissions defined by the Role named nginx-ingress-role .", + "title": "Permissions Granted in this example" + }, + { + "location": "/deploy/rbac/#cluster-permissions", + "text": "These permissions are granted in order for the nginx-ingress-controller to be\nable to function as an ingress across the cluster. 
These permissions are\ngranted to the ClusterRole named nginx-ingress-clusterrole configmaps , endpoints , nodes , pods , secrets : list, watch nodes : get services , ingresses : get, list, watch events : create, patch ingresses/status : update", + "title": "Cluster Permissions" + }, + { + "location": "/deploy/rbac/#namespace-permissions", + "text": "These permissions are granted specific to the nginx-ingress namespace. These\npermissions are granted to the Role named nginx-ingress-role configmaps , pods , secrets : get endpoints : get Furthermore to support leader-election, the nginx-ingress-controller needs to\nhave access to a configmap using the resourceName ingress-controller-leader-nginx Note that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d\nverb because authorizers only have access to information that can be obtained\nfrom the request URL, method, and headers (resource names in a \u201ccreate\u201d request\nare part of the request body). configmaps : get, update (for resourceName ingress-controller-leader-nginx ) configmaps : create This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to: election-id : ingress-controller-leader ingress-class : nginx resourceName : - Please adapt accordingly if you overwrite either parameter when launching the\nnginx-ingress-controller.", + "title": "Namespace Permissions" + }, + { + "location": "/deploy/rbac/#bindings", + "text": "The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole . The serviceAccountName associated with the containers in the deployment must\nmatch the serviceAccount. 
The namespace references in the Deployment metadata, \ncontainer arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.", + "title": "Bindings" + }, + { + "location": "/deploy/upgrade/", + "text": "Upgrading\n\u00b6\n\n\n\n\nImportant\n\n\nNo matter the method you use for upgrading, \nif you use template overrides,\nmake sure your templates are compatible with the new version of ingress-nginx\n.\n\n\n\n\nWithout Helm\n\u00b6\n\n\nTo upgrade your ingress-nginx installation, it should be enough to change the version of the image\nin the controller Deployment.\n\n\nI.e. if your deployment resource looks like (partial example):\n\n\nkind\n:\n \nDeployment\n\n\nmetadata\n:\n\n \nname\n:\n \nnginx-ingress-controller\n\n \nnamespace\n:\n \ningress-nginx\n\n\nspec\n:\n\n \nreplicas\n:\n \n1\n\n \nselector\n:\n \n...\n\n \ntemplate\n:\n\n \nmetadata\n:\n \n...\n\n \nspec\n:\n\n \ncontainers\n:\n\n \n-\n \nname\n:\n \nnginx-ingress-controller\n\n \nimage\n:\n \nquay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0\n\n \nargs\n:\n \n...\n\n\n\n\n\n\nsimply change the \n0.9.0\n tag to the version you wish to upgrade to.\nThe easiest way to do this is e.g. 
(do note you may need to change the name parameter according to your installation):\n\n\nkubectl set image deployment/nginx-ingress-controller \\\n nginx-ingress-controller=nginx:quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.18.0\n\n\n\n\n\nFor interactive editing, use \nkubectl edit deployment nginx-ingress-controller\n.\n\n\nWith Helm\n\u00b6\n\n\nIf you installed ingress-nginx using the Helm command in the deployment docs so its name is \nngx-ingress\n,\nyou should be able to upgrade using\n\n\nhelm upgrade --reuse-values ngx-ingress stable/nginx-ingress", + "title": "Upgrading" + }, + { + "location": "/deploy/upgrade/#upgrading", + "text": "Important No matter the method you use for upgrading, if you use template overrides,\nmake sure your templates are compatible with the new version of ingress-nginx .", + "title": "Upgrading" + }, + { + "location": "/deploy/upgrade/#without-helm", + "text": "To upgrade your ingress-nginx installation, it should be enough to change the version of the image\nin the controller Deployment. I.e. if your deployment resource looks like (partial example): kind : Deployment metadata : \n name : nginx-ingress-controller \n namespace : ingress-nginx spec : \n replicas : 1 \n selector : ... \n template : \n metadata : ... \n spec : \n containers : \n - name : nginx-ingress-controller \n image : quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 \n args : ... simply change the 0.9.0 tag to the version you wish to upgrade to.\nThe easiest way to do this is e.g. 
(do note you may need to change the name parameter according to your installation): kubectl set image deployment/nginx-ingress-controller \\\n nginx-ingress-controller=nginx:quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.18.0 For interactive editing, use kubectl edit deployment nginx-ingress-controller .", + "title": "Without Helm" + }, + { + "location": "/deploy/upgrade/#with-helm", + "text": "If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress ,\nyou should be able to upgrade using helm upgrade --reuse-values ngx-ingress stable/nginx-ingress", + "title": "With Helm" + }, + { + "location": "/user-guide/nginx-configuration/", + "text": "NGINX Configuration\n\u00b6\n\n\nThere are three ways to customize NGINX:\n\n\n\n\nConfigMap\n: using a Configmap to set global configurations in NGINX.\n\n\nAnnotations\n: use this if you want a specific configuration for a particular Ingress rule.\n\n\nCustom template\n: when more specific settings are required, like \nopen_file_cache\n, adjust \nlisten\n options as \nrcvbuf\n or when is not possible to change the configuration through the ConfigMap.", + "title": "NGINX Configuration" + }, + { + "location": "/user-guide/nginx-configuration/#nginx-configuration", + "text": "There are three ways to customize NGINX: ConfigMap : using a Configmap to set global configurations in NGINX. Annotations : use this if you want a specific configuration for a particular Ingress rule. 
Custom template : when more specific settings are required, like open_file_cache , adjust listen options as rcvbuf or when is not possible to change the configuration through the ConfigMap.", + "title": "NGINX Configuration" + }, + { + "location": "/user-guide/nginx-configuration/annotations/", + "text": "Annotations\n\u00b6\n\n\nYou can add these Kubernetes annotations to specific Ingress objects to customize their behavior.\n\n\n\n\nTip\n\n\nAnnotation keys and values can only be strings.\nOther types, such as boolean or numeric values must be quoted,\ni.e. \n\"true\"\n, \n\"false\"\n, \n\"100\"\n.\n\n\n\n\n\n\nNote\n\n\nThe annotation prefix can be changed using the\n\n--annotations-prefix\n command line argument\n,\nbut the default is \nnginx.ingress.kubernetes.io\n, as described in the\ntable below.\n\n\n\n\n\n\n\n\n\n\nName\n\n\ntype\n\n\n\n\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/add-base-url\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/app-root\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/affinity\n\n\ncookie\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-realm\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-secret\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-type\n\n\nbasic or digest\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-secret\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-verify-depth\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-verify-client\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-error-page\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream\n\n\n\"true\" or 
\"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/auth-url\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/backend-protocol\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/base-url-scheme\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/configuration-snippet\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/default-backend\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/enable-cors\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-origin\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-methods\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-headers\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-credentials\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-max-age\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/force-ssl-redirect\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/from-to-www-redirect\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/grpc-backend\n\n\n\"true\" or 
\"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/limit-connections\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/limit-rps\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/permanent-redirect\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/permanent-redirect-code\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-body-size\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-cookie-domain\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-connect-timeout\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-send-timeout\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-read-timeout\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-next-upstream\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-next-upstream-tries\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-request-buffering\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-redirect-from\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-redirect-to\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/rewrite-log\n\n\nURI\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/rewrite-target\n\n\nURI\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/secure-backends\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/secure-verify-ca-secret\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/server-alias\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/server-snippet\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/service-upstream\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/session-cookie-name\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/session-cookie-hash\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/ssl-redirect\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/ssl-passthrough\n\n\n\"true\" or 
\"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/upstream-max-fails\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/upstream-fail-timeout\n\n\nnumber\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/upstream-hash-by\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/load-balance\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/upstream-vhost\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/whitelist-source-range\n\n\nCIDR\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-buffering\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-buffer-size\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/ssl-ciphers\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/connection-proxy-header\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/enable-access-log\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf-debug\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf-extra-rules\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/enable-influxdb\n\n\n\"true\" or \"false\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/influxdb-measurement\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/influxdb-port\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/influxdb-host\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/influxdb-server-name\n\n\nstring\n\n\n\n\n\n\n\n\nRewrite\n\u00b6\n\n\nIn some scenarios the exposed URL in the backend service differs from the specified path in the Ingress rule. 
Without a rewrite any request will return 404.\nSet the annotation \nnginx.ingress.kubernetes.io/rewrite-target\n to the path expected by the service.\n\n\nIf the application contains relative links it is possible to add an additional annotation \nnginx.ingress.kubernetes.io/add-base-url\n that will prepend a \nbase\n tag\n in the header of the returned HTML from the backend.\n\n\nIf the scheme of \nbase\n tag\n need to be specific, set the annotation \nnginx.ingress.kubernetes.io/base-url-scheme\n to the scheme such as \nhttp\n and \nhttps\n.\n\n\nIf the Application Root is exposed in a different path and needs to be redirected, set the annotation \nnginx.ingress.kubernetes.io/app-root\n to redirect requests for \n/\n.\n\n\n\n\nExample\n\n\nPlease check the \nrewrite\n example.\n\n\n\n\nSession Affinity\n\u00b6\n\n\nThe annotation \nnginx.ingress.kubernetes.io/affinity\n enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server.\nThe only affinity type available for NGINX is \ncookie\n.\n\n\n\n\nExample\n\n\nPlease check the \naffinity\n example.\n\n\n\n\nCookie affinity\n\u00b6\n\n\nIf you use the \ncookie\n affinity type you can also specify the name of the cookie that will be used to route the requests with the annotation \nnginx.ingress.kubernetes.io/session-cookie-name\n. The default is to create a cookie named 'INGRESSCOOKIE'.\n\n\nIn case of NGINX the annotation \nnginx.ingress.kubernetes.io/session-cookie-hash\n defines which algorithm will be used to hash the used upstream. 
Default value is \nmd5\n and possible values are \nmd5\n, \nsha1\n and \nindex\n.\n\n\n\n\nAttention\n\n\nThe \nindex\n option is not an actual hash; an in-memory index is used instead, which has less overhead.\nHowever, with \nindex\n, matching against a changing upstream server list is inconsistent.\nSo, at reload, if upstream servers have changed, index values are not guaranteed to correspond to the same server as before!\n\nUse \nindex\n with caution\n and only if you need to!\n\n\n\n\nIn NGINX this feature is implemented by the third party module \nnginx-sticky-module-ng\n. The workflow used to define which upstream server will be used is explained \nhere\n\n\nAuthentication\n\u00b6\n\n\nIs possible to add authentication adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords inside the key \nauth\n.\n\n\nThe annotations are:\n\n\nnginx.ingress.kubernetes.io/auth-type: [basic|digest]\n\n\n\n\n\nIndicates the \nHTTP Authentication Type: Basic or Digest Access Authentication\n.\n\n\nnginx.ingress.kubernetes.io/auth-secret: secretName\n\n\n\n\n\nThe name of the Secret that contains the usernames and passwords which are granted access to the \npath\ns defined in the Ingress rules.\nThis annotation also accepts the alternative form \"namespace/secretName\", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace.\n\n\nnginx.ingress.kubernetes.io/auth-realm: \"realm string\"\n\n\n\n\n\n\n\nExample\n\n\nPlease check the \nauth\n example.\n\n\n\n\nCustom NGINX upstream checks\n\u00b6\n\n\nNGINX exposes some flags in the \nupstream configuration\n that enable the configuration of each server in the upstream. The Ingress controller allows custom \nmax_fails\n and \nfail_timeout\n parameters in a global context using \nupstream-max-fails\n and \nupstream-fail-timeout\n in the NGINX ConfigMap or in a particular Ingress rule. 
\nupstream-max-fails\n defaults to 0. This means NGINX will respect the container's \nreadinessProbe\n if it is defined. If there is no probe and no values for \nupstream-max-fails\n NGINX will continue to send traffic to the container.\n\n\n\n\nTip\n\n\nWith the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams.**\n\n\n\n\nTo use custom values in an Ingress rule define these annotations:\n\n\nnginx.ingress.kubernetes.io/upstream-max-fails\n: number of unsuccessful attempts to communicate with the server that should occur in the duration set by the \nupstream-fail-timeout\n parameter to consider the server unavailable.\n\n\nnginx.ingress.kubernetes.io/upstream-fail-timeout\n: time in seconds during which the specified number of unsuccessful attempts to communicate with the server should occur to consider the server unavailable. This is also the period of time the server will be considered unavailable.\n\n\nIn NGINX, backend server pools are called \"\nupstreams\n\". Each upstream contains the endpoints for a service. An upstream is created for each service that has Ingress rules defined.\n\n\n\n\nAttention\n\n\nAll Ingress rules using the same service will use the same upstream.\n\nOnly one of the Ingress rules should define annotations to configure the upstream servers.\n\n\n\n\n\n\nExample\n\n\nPlease check the \ncustom upstream check\n example.\n\n\n\n\nCustom NGINX upstream hashing\n\u00b6\n\n\nNGINX supports load balancing by client-server mapping based on \nconsistent hashing\n for a given key. The key can contain text, variables or any combination thereof. This feature allows for request stickiness other than client IP or cookies. 
The \nketama\n consistent hashing method will be used which ensures only a few keys would be remapped to different servers on upstream group changes.\n\n\nTo enable consistent hashing for a backend:\n\n\nnginx.ingress.kubernetes.io/upstream-hash-by\n: the nginx variable, text value or any combination thereof to use for consistent hashing. For example \nnginx.ingress.kubernetes.io/upstream-hash-by: \"$request_uri\"\n to consistently hash upstream requests by the current request URI.\n\n\nCustom NGINX load balancing\n\u00b6\n\n\nThis is similar to (https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#load-balance) but configures load balancing algorithm per ingress.\n\n\n\n\nNote that \nnginx.ingress.kubernetes.io/upstream-hash-by\n takes preference over this. If this and \nnginx.ingress.kubernetes.io/upstream-hash-by\n are not set then we fallback to using globally configured load balancing algorithm.\n\n\n\n\nCustom NGINX upstream vhost\n\u00b6\n\n\nThis configuration setting allows you to control the value for host in the following statement: \nproxy_set_header Host $host\n, which forms part of the location block. 
This is useful if you need to call the upstream server by something other than \n$host\n.\n\n\nClient Certificate Authentication\n\u00b6\n\n\nIt is possible to enable Client Certificate Authentication using additional annotations in Ingress Rule.\n\n\nThe annotations are:\n\n\n\n\nnginx.ingress.kubernetes.io/auth-tls-secret: secretName\n:\n The name of the Secret that contains the full Certificate Authority chain \nca.crt\n that is enabled to authenticate against this Ingress.\n This annotation also accepts the alternative form \"namespace/secretName\", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace.\n\n\nnginx.ingress.kubernetes.io/auth-tls-verify-depth\n:\n The validation depth between the provided client certificate and the Certification Authority chain.\n\n\nnginx.ingress.kubernetes.io/auth-tls-verify-client\n:\n Enables verification of client certificates.\n\n\nnginx.ingress.kubernetes.io/auth-tls-error-page\n:\n The URL/Page that user should be redirected in case of a Certificate Authentication Error\n\n\nnginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream\n:\n Indicates if the received certificates should be passed or not to the upstream server. 
By default this is disabled.\n\n\n\n\n\n\nExample\n\n\nPlease check the \nclient-certs\n example.\n\n\n\n\n\n\nAttention\n\n\nTLS with Client Authentication is \nnot\n possible in Cloudflare and might result in unexpected behavior.\n\n\nCloudflare only allows Authenticated Origin Pulls and is required to use their own certificate: \nhttps://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/\n\n\nOnly Authenticated Origin Pulls are allowed and can be configured by following their tutorial: \nhttps://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls\n\n\n\n\nConfiguration snippet\n\u00b6\n\n\nUsing this annotation you can add additional configuration to the NGINX location. For example:\n\n\nnginx.ingress.kubernetes.io/configuration-snippet\n:\n \n|\n\n \nmore_set_headers \"Request-Id: $req_id\";\n\n\n\n\n\n\nDefault Backend\n\u00b6\n\n\nThe ingress controller requires a \ndefault backend\n.\nThis service handles the response when the service in the Ingress rule does not have endpoints.\nThis is a global configuration for the ingress controller. In some cases could be required to return a custom content or format. 
In this scenario we can use the annotation \nnginx.ingress.kubernetes.io/default-backend: \n to specify a custom default backend.\n\n\nEnable CORS\n\u00b6\n\n\nTo enable Cross-Origin Resource Sharing (CORS) in an Ingress rule,\nadd the annotation \nnginx.ingress.kubernetes.io/enable-cors: \"true\"\n.\nThis will add a section in the server location enabling this functionality.\n\n\nCORS can be controlled with the following annotations:\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-methods\n\n controls which methods are accepted.\n This is a multi-valued field, separated by ',' and accepts only letters (upper and lower case).\n Example: \nnginx.ingress.kubernetes.io/cors-allow-methods: \"PUT, GET, POST, OPTIONS\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-headers\n\n controls which headers are accepted.\n This is a multi-valued field, separated by ',' and accepts letters, numbers, _ and -.\n Example: \nnginx.ingress.kubernetes.io/cors-allow-headers: \"X-Forwarded-For, X-app123-XPTO\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-origin\n\n controls what's the accepted Origin for CORS and defaults to '*'.\n This is a single field value, with the following format: \nhttp(s)://origin-site.com\n or \nhttp(s)://origin-site.com:port\n\n Example: \nnginx.ingress.kubernetes.io/cors-allow-origin: \"https://origin-site.com:4443\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-allow-credentials\n\n controls if credentials can be passed during CORS operations.\n Example: \nnginx.ingress.kubernetes.io/cors-allow-credentials: \"true\"\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/cors-max-age\n\n controls how long preflight requests can be cached.\n Example: \nnginx.ingress.kubernetes.io/cors-max-age: 600\n\n\n\n\n\n\n\n\nNote\n\n\nFor more information please see \nhttps://enable-cors.org\n\n\n\n\nServer Alias\n\u00b6\n\n\nTo add Server Aliases to an Ingress rule add the annotation \nnginx.ingress.kubernetes.io/server-alias: \"\"\n.\nThis will create a server with 
the same configuration, but a different \nserver_name\n as the provided host.\n\n\n\n\nNote\n\n\nA server-alias name cannot conflict with the hostname of an existing server. If it does the server-alias annotation will be ignored.\nIf a server-alias is created and later a new server with the same hostname is created,\nthe new server configuration will take place over the alias configuration.\n\n\n\n\nFor more information please see \nthe \nserver_name\n documentation\n.\n\n\nServer snippet\n\u00b6\n\n\nUsing the annotation \nnginx.ingress.kubernetes.io/server-snippet\n it is possible to add custom configuration in the server configuration block.\n\n\napiVersion\n:\n \nextensions/v1beta1\n\n\nkind\n:\n \nIngress\n\n\nmetadata\n:\n\n \nannotations\n:\n\n \nnginx.ingress.kubernetes.io/server-snippet\n:\n \n|\n\n\nset $agentflag 0;\n\n\n\nif ($http_user_agent ~* \"(Mobile)\" ){\n\n \nset $agentflag 1;\n\n\n}\n\n\n\nif ( $agentflag = 1 ) {\n\n \nreturn 301 https://m.example.com;\n\n\n}\n\n\n\n\n\n\n\n\nAttention\n\n\nThis annotation can be used only once per host.\n\n\n\n\nClient Body Buffer Size\n\u00b6\n\n\nSets buffer size for reading client request body per location. In case the request body is larger than the buffer,\nthe whole body or only its part is written to a temporary file. By default, buffer size is equal to two memory pages.\nThis is 8K on x86, other 32-bit platforms, and x86-64. It is usually 16K on other 64-bit platforms. 
This annotation is\napplied to each location provided in the ingress rule.\n\n\n\n\nNote\n\n\nThe annotation value must be given in a format understood by Nginx.\n\n\n\n\n\n\nExample\n\n\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size: \"1000\"\n # 1000 bytes\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size: 1k\n # 1 kilobyte\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size: 1K\n # 1 kilobyte\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size: 1m\n # 1 megabyte\n\n\nnginx.ingress.kubernetes.io/client-body-buffer-size: 1M\n # 1 megabyte\n\n\n\n\n\n\nFor more information please see \nhttp://nginx.org\n\n\nExternal Authentication\n\u00b6\n\n\nTo use an existing service that provides authentication the Ingress rule can be annotated with \nnginx.ingress.kubernetes.io/auth-url\n to indicate the URL where the HTTP request should be sent.\n\n\nnginx.ingress.kubernetes.io/auth-url\n:\n \n\"URL\n \nto\n \nthe\n \nauthentication\n \nservice\"\n\n\n\n\n\n\nAdditionally it is possible to set:\n\n\n\n\nnginx.ingress.kubernetes.io/auth-method\n:\n \n\n to specify the HTTP method to use.\n\n\nnginx.ingress.kubernetes.io/auth-signin\n:\n \n\n to specify the location of the error page.\n\n\nnginx.ingress.kubernetes.io/auth-response-headers\n:\n \n\n to specify headers to pass to backend once authentication request completes.\n\n\nnginx.ingress.kubernetes.io/auth-request-redirect\n:\n \n\n to specify the X-Auth-Request-Redirect header value.\n\n\n\n\n\n\nExample\n\n\nPlease check the \nexternal-auth\n example.\n\n\n\n\nRate limiting\n\u00b6\n\n\nThese annotations define a limit on the connections that can be opened by a single client IP address.\nThis can be used to mitigate \nDDoS Attacks\n.\n\n\n\n\nnginx.ingress.kubernetes.io/limit-connections\n: number of concurrent connections allowed from a single IP address.\n\n\nnginx.ingress.kubernetes.io/limit-rps\n: number of connections that may be accepted from a given IP each 
second.\n\n\nnginx.ingress.kubernetes.io/limit-rpm\n: number of connections that may be accepted from a given IP each minute.\n\n\nnginx.ingress.kubernetes.io/limit-rate-after\n: sets the initial amount after which the further transmission of a response to a client will be rate limited.\n\n\nnginx.ingress.kubernetes.io/limit-rate\n: rate of request that accepted from a client each second.\n\n\n\n\nYou can specify the client IP source ranges to be excluded from rate-limiting through the \nnginx.ingress.kubernetes.io/limit-whitelist\n annotation. The value is a comma separated list of CIDRs.\n\n\nIf you specify multiple annotations in a single Ingress rule, \nlimit-rpm\n, and then \nlimit-rps\n takes precedence.\n\n\nThe annotation \nnginx.ingress.kubernetes.io/limit-rate\n, \nnginx.ingress.kubernetes.io/limit-rate-after\n define a limit the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.\n\n\nTo configure this setting globally for all Ingress rules, the \nlimit-rate-after\n and \nlimit-rate\n value may be set in the \nNGINX ConfigMap\n. if you set the value in ingress annotation will cover global setting.\n\n\nPermanent Redirect\n\u00b6\n\n\nThis annotation allows to return a permanent redirect instead of sending data to the upstream. For example \nnginx.ingress.kubernetes.io/permanent-redirect: https://www.google.com\n would redirect everything to Google.\n\n\nPermanent Redirect Code\n\u00b6\n\n\nThis annotation allows you to modify the status code used for permanent redirects. 
For example \nnginx.ingress.kubernetes.io/permanent-redirect-code: '308'\n would return your permanent-redirect with a 308.\n\n\nSSL Passthrough\n\u00b6\n\n\nThe annotation \nnginx.ingress.kubernetes.io/ssl-passthrough\n allows to configure TLS termination in the pod and not in NGINX.\n\n\n\n\nAttention\n\n\nUsing the annotation \nnginx.ingress.kubernetes.io/ssl-passthrough\n invalidates all the other available annotations.\nThis is because SSL Passthrough works on level 4 of the OSI stack (TCP), not on the HTTP/HTTPS level.\n\n\n\n\n\n\nAttention\n\n\nThe use of this annotation requires the flag \n--enable-ssl-passthrough\n (By default it is disabled).\n\n\n\n\nSecure backends DEPRECATED (since 0.18.0)\n\u00b6\n\n\nPlease use \nnginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\"\n\n\nBy default NGINX uses plain HTTP to reach the services.\nAdding the annotation \nnginx.ingress.kubernetes.io/secure-backends: \"true\"\n in the Ingress rule changes the protocol to HTTPS.\nIf you want to validate the upstream against a specific certificate, you can create a secret with it and reference the secret with the annotation \nnginx.ingress.kubernetes.io/secure-verify-ca-secret\n.\n\n\n\n\nAttention\n\n\nNote that if an invalid or non-existent secret is given,\nthe ingress controller will ignore the \nsecure-backends\n annotation.\n\n\n\n\nService Upstream\n\u00b6\n\n\nBy default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration.\n\n\nThe \nnginx.ingress.kubernetes.io/service-upstream\n annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port.\n\n\nThis can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. 
See issue \n#257\n.\n\n\nKnown Issues\n\u00b6\n\n\nIf the \nservice-upstream\n annotation is specified the following things should be taken into consideration:\n\n\n\n\nSticky Sessions will not work as only round-robin load balancing is supported.\n\n\nThe \nproxy_next_upstream\n directive will not have any effect meaning on error the request will not be dispatched to another upstream.\n\n\n\n\nServer-side HTTPS enforcement through redirect\n\u00b6\n\n\nBy default the controller redirects (308) to HTTPS if TLS is enabled for that ingress.\nIf you want to disable this behavior globally, you can use \nssl-redirect: \"false\"\n in the NGINX \nconfig map\n.\n\n\nTo configure this feature for specific ingress resources, you can use the \nnginx.ingress.kubernetes.io/ssl-redirect: \"false\"\n\nannotation in the particular resource.\n\n\nWhen using SSL offloading outside of cluster (e.g. AWS ELB) it may be useful to enforce a redirect to HTTPS\neven when there is no TLS certificate available.\nThis can be achieved by using the \nnginx.ingress.kubernetes.io/force-ssl-redirect: \"true\"\n annotation in the particular resource.\n\n\nRedirect from/to www.\n\u00b6\n\n\nIn some scenarios is required to redirect from \nwww.domain.com\n to \ndomain.com\n or vice versa.\nTo enable this feature use the annotation \nnginx.ingress.kubernetes.io/from-to-www-redirect: \"true\"\n\n\n\n\nAttention\n\n\nIf at some point a new Ingress is created with a host equal to one of the options (like \ndomain.com\n) the annotation will be omitted.\n\n\n\n\nWhitelist source range\n\u00b6\n\n\nYou can specify allowed client IP source ranges through the \nnginx.ingress.kubernetes.io/whitelist-source-range\n annotation.\nThe value is a comma separated list of \nCIDRs\n, e.g. 
\n10.0.0.0/24,172.10.0.1\n.\n\n\nTo configure this setting globally for all Ingress rules, the \nwhitelist-source-range\n value may be set in the \nNGINX ConfigMap\n.\n\n\n\n\nNote\n\n\nAdding an annotation to an Ingress rule overrides any global restriction.\n\n\n\n\nCustom timeouts\n\u00b6\n\n\nUsing the configuration configmap it is possible to set the default global timeout for connections to the upstream servers.\nIn some scenarios is required to have different values. To allow this we provide annotations that allows this customization:\n\n\n\n\nnginx.ingress.kubernetes.io/proxy-connect-timeout\n\n\nnginx.ingress.kubernetes.io/proxy-send-timeout\n\n\nnginx.ingress.kubernetes.io/proxy-read-timeout\n\n\nnginx.ingress.kubernetes.io/proxy-next-upstream\n\n\nnginx.ingress.kubernetes.io/proxy-next-upstream-tries\n\n\nnginx.ingress.kubernetes.io/proxy-request-buffering\n\n\n\n\nProxy redirect\n\u00b6\n\n\nWith the annotations \nnginx.ingress.kubernetes.io/proxy-redirect-from\n and \nnginx.ingress.kubernetes.io/proxy-redirect-to\n it is possible to\nset the text that should be changed in the \nLocation\n and \nRefresh\n header fields of a proxied server response (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect)\n\n\nSetting \"off\" or \"default\" in the annotation \nnginx.ingress.kubernetes.io/proxy-redirect-from\n disables \nnginx.ingress.kubernetes.io/proxy-redirect-to\n,\notherwise, both annotations must be used in unison. Note that each annotation must be a string without spaces.\n\n\nBy default the value of each annotation is \"off\".\n\n\nCustom max body size\n\u00b6\n\n\nFor NGINX, an 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body. 
This size can be configured by the parameter \nclient_max_body_size\n.\n\n\nTo configure this setting globally for all Ingress rules, the \nproxy-body-size\n value may be set in the \nNGINX ConfigMap\n.\nTo use custom values in an Ingress rule define these annotation:\n\n\nnginx.ingress.kubernetes.io/proxy-body-size\n:\n \n8m\n\n\n\n\n\n\nProxy cookie domain\n\u00b6\n\n\nSets a text that \nshould be changed in the domain attribute\n of the \"Set-Cookie\" header fields of a proxied server response.\n\n\nTo configure this setting globally for all Ingress rules, the \nproxy-cookie-domain\n value may be set in the \nNGINX ConfigMap\n.\n\n\nProxy buffering\n\u00b6\n\n\nEnable or disable proxy buffering \nproxy_buffering\n.\nBy default proxy buffering is disabled in the NGINX config.\n\n\nTo configure this setting globally for all Ingress rules, the \nproxy-buffering\n value may be set in the \nNGINX ConfigMap\n.\nTo use custom values in an Ingress rule define these annotation:\n\n\nnginx.ingress.kubernetes.io/proxy-buffering\n:\n \n\"on\"\n\n\n\n\n\n\nProxy buffer size\n\u00b6\n\n\nSets the size of the buffer \nproxy_buffer_size\n used for reading the first part of the response received from the proxied server.\nBy default proxy buffer size is set as \"4k\"\n\n\nTo configure this setting globally, set \nproxy-buffer-size\n in \nNGINX ConfigMap\n. To use custom values in an Ingress rule, define this annotation:\n\n\nnginx.ingress.kubernetes.io/proxy-buffer-size\n:\n \n\"8k\"\n\n\n\n\n\n\nSSL ciphers\n\u00b6\n\n\nSpecifies the \nenabled ciphers\n.\n\n\nUsing this annotation will set the \nssl_ciphers\n directive at the server level. 
This configuration is active for all the paths in the host.\n\n\nnginx.ingress.kubernetes.io/ssl-ciphers\n:\n \n\"ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP\"\n\n\n\n\n\n\nConnection proxy header\n\u00b6\n\n\nUsing this annotation will override the default connection header set by NGINX.\nTo use custom values in an Ingress rule, define the annotation:\n\n\nnginx.ingress.kubernetes.io/connection-proxy-header\n:\n \n\"keep-alive\"\n\n\n\n\n\n\nEnable Access Log\n\u00b6\n\n\nAccess logs are enabled by default, but in some scenarios access logs might be required to be disabled for a given\ningress. To do this, use the annotation:\n\n\nnginx.ingress.kubernetes.io/enable-access-log\n:\n \n\"false\"\n\n\n\n\n\n\nEnable Rewrite Log\n\u00b6\n\n\nRewrite logs are not enabled by default. In some scenarios it could be required to enable NGINX rewrite logs.\nNote that rewrite logs are sent to the error_log file at the notice level. To enable this feature use the annotation:\n\n\nnginx.ingress.kubernetes.io/enable-rewrite-log\n:\n \n\"true\"\n\n\n\n\n\n\nLua Resty WAF\n\u00b6\n\n\nUsing \nlua-resty-waf-*\n annotations we can enable and control the \nlua-resty-waf\n\nWeb Application Firewall per location.\n\n\nFollowing configuration will enable the WAF for the paths defined in the corresponding ingress:\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf\n:\n \n\"active\"\n\n\n\n\n\n\nIn order to run it in debugging mode you can set \nnginx.ingress.kubernetes.io/lua-resty-waf-debug\n to \n\"true\"\n in addition to the above configuration.\nThe other possible values for \nnginx.ingress.kubernetes.io/lua-resty-waf\n are \ninactive\n and \nsimulate\n.\nIn \ninactive\n mode WAF won't do anything, whereas in \nsimulate\n mode it will log a warning message if there's a matching WAF rule for given request. 
This is useful to debug a rule and eliminate possible false positives before fully deploying it.\n\n\nlua-resty-waf\n comes with predefined set of rules \nhttps://github.com/p0pr0ck5/lua-resty-waf/tree/84b4f40362500dd0cb98b9e71b5875cb1a40f1ad/rules\n that covers ModSecurity CRS.\nYou can use \nnginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets\n to ignore a subset of those rulesets. For an example:\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets\n:\n \n\"41000_sqli,\n \n42000_xss\"\n\n\n\n\n\n\nwill ignore the two mentioned rulesets.\n\n\nIt is also possible to configure custom WAF rules per ingress using the \nnginx.ingress.kubernetes.io/lua-resty-waf-extra-rules\n annotation. For an example the following snippet will configure a WAF rule to deny requests with query string value that contains word \nfoo\n:\n\n\nnginx.ingress.kubernetes.io/lua-resty-waf-extra-rules\n:\n \n'[=[\n \n{\n \n\"access\":\n \n[\n \n{\n \n\"actions\":\n \n{\n \n\"disrupt\"\n \n:\n \n\"DENY\"\n \n},\n \n\"id\":\n \n10001,\n \n\"msg\":\n \n\"my\n \ncustom\n \nrule\",\n \n\"operator\":\n \n\"STR_CONTAINS\",\n \n\"pattern\":\n \n\"foo\",\n \n\"vars\":\n \n[\n \n{\n \n\"parse\":\n \n[\n \n\"values\",\n \n1\n \n],\n \n\"type\":\n \n\"REQUEST_ARGS\"\n \n}\n \n]\n \n}\n \n],\n \n\"body_filter\":\n \n[],\n \n\"header_filter\":[]\n \n}\n \n]=]'\n\n\n\n\n\n\nFor details on how to write WAF rules, please refer to \nhttps://github.com/p0pr0ck5/lua-resty-waf\n.\n\n\ngRPC backend DEPRECATED (since 0.18.0)\n\u00b6\n\n\nPlease use \nnginx.ingress.kubernetes.io/backend-protocol: \"GRPC\"\n or \nnginx.ingress.kubernetes.io/backend-protocol: \"GRPCS\"\n\n\nSince NGINX 1.13.10 it is possible to expose \ngRPC services natively\n\n\nYou only need to add the annotation \nnginx.ingress.kubernetes.io/grpc-backend: \"true\"\n to enable this feature.\nAdditionally, if the gRPC service requires TLS, add \nnginx.ingress.kubernetes.io/secure-backends: \"true\"\n.\n\n\n\n\nAttention\n\n\nThis 
feature requires HTTP2 to work which means we need to expose this service using HTTPS.\nExposing a gRPC service using HTTP is not supported.\n\n\n\n\nInfluxDB\n\u00b6\n\n\nUsing \ninfluxdb-*\n annotations we can monitor requests passing through a Location by sending them to an InfluxDB backend exposing the UDP socket\nusing the \nnginx-influxdb-module\n.\n\n\nnginx.ingress.kubernetes.io/enable-influxdb\n:\n \n\"true\"\n\n\nnginx.ingress.kubernetes.io/influxdb-measurement\n:\n \n\"nginx-reqs\"\n\n\nnginx.ingress.kubernetes.io/influxdb-port\n:\n \n\"8089\"\n\n\nnginx.ingress.kubernetes.io/influxdb-host\n:\n \n\"127.0.0.1\"\n\n\nnginx.ingress.kubernetes.io/influxdb-server-name\n:\n \n\"nginx-ingress\"\n\n\n\n\n\n\nFor the \ninfluxdb-host\n parameter you have two options:\n\n\n\n\nUse an InfluxDB server configured with the \nUDP protocol\n enabled. \n\n\nDeploy Telegraf as a sidecar proxy to the Ingress controller configured to listen UDP with the \nsocket listener input\n and to write using\nanyone of the \noutputs plugins\n like InfluxDB, Apache Kafka,\nPrometheus, etc.. (recommended)\n\n\n\n\nIt's important to remember that there's no DNS resolver at this stage so you will have to configure\nan ip address to \nnginx.ingress.kubernetes.io/influxdb-host\n. If you deploy Influx or Telegraf as sidecar (another container in the same pod) this becomes straightforward since you can directly use \n127.0.0.1\n.\n\n\nBackend Protocol\n\u00b6\n\n\nUsing \nbackend-protocol\n annotations is possible to indicate how NGINX should communicate with the backend service.\nValid Values: HTTP, HTTPS, GRPC, GRPCS and AJP\n\n\nBy default NGINX uses \nHTTP\n.\n\n\nExample:\n\n\nnginx.ingress.kubernetes.io/backend-protocol\n:\n \n\"HTTPS\"", + "title": "Annotations" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#annotations", + "text": "You can add these Kubernetes annotations to specific Ingress objects to customize their behavior. 
Tip Annotation keys and values can only be strings.\nOther types, such as boolean or numeric values must be quoted,\ni.e. \"true\" , \"false\" , \"100\" . Note The annotation prefix can be changed using the --annotations-prefix command line argument ,\nbut the default is nginx.ingress.kubernetes.io , as described in the\ntable below. Name type nginx.ingress.kubernetes.io/add-base-url \"true\" or \"false\" nginx.ingress.kubernetes.io/app-root string nginx.ingress.kubernetes.io/affinity cookie nginx.ingress.kubernetes.io/auth-realm string nginx.ingress.kubernetes.io/auth-secret string nginx.ingress.kubernetes.io/auth-type basic or digest nginx.ingress.kubernetes.io/auth-tls-secret string nginx.ingress.kubernetes.io/auth-tls-verify-depth number nginx.ingress.kubernetes.io/auth-tls-verify-client string nginx.ingress.kubernetes.io/auth-tls-error-page string nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream \"true\" or \"false\" nginx.ingress.kubernetes.io/auth-url string nginx.ingress.kubernetes.io/backend-protocol string nginx.ingress.kubernetes.io/base-url-scheme string nginx.ingress.kubernetes.io/client-body-buffer-size string nginx.ingress.kubernetes.io/configuration-snippet string nginx.ingress.kubernetes.io/default-backend string nginx.ingress.kubernetes.io/enable-cors \"true\" or \"false\" nginx.ingress.kubernetes.io/cors-allow-origin string nginx.ingress.kubernetes.io/cors-allow-methods string nginx.ingress.kubernetes.io/cors-allow-headers string nginx.ingress.kubernetes.io/cors-allow-credentials \"true\" or \"false\" nginx.ingress.kubernetes.io/cors-max-age number nginx.ingress.kubernetes.io/force-ssl-redirect \"true\" or \"false\" nginx.ingress.kubernetes.io/from-to-www-redirect \"true\" or \"false\" nginx.ingress.kubernetes.io/grpc-backend \"true\" or \"false\" nginx.ingress.kubernetes.io/limit-connections number nginx.ingress.kubernetes.io/limit-rps number nginx.ingress.kubernetes.io/permanent-redirect string 
nginx.ingress.kubernetes.io/permanent-redirect-code number nginx.ingress.kubernetes.io/proxy-body-size string nginx.ingress.kubernetes.io/proxy-cookie-domain string nginx.ingress.kubernetes.io/proxy-connect-timeout number nginx.ingress.kubernetes.io/proxy-send-timeout number nginx.ingress.kubernetes.io/proxy-read-timeout number nginx.ingress.kubernetes.io/proxy-next-upstream string nginx.ingress.kubernetes.io/proxy-next-upstream-tries number nginx.ingress.kubernetes.io/proxy-request-buffering string nginx.ingress.kubernetes.io/proxy-redirect-from string nginx.ingress.kubernetes.io/proxy-redirect-to string nginx.ingress.kubernetes.io/rewrite-log URI nginx.ingress.kubernetes.io/rewrite-target URI nginx.ingress.kubernetes.io/secure-backends \"true\" or \"false\" nginx.ingress.kubernetes.io/secure-verify-ca-secret string nginx.ingress.kubernetes.io/server-alias string nginx.ingress.kubernetes.io/server-snippet string nginx.ingress.kubernetes.io/service-upstream \"true\" or \"false\" nginx.ingress.kubernetes.io/session-cookie-name string nginx.ingress.kubernetes.io/session-cookie-hash string nginx.ingress.kubernetes.io/ssl-redirect \"true\" or \"false\" nginx.ingress.kubernetes.io/ssl-passthrough \"true\" or \"false\" nginx.ingress.kubernetes.io/upstream-max-fails number nginx.ingress.kubernetes.io/upstream-fail-timeout number nginx.ingress.kubernetes.io/upstream-hash-by string nginx.ingress.kubernetes.io/load-balance string nginx.ingress.kubernetes.io/upstream-vhost string nginx.ingress.kubernetes.io/whitelist-source-range CIDR nginx.ingress.kubernetes.io/proxy-buffering string nginx.ingress.kubernetes.io/proxy-buffer-size string nginx.ingress.kubernetes.io/ssl-ciphers string nginx.ingress.kubernetes.io/connection-proxy-header string nginx.ingress.kubernetes.io/enable-access-log \"true\" or \"false\" nginx.ingress.kubernetes.io/lua-resty-waf string nginx.ingress.kubernetes.io/lua-resty-waf-debug \"true\" or \"false\" 
nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets string nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules string nginx.ingress.kubernetes.io/enable-influxdb \"true\" or \"false\" nginx.ingress.kubernetes.io/influxdb-measurement string nginx.ingress.kubernetes.io/influxdb-port string nginx.ingress.kubernetes.io/influxdb-host string nginx.ingress.kubernetes.io/influxdb-server-name string", + "title": "Annotations" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#rewrite", + "text": "In some scenarios the exposed URL in the backend service differs from the specified path in the Ingress rule. Without a rewrite any request will return 404.\nSet the annotation nginx.ingress.kubernetes.io/rewrite-target to the path expected by the service. If the application contains relative links it is possible to add an additional annotation nginx.ingress.kubernetes.io/add-base-url that will prepend a base tag in the header of the returned HTML from the backend. If the scheme of base tag need to be specific, set the annotation nginx.ingress.kubernetes.io/base-url-scheme to the scheme such as http and https . If the Application Root is exposed in a different path and needs to be redirected, set the annotation nginx.ingress.kubernetes.io/app-root to redirect requests for / . Example Please check the rewrite example.", + "title": "Rewrite" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#session-affinity", + "text": "The annotation nginx.ingress.kubernetes.io/affinity enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server.\nThe only affinity type available for NGINX is cookie . 
Example Please check the affinity example.", + "title": "Session Affinity" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#cookie-affinity", + "text": "If you use the cookie affinity type you can also specify the name of the cookie that will be used to route the requests with the annotation nginx.ingress.kubernetes.io/session-cookie-name . The default is to create a cookie named 'INGRESSCOOKIE'. In case of NGINX the annotation nginx.ingress.kubernetes.io/session-cookie-hash defines which algorithm will be used to hash the used upstream. Default value is md5 and possible values are md5 , sha1 and index . Attention The index option is not an actual hash; an in-memory index is used instead, which has less overhead.\nHowever, with index , matching against a changing upstream server list is inconsistent.\nSo, at reload, if upstream servers have changed, index values are not guaranteed to correspond to the same server as before! Use index with caution and only if you need to! In NGINX this feature is implemented by the third party module nginx-sticky-module-ng . The workflow used to define which upstream server will be used is explained here", + "title": "Cookie affinity" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#authentication", + "text": "Is possible to add authentication adding additional annotations in the Ingress rule. The source of the authentication is a secret that contains usernames and passwords inside the key auth . The annotations are: nginx.ingress.kubernetes.io/auth-type: [basic|digest] Indicates the HTTP Authentication Type: Basic or Digest Access Authentication . 
nginx.ingress.kubernetes.io/auth-secret: secretName The name of the Secret that contains the usernames and passwords which are granted access to the path s defined in the Ingress rules.\nThis annotation also accepts the alternative form \"namespace/secretName\", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace. nginx.ingress.kubernetes.io/auth-realm: \"realm string\" Example Please check the auth example.", + "title": "Authentication" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-nginx-upstream-checks", + "text": "NGINX exposes some flags in the upstream configuration that enable the configuration of each server in the upstream. The Ingress controller allows custom max_fails and fail_timeout parameters in a global context using upstream-max-fails and upstream-fail-timeout in the NGINX ConfigMap or in a particular Ingress rule. upstream-max-fails defaults to 0. This means NGINX will respect the container's readinessProbe if it is defined. If there is no probe and no values for upstream-max-fails NGINX will continue to send traffic to the container. Tip With the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams.** To use custom values in an Ingress rule define these annotations: nginx.ingress.kubernetes.io/upstream-max-fails : number of unsuccessful attempts to communicate with the server that should occur in the duration set by the upstream-fail-timeout parameter to consider the server unavailable. nginx.ingress.kubernetes.io/upstream-fail-timeout : time in seconds during which the specified number of unsuccessful attempts to communicate with the server should occur to consider the server unavailable. 
This is also the period of time the server will be considered unavailable. In NGINX, backend server pools are called \" upstreams \". Each upstream contains the endpoints for a service. An upstream is created for each service that has Ingress rules defined. Attention All Ingress rules using the same service will use the same upstream. \nOnly one of the Ingress rules should define annotations to configure the upstream servers. Example Please check the custom upstream check example.", + "title": "Custom NGINX upstream checks" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-nginx-upstream-hashing", + "text": "NGINX supports load balancing by client-server mapping based on consistent hashing for a given key. The key can contain text, variables or any combination thereof. This feature allows for request stickiness other than client IP or cookies. The ketama consistent hashing method will be used which ensures only a few keys would be remapped to different servers on upstream group changes. To enable consistent hashing for a backend: nginx.ingress.kubernetes.io/upstream-hash-by : the nginx variable, text value or any combination thereof to use for consistent hashing. For example nginx.ingress.kubernetes.io/upstream-hash-by: \"$request_uri\" to consistently hash upstream requests by the current request URI.", + "title": "Custom NGINX upstream hashing" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-nginx-load-balancing", + "text": "This is similar to (https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#load-balance) but configures load balancing algorithm per ingress. Note that nginx.ingress.kubernetes.io/upstream-hash-by takes preference over this. 
If this and nginx.ingress.kubernetes.io/upstream-hash-by are not set then we fallback to using globally configured load balancing algorithm.", + "title": "Custom NGINX load balancing" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-nginx-upstream-vhost", + "text": "This configuration setting allows you to control the value for host in the following statement: proxy_set_header Host $host , which forms part of the location block. This is useful if you need to call the upstream server by something other than $host .", + "title": "Custom NGINX upstream vhost" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#client-certificate-authentication", + "text": "It is possible to enable Client Certificate Authentication using additional annotations in Ingress Rule. The annotations are: nginx.ingress.kubernetes.io/auth-tls-secret: secretName :\n The name of the Secret that contains the full Certificate Authority chain ca.crt that is enabled to authenticate against this Ingress.\n This annotation also accepts the alternative form \"namespace/secretName\", in which case the Secret lookup is performed in the referenced namespace instead of the Ingress namespace. nginx.ingress.kubernetes.io/auth-tls-verify-depth :\n The validation depth between the provided client certificate and the Certification Authority chain. nginx.ingress.kubernetes.io/auth-tls-verify-client :\n Enables verification of client certificates. nginx.ingress.kubernetes.io/auth-tls-error-page :\n The URL/Page that user should be redirected in case of a Certificate Authentication Error nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream :\n Indicates if the received certificates should be passed or not to the upstream server. By default this is disabled. Example Please check the client-certs example. Attention TLS with Client Authentication is not possible in Cloudflare and might result in unexpected behavior. 
Cloudflare only allows Authenticated Origin Pulls and is required to use their own certificate: https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/ Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls", + "title": "Client Certificate Authentication" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#configuration-snippet", + "text": "Using this annotation you can add additional configuration to the NGINX location. For example: nginx.ingress.kubernetes.io/configuration-snippet : | \n more_set_headers \"Request-Id: $req_id\";", + "title": "Configuration snippet" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#default-backend", + "text": "The ingress controller requires a default backend .\nThis service handles the response when the service in the Ingress rule does not have endpoints.\nThis is a global configuration for the ingress controller. In some cases could be required to return a custom content or format. In this scenario we can use the annotation nginx.ingress.kubernetes.io/default-backend: to specify a custom default backend.", + "title": "Default Backend" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#enable-cors", + "text": "To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule,\nadd the annotation nginx.ingress.kubernetes.io/enable-cors: \"true\" .\nThis will add a section in the server location enabling this functionality. 
CORS can be controlled with the following annotations: nginx.ingress.kubernetes.io/cors-allow-methods \n controls which methods are accepted.\n This is a multi-valued field, separated by ',' and accepts only letters (upper and lower case).\n Example: nginx.ingress.kubernetes.io/cors-allow-methods: \"PUT, GET, POST, OPTIONS\" nginx.ingress.kubernetes.io/cors-allow-headers \n controls which headers are accepted.\n This is a multi-valued field, separated by ',' and accepts letters, numbers, _ and -.\n Example: nginx.ingress.kubernetes.io/cors-allow-headers: \"X-Forwarded-For, X-app123-XPTO\" nginx.ingress.kubernetes.io/cors-allow-origin \n controls what's the accepted Origin for CORS and defaults to '*'.\n This is a single field value, with the following format: http(s)://origin-site.com or http(s)://origin-site.com:port \n Example: nginx.ingress.kubernetes.io/cors-allow-origin: \"https://origin-site.com:4443\" nginx.ingress.kubernetes.io/cors-allow-credentials \n controls if credentials can be passed during CORS operations.\n Example: nginx.ingress.kubernetes.io/cors-allow-credentials: \"true\" nginx.ingress.kubernetes.io/cors-max-age \n controls how long preflight requests can be cached.\n Example: nginx.ingress.kubernetes.io/cors-max-age: 600 Note For more information please see https://enable-cors.org", + "title": "Enable CORS" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#server-alias", + "text": "To add Server Aliases to an Ingress rule add the annotation nginx.ingress.kubernetes.io/server-alias: \"\" .\nThis will create a server with the same configuration, but a different server_name as the provided host. Note A server-alias name cannot conflict with the hostname of an existing server. If it does the server-alias annotation will be ignored.\nIf a server-alias is created and later a new server with the same hostname is created,\nthe new server configuration will take place over the alias configuration. 
For more information please see the server_name documentation .", + "title": "Server Alias" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#server-snippet", + "text": "Using the annotation nginx.ingress.kubernetes.io/server-snippet it is possible to add custom configuration in the server configuration block. apiVersion : extensions/v1beta1 kind : Ingress metadata : \n annotations : \n nginx.ingress.kubernetes.io/server-snippet : | set $agentflag 0; if ($http_user_agent ~* \"(Mobile)\" ){ \n set $agentflag 1; } if ( $agentflag = 1 ) { \n return 301 https://m.example.com; } Attention This annotation can be used only once per host.", + "title": "Server snippet" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#client-body-buffer-size", + "text": "Sets buffer size for reading client request body per location. In case the request body is larger than the buffer,\nthe whole body or only its part is written to a temporary file. By default, buffer size is equal to two memory pages.\nThis is 8K on x86, other 32-bit platforms, and x86-64. It is usually 16K on other 64-bit platforms. This annotation is\napplied to each location provided in the ingress rule. Note The annotation value must be given in a format understood by Nginx. 
Example nginx.ingress.kubernetes.io/client-body-buffer-size: \"1000\" # 1000 bytes nginx.ingress.kubernetes.io/client-body-buffer-size: 1k # 1 kilobyte nginx.ingress.kubernetes.io/client-body-buffer-size: 1K # 1 kilobyte nginx.ingress.kubernetes.io/client-body-buffer-size: 1m # 1 megabyte nginx.ingress.kubernetes.io/client-body-buffer-size: 1M # 1 megabyte For more information please see http://nginx.org", + "title": "Client Body Buffer Size" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#external-authentication", + "text": "To use an existing service that provides authentication the Ingress rule can be annotated with nginx.ingress.kubernetes.io/auth-url to indicate the URL where the HTTP request should be sent. nginx.ingress.kubernetes.io/auth-url : \"URL to the authentication service\" Additionally it is possible to set: nginx.ingress.kubernetes.io/auth-method :\n to specify the HTTP method to use. nginx.ingress.kubernetes.io/auth-signin :\n to specify the location of the error page. nginx.ingress.kubernetes.io/auth-response-headers :\n to specify headers to pass to backend once authentication request completes. nginx.ingress.kubernetes.io/auth-request-redirect :\n to specify the X-Auth-Request-Redirect header value. Example Please check the external-auth example.", + "title": "External Authentication" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#rate-limiting", + "text": "These annotations define a limit on the connections that can be opened by a single client IP address.\nThis can be used to mitigate DDoS Attacks . nginx.ingress.kubernetes.io/limit-connections : number of concurrent connections allowed from a single IP address. nginx.ingress.kubernetes.io/limit-rps : number of connections that may be accepted from a given IP each second. nginx.ingress.kubernetes.io/limit-rpm : number of connections that may be accepted from a given IP each minute. 
nginx.ingress.kubernetes.io/limit-rate-after : sets the initial amount after which the further transmission of a response to a client will be rate limited. nginx.ingress.kubernetes.io/limit-rate : rate of request that accepted from a client each second. You can specify the client IP source ranges to be excluded from rate-limiting through the nginx.ingress.kubernetes.io/limit-whitelist annotation. The value is a comma separated list of CIDRs. If you specify multiple annotations in a single Ingress rule, limit-rpm , and then limit-rps takes precedence. The annotation nginx.ingress.kubernetes.io/limit-rate , nginx.ingress.kubernetes.io/limit-rate-after define a limit the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit. To configure this setting globally for all Ingress rules, the limit-rate-after and limit-rate value may be set in the NGINX ConfigMap . if you set the value in ingress annotation will cover global setting.", + "title": "Rate limiting" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#permanent-redirect", + "text": "This annotation allows to return a permanent redirect instead of sending data to the upstream. For example nginx.ingress.kubernetes.io/permanent-redirect: https://www.google.com would redirect everything to Google.", + "title": "Permanent Redirect" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#permanent-redirect-code", + "text": "This annotation allows you to modify the status code used for permanent redirects. 
For example nginx.ingress.kubernetes.io/permanent-redirect-code: '308' would return your permanent-redirect with a 308.", + "title": "Permanent Redirect Code" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#ssl-passthrough", + "text": "The annotation nginx.ingress.kubernetes.io/ssl-passthrough allows to configure TLS termination in the pod and not in NGINX. Attention Using the annotation nginx.ingress.kubernetes.io/ssl-passthrough invalidates all the other available annotations.\nThis is because SSL Passthrough works on level 4 of the OSI stack (TCP), not on the HTTP/HTTPS level. Attention The use of this annotation requires the flag --enable-ssl-passthrough (By default it is disabled).", + "title": "SSL Passthrough" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#secure-backends-deprecated-since-0180", + "text": "Please use nginx.ingress.kubernetes.io/backend-protocol: \"HTTPS\" By default NGINX uses plain HTTP to reach the services.\nAdding the annotation nginx.ingress.kubernetes.io/secure-backends: \"true\" in the Ingress rule changes the protocol to HTTPS.\nIf you want to validate the upstream against a specific certificate, you can create a secret with it and reference the secret with the annotation nginx.ingress.kubernetes.io/secure-verify-ca-secret . Attention Note that if an invalid or non-existent secret is given,\nthe ingress controller will ignore the secure-backends annotation.", + "title": "Secure backends DEPRECATED (since 0.18.0)" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#service-upstream", + "text": "By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. The nginx.ingress.kubernetes.io/service-upstream annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. 
This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue #257 .", + "title": "Service Upstream" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#known-issues", + "text": "If the service-upstream annotation is specified the following things should be taken into consideration: Sticky Sessions will not work as only round-robin load balancing is supported. The proxy_next_upstream directive will not have any effect meaning on error the request will not be dispatched to another upstream.", + "title": "Known Issues" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#server-side-https-enforcement-through-redirect", + "text": "By default the controller redirects (308) to HTTPS if TLS is enabled for that ingress.\nIf you want to disable this behavior globally, you can use ssl-redirect: \"false\" in the NGINX config map . To configure this feature for specific ingress resources, you can use the nginx.ingress.kubernetes.io/ssl-redirect: \"false\" \nannotation in the particular resource. When using SSL offloading outside of cluster (e.g. AWS ELB) it may be useful to enforce a redirect to HTTPS\neven when there is no TLS certificate available.\nThis can be achieved by using the nginx.ingress.kubernetes.io/force-ssl-redirect: \"true\" annotation in the particular resource.", + "title": "Server-side HTTPS enforcement through redirect" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#redirect-fromto-www", + "text": "In some scenarios is required to redirect from www.domain.com to domain.com or vice versa.\nTo enable this feature use the annotation nginx.ingress.kubernetes.io/from-to-www-redirect: \"true\" Attention If at some point a new Ingress is created with a host equal to one of the options (like domain.com ) the annotation will be omitted.", + "title": "Redirect from/to www." 
+ }, + { + "location": "/user-guide/nginx-configuration/annotations/#whitelist-source-range", + "text": "You can specify allowed client IP source ranges through the nginx.ingress.kubernetes.io/whitelist-source-range annotation.\nThe value is a comma separated list of CIDRs , e.g. 10.0.0.0/24,172.10.0.1 . To configure this setting globally for all Ingress rules, the whitelist-source-range value may be set in the NGINX ConfigMap . Note Adding an annotation to an Ingress rule overrides any global restriction.", + "title": "Whitelist source range" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-timeouts", + "text": "Using the configuration configmap it is possible to set the default global timeout for connections to the upstream servers.\nIn some scenarios is required to have different values. To allow this we provide annotations that allows this customization: nginx.ingress.kubernetes.io/proxy-connect-timeout nginx.ingress.kubernetes.io/proxy-send-timeout nginx.ingress.kubernetes.io/proxy-read-timeout nginx.ingress.kubernetes.io/proxy-next-upstream nginx.ingress.kubernetes.io/proxy-next-upstream-tries nginx.ingress.kubernetes.io/proxy-request-buffering", + "title": "Custom timeouts" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#proxy-redirect", + "text": "With the annotations nginx.ingress.kubernetes.io/proxy-redirect-from and nginx.ingress.kubernetes.io/proxy-redirect-to it is possible to\nset the text that should be changed in the Location and Refresh header fields of a proxied server response (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect) Setting \"off\" or \"default\" in the annotation nginx.ingress.kubernetes.io/proxy-redirect-from disables nginx.ingress.kubernetes.io/proxy-redirect-to ,\notherwise, both annotations must be used in unison. Note that each annotation must be a string without spaces. 
By default the value of each annotation is \"off\".", + "title": "Proxy redirect" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#custom-max-body-size", + "text": "For NGINX, an 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body. This size can be configured by the parameter client_max_body_size . To configure this setting globally for all Ingress rules, the proxy-body-size value may be set in the NGINX ConfigMap .\nTo use custom values in an Ingress rule define these annotation: nginx.ingress.kubernetes.io/proxy-body-size : 8m", + "title": "Custom max body size" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#proxy-cookie-domain", + "text": "Sets a text that should be changed in the domain attribute of the \"Set-Cookie\" header fields of a proxied server response. To configure this setting globally for all Ingress rules, the proxy-cookie-domain value may be set in the NGINX ConfigMap .", + "title": "Proxy cookie domain" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#proxy-buffering", + "text": "Enable or disable proxy buffering proxy_buffering .\nBy default proxy buffering is disabled in the NGINX config. To configure this setting globally for all Ingress rules, the proxy-buffering value may be set in the NGINX ConfigMap .\nTo use custom values in an Ingress rule define these annotation: nginx.ingress.kubernetes.io/proxy-buffering : \"on\"", + "title": "Proxy buffering" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#proxy-buffer-size", + "text": "Sets the size of the buffer proxy_buffer_size used for reading the first part of the response received from the proxied server.\nBy default proxy buffer size is set as \"4k\" To configure this setting globally, set proxy-buffer-size in NGINX ConfigMap . 
To use custom values in an Ingress rule, define this annotation: nginx.ingress.kubernetes.io/proxy-buffer-size : \"8k\"", + "title": "Proxy buffer size" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#ssl-ciphers", + "text": "Specifies the enabled ciphers . Using this annotation will set the ssl_ciphers directive at the server level. This configuration is active for all the paths in the host. nginx.ingress.kubernetes.io/ssl-ciphers : \"ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP\"", + "title": "SSL ciphers" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#connection-proxy-header", + "text": "Using this annotation will override the default connection header set by NGINX.\nTo use custom values in an Ingress rule, define the annotation: nginx.ingress.kubernetes.io/connection-proxy-header : \"keep-alive\"", + "title": "Connection proxy header" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#enable-access-log", + "text": "Access logs are enabled by default, but in some scenarios access logs might be required to be disabled for a given\ningress. To do this, use the annotation: nginx.ingress.kubernetes.io/enable-access-log : \"false\"", + "title": "Enable Access Log" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#enable-rewrite-log", + "text": "Rewrite logs are not enabled by default. In some scenarios it could be required to enable NGINX rewrite logs.\nNote that rewrite logs are sent to the error_log file at the notice level. To enable this feature use the annotation: nginx.ingress.kubernetes.io/enable-rewrite-log : \"true\"", + "title": "Enable Rewrite Log" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#lua-resty-waf", + "text": "Using lua-resty-waf-* annotations we can enable and control the lua-resty-waf \nWeb Application Firewall per location. 
The following configuration will enable the WAF for the paths defined in the corresponding ingress: nginx.ingress.kubernetes.io/lua-resty-waf : \"active\" In order to run it in debugging mode you can set nginx.ingress.kubernetes.io/lua-resty-waf-debug to \"true\" in addition to the above configuration.\nThe other possible values for nginx.ingress.kubernetes.io/lua-resty-waf are inactive and simulate .\nIn inactive mode WAF won't do anything, whereas in simulate mode it will log a warning message if there's a matching WAF rule for a given request. This is useful to debug a rule and eliminate possible false positives before fully deploying it. lua-resty-waf comes with a predefined set of rules https://github.com/p0pr0ck5/lua-resty-waf/tree/84b4f40362500dd0cb98b9e71b5875cb1a40f1ad/rules that covers ModSecurity CRS.\nYou can use nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets to ignore a subset of those rulesets. For example: nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets : \"41000_sqli, 42000_xss\" will ignore the two mentioned rulesets. It is also possible to configure custom WAF rules per ingress using the nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules annotation. 
For an example the following snippet will configure a WAF rule to deny requests with query string value that contains word foo : nginx.ingress.kubernetes.io/lua-resty-waf-extra-rules : '[=[ { \"access\": [ { \"actions\": { \"disrupt\" : \"DENY\" }, \"id\": 10001, \"msg\": \"my custom rule\", \"operator\": \"STR_CONTAINS\", \"pattern\": \"foo\", \"vars\": [ { \"parse\": [ \"values\", 1 ], \"type\": \"REQUEST_ARGS\" } ] } ], \"body_filter\": [], \"header_filter\":[] } ]=]' For details on how to write WAF rules, please refer to https://github.com/p0pr0ck5/lua-resty-waf .", + "title": "Lua Resty WAF" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#grpc-backend-deprecated-since-0180", + "text": "Please use nginx.ingress.kubernetes.io/backend-protocol: \"GRPC\" or nginx.ingress.kubernetes.io/backend-protocol: \"GRPCS\" Since NGINX 1.13.10 it is possible to expose gRPC services natively You only need to add the annotation nginx.ingress.kubernetes.io/grpc-backend: \"true\" to enable this feature.\nAdditionally, if the gRPC service requires TLS, add nginx.ingress.kubernetes.io/secure-backends: \"true\" . Attention This feature requires HTTP2 to work which means we need to expose this service using HTTPS.\nExposing a gRPC service using HTTP is not supported.", + "title": "gRPC backend DEPRECATED (since 0.18.0)" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#influxdb", + "text": "Using influxdb-* annotations we can monitor requests passing through a Location by sending them to an InfluxDB backend exposing the UDP socket\nusing the nginx-influxdb-module . 
nginx.ingress.kubernetes.io/enable-influxdb : \"true\" nginx.ingress.kubernetes.io/influxdb-measurement : \"nginx-reqs\" nginx.ingress.kubernetes.io/influxdb-port : \"8089\" nginx.ingress.kubernetes.io/influxdb-host : \"127.0.0.1\" nginx.ingress.kubernetes.io/influxdb-server-name : \"nginx-ingress\" For the influxdb-host parameter you have two options: Use an InfluxDB server configured with the UDP protocol enabled. Deploy Telegraf as a sidecar proxy to the Ingress controller configured to listen UDP with the socket listener input and to write using\nanyone of the outputs plugins like InfluxDB, Apache Kafka,\nPrometheus, etc.. (recommended) It's important to remember that there's no DNS resolver at this stage so you will have to configure\nan ip address to nginx.ingress.kubernetes.io/influxdb-host . If you deploy Influx or Telegraf as sidecar (another container in the same pod) this becomes straightforward since you can directly use 127.0.0.1 .", + "title": "InfluxDB" + }, + { + "location": "/user-guide/nginx-configuration/annotations/#backend-protocol", + "text": "Using backend-protocol annotations is possible to indicate how NGINX should communicate with the backend service.\nValid Values: HTTP, HTTPS, GRPC, GRPCS and AJP By default NGINX uses HTTP . Example: nginx.ingress.kubernetes.io/backend-protocol : \"HTTPS\"", + "title": "Backend Protocol" + }, + { + "location": "/user-guide/nginx-configuration/configmap/", + "text": "ConfigMaps\n\u00b6\n\n\nConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable.\n\n\nThe ConfigMap API resource stores configuration data as key-value pairs. The data provides the configurations for system\ncomponents for the nginx-controller. Before you can begin using a config-map it must be \ndeployed\n.\n\n\nIn order to overwrite nginx-controller configuration values as seen in \nconfig.go\n,\nyou can add key-value pairs to the data section of the config-map. 
For Example:\n\n\ndata\n:\n\n \nmap-hash-bucket-size\n:\n \n\"128\"\n\n \nssl-protocols\n:\n \nSSLv2\n\n\n\n\n\n\n\n\nImportant\n\n\nThe key and values in a ConfigMap can only be strings.\nThis means that we want a value with boolean values we need to quote the values, like \"true\" or \"false\".\nSame for numbers, like \"100\".\n\n\n\"Slice\" types (defined below as \n[]string\n or \n[]int\n can be provided as a comma-delimited string.\n\n\n\n\nConfiguration options\n\u00b6\n\n\nThe following table shows a configuration option's name, type, and the default value:\n\n\n\n\n\n\n\n\nname\n\n\ntype\n\n\ndefault\n\n\n\n\n\n\n\n\n\n\nadd-headers\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nallow-backend-server-header\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nhide-headers\n\n\nstring array\n\n\nempty\n\n\n\n\n\n\naccess-log-path\n\n\nstring\n\n\n\"/var/log/nginx/access.log\"\n\n\n\n\n\n\nerror-log-path\n\n\nstring\n\n\n\"/var/log/nginx/error.log\"\n\n\n\n\n\n\nenable-dynamic-tls-records\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nenable-modsecurity\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nenable-owasp-modsecurity-crs\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nclient-header-buffer-size\n\n\nstring\n\n\n\"1k\"\n\n\n\n\n\n\nclient-header-timeout\n\n\nint\n\n\n60\n\n\n\n\n\n\nclient-body-buffer-size\n\n\nstring\n\n\n\"8k\"\n\n\n\n\n\n\nclient-body-timeout\n\n\nint\n\n\n60\n\n\n\n\n\n\ndisable-access-log\n\n\nbool\n\n\nfalse\n\n\n\n\n\n\ndisable-ipv6\n\n\nbool\n\n\nfalse\n\n\n\n\n\n\ndisable-ipv6-dns\n\n\nbool\n\n\nfalse\n\n\n\n\n\n\nenable-underscores-in-headers\n\n\nbool\n\n\nfalse\n\n\n\n\n\n\nignore-invalid-headers\n\n\nbool\n\n\ntrue\n\n\n\n\n\n\nretry-non-idempotent\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nerror-log-level\n\n\nstring\n\n\n\"notice\"\n\n\n\n\n\n\nhttp2-max-field-size\n\n\nstring\n\n\n\"4k\"\n\n\n\n\n\n\nhttp2-max-header-size\n\n\nstring\n\n\n\"16k\"\n\n\n\n\n\n\nhsts\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nhsts-include-subdomains\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nhsts-max-age\n\n\nstrin
g\n\n\n\"15724800\"\n\n\n\n\n\n\nhsts-preload\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nkeep-alive\n\n\nint\n\n\n75\n\n\n\n\n\n\nkeep-alive-requests\n\n\nint\n\n\n100\n\n\n\n\n\n\nlarge-client-header-buffers\n\n\nstring\n\n\n\"4 8k\"\n\n\n\n\n\n\nlog-format-escape-json\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nlog-format-upstream\n\n\nstring\n\n\n%v\n \n-\n \n[\n$the_real_ip\n]\n \n-\n \n$remote_user\n \n[\n$time_local\n]\n \n\"$request\"\n \n$status\n \n$body_bytes_sent\n \n\"$http_referer\"\n \n\"$http_user_agent\"\n \n$request_length\n \n$request_time\n \n[\n$proxy_upstream_name\n]\n \n$upstream_addr\n \n$upstream_response_length\n \n$upstream_response_time\n \n$upstream_status\n\n\n\n\n\n\nlog-format-stream\n\n\nstring\n\n\n[$time_local] $protocol $status $bytes_sent $bytes_received $session_time\n\n\n\n\n\n\nenable-multi-accept\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nmax-worker-connections\n\n\nint\n\n\n16384\n\n\n\n\n\n\nmap-hash-bucket-size\n\n\nint\n\n\n64\n\n\n\n\n\n\nnginx-status-ipv4-whitelist\n\n\n[]string\n\n\n\"127.0.0.1\"\n\n\n\n\n\n\nnginx-status-ipv6-whitelist\n\n\n[]string\n\n\n\"::1\"\n\n\n\n\n\n\nproxy-real-ip-cidr\n\n\n[]string\n\n\n\"0.0.0.0/0\"\n\n\n\n\n\n\nproxy-set-headers\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nserver-name-hash-max-size\n\n\nint\n\n\n1024\n\n\n\n\n\n\nserver-name-hash-bucket-size\n\n\nint\n\n\n\n\n\n\n\n\n\nproxy-headers-hash-max-size\n\n\nint\n\n\n512\n\n\n\n\n\n\nproxy-headers-hash-bucket-size\n\n\nint\n\n\n64\n\n\n\n\n\n\nreuse-port\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nserver-tokens\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nssl-ciphers\n\n\nstring\n\n\n\"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\"\n\n\n\n\n\n\nssl-ecdh-curve\n\n\nstring\n\n\n\"auto\"\n\n\n\n\n\n\nssl-dh-param\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nssl
-protocols\n\n\nstring\n\n\n\"TLSv1.2\"\n\n\n\n\n\n\nssl-session-cache\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nssl-session-cache-size\n\n\nstring\n\n\n\"10m\"\n\n\n\n\n\n\nssl-session-tickets\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nssl-session-ticket-key\n\n\nstring\n\n\n\n\n\n\n\n\n\nssl-session-timeout\n\n\nstring\n\n\n\"10m\"\n\n\n\n\n\n\nssl-buffer-size\n\n\nstring\n\n\n\"4k\"\n\n\n\n\n\n\nuse-proxy-protocol\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nproxy-protocol-header-timeout\n\n\nstring\n\n\n\"5s\"\n\n\n\n\n\n\nuse-gzip\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nuse-geoip\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nenable-brotli\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nbrotli-level\n\n\nint\n\n\n4\n\n\n\n\n\n\nbrotli-types\n\n\nstring\n\n\n\"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\"\n\n\n\n\n\n\nuse-http2\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\ngzip-level\n\n\nint\n\n\n5\n\n\n\n\n\n\ngzip-types\n\n\nstring\n\n\n\"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain 
text/x-component\"\n\n\n\n\n\n\nworker-processes\n\n\nstring\n\n\n\n\n\n\n\n\n\nworker-cpu-affinity\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nworker-shutdown-timeout\n\n\nstring\n\n\n\"10s\"\n\n\n\n\n\n\nload-balance\n\n\nstring\n\n\n\"round_robin\"\n\n\n\n\n\n\nvariables-hash-bucket-size\n\n\nint\n\n\n128\n\n\n\n\n\n\nvariables-hash-max-size\n\n\nint\n\n\n2048\n\n\n\n\n\n\nupstream-keepalive-connections\n\n\nint\n\n\n32\n\n\n\n\n\n\nlimit-conn-zone-variable\n\n\nstring\n\n\n\"$binary_remote_addr\"\n\n\n\n\n\n\nproxy-stream-timeout\n\n\nstring\n\n\n\"600s\"\n\n\n\n\n\n\nproxy-stream-responses\n\n\nint\n\n\n1\n\n\n\n\n\n\nbind-address\n\n\n[]string\n\n\n\"\"\n\n\n\n\n\n\nuse-forwarded-headers\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nforwarded-for-header\n\n\nstring\n\n\n\"X-Forwarded-For\"\n\n\n\n\n\n\ncompute-full-forwarded-for\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nproxy-add-original-uri-header\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\ngenerate-request-id\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nenable-opentracing\n\n\nbool\n\n\n\"false\"\n\n\n\n\n\n\nzipkin-collector-host\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nzipkin-collector-port\n\n\nint\n\n\n9411\n\n\n\n\n\n\nzipkin-service-name\n\n\nstring\n\n\n\"nginx\"\n\n\n\n\n\n\nzipkin-sample-rate\n\n\nfloat\n\n\n1.0\n\n\n\n\n\n\njaeger-collector-host\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\njaeger-collector-port\n\n\nint\n\n\n6831\n\n\n\n\n\n\njaeger-service-name\n\n\nstring\n\n\n\"nginx\"\n\n\n\n\n\n\njaeger-sampler-type\n\n\nstring\n\n\n\"const\"\n\n\n\n\n\n\njaeger-sampler-param\n\n\nstring\n\n\n\"1\"\n\n\n\n\n\n\nmain-snippet\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nhttp-snippet\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nserver-snippet\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\nlocation-snippet\n\n\nstring\n\n\n\"\"\n\n\n\n\n\n\ncustom-http-errors\n\n\n[]int\n\n\n[]int{}\n\n\n\n\n\n\nproxy-body-size\n\n\nstring\n\n\n\"1m\"\n\n\n\n\n\n\nproxy-connect-timeout\n\n\nint\n\n\n5\n\n\n\n\n\n\nproxy-read-timeout\n\n\nint\n\n\n60\n\n\n\n\n\n\nproxy-send-timeout\n\n\nint\n\n\n6
0\n\n\n\n\n\n\nproxy-buffer-size\n\n\nstring\n\n\n\"4k\"\n\n\n\n\n\n\nproxy-cookie-path\n\n\nstring\n\n\n\"off\"\n\n\n\n\n\n\nproxy-cookie-domain\n\n\nstring\n\n\n\"off\"\n\n\n\n\n\n\nproxy-next-upstream\n\n\nstring\n\n\n\"error timeout\"\n\n\n\n\n\n\nproxy-next-upstream-tries\n\n\nint\n\n\n3\n\n\n\n\n\n\nproxy-redirect-from\n\n\nstring\n\n\n\"off\"\n\n\n\n\n\n\nproxy-request-buffering\n\n\nstring\n\n\n\"on\"\n\n\n\n\n\n\nssl-redirect\n\n\nbool\n\n\n\"true\"\n\n\n\n\n\n\nwhitelist-source-range\n\n\n[]string\n\n\n[]string{}\n\n\n\n\n\n\nskip-access-log-urls\n\n\n[]string\n\n\n[]string{}\n\n\n\n\n\n\nlimit-rate\n\n\nint\n\n\n0\n\n\n\n\n\n\nlimit-rate-after\n\n\nint\n\n\n0\n\n\n\n\n\n\nhttp-redirect-code\n\n\nint\n\n\n308\n\n\n\n\n\n\nproxy-buffering\n\n\nstring\n\n\n\"off\"\n\n\n\n\n\n\nlimit-req-status-code\n\n\nint\n\n\n503\n\n\n\n\n\n\nno-tls-redirect-locations\n\n\nstring\n\n\n\"/.well-known/acme-challenge\"\n\n\n\n\n\n\nno-auth-locations\n\n\nstring\n\n\n\"/.well-known/acme-challenge\"\n\n\n\n\n\n\n\n\nadd-headers\n\u00b6\n\n\nSets custom headers from named configmap before sending traffic to the client. See \nproxy-set-headers\n. \nexample\n\n\nallow-backend-server-header\n\u00b6\n\n\nEnables the return of the header Server from the backend instead of the generic nginx string. \ndefault:\n is disabled\n\n\nhide-headers\n\u00b6\n\n\nSets additional header that will not be passed from the upstream server to the client response.\n\ndefault:\n empty\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header\n\n\naccess-log-path\n\u00b6\n\n\nAccess log path. Goes to \n/var/log/nginx/access.log\n by default.\n\n\nNote:\n the file \n/var/log/nginx/access.log\n is a symlink to \n/dev/stdout\n\n\nerror-log-path\n\u00b6\n\n\nError log path. 
Goes to \n/var/log/nginx/error.log\n by default.\n\n\nNote:\n the file \n/var/log/nginx/error.log\n is a symlink to \n/dev/stderr\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/ngx_core_module.html#error_log\n\n\nenable-dynamic-tls-records\n\u00b6\n\n\nEnables dynamically sized TLS records to improve time-to-first-byte. \ndefault:\n is enabled\n\n\nReferences:\n\n\nhttps://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency\n\n\nenable-modsecurity\n\u00b6\n\n\nEnables the modsecurity module for NGINX. \ndefault:\n is disabled\n\n\nenable-owasp-modsecurity-crs\n\u00b6\n\n\nEnables the OWASP ModSecurity Core Rule Set (CRS). \ndefault:\n is disabled\n\n\nclient-header-buffer-size\n\u00b6\n\n\nAllows to configure a custom buffer size for reading client request header.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size\n\n\nclient-header-timeout\n\u00b6\n\n\nDefines a timeout for reading client request header, in seconds.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout\n\n\nclient-body-buffer-size\n\u00b6\n\n\nSets buffer size for reading client request body.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size\n\n\nclient-body-timeout\n\u00b6\n\n\nDefines a timeout for reading client request body, in seconds.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout\n\n\ndisable-access-log\n\u00b6\n\n\nDisables the Access Log from the entire Ingress Controller. \ndefault:\n '\"false\"'\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_log_module.html#access_log\n\n\ndisable-ipv6\n\u00b6\n\n\nDisable listening on IPV6. \ndefault:\n is disabled\n\n\ndisable-ipv6-dns\n\u00b6\n\n\nDisable IPV6 for nginx DNS resolver. \ndefault:\n is disabled\n\n\nenable-underscores-in-headers\n\u00b6\n\n\nEnables underscores in header names. 
\ndefault:\n is disabled\n\n\nignore-invalid-headers\n\u00b6\n\n\nSet if header fields with invalid names should be ignored.\n\ndefault:\n is enabled\n\n\nretry-non-idempotent\n\u00b6\n\n\nSince 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value \"true\".\n\n\nerror-log-level\n\u00b6\n\n\nConfigures the logging level of errors. Log levels above are listed in the order of increasing severity.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/ngx_core_module.html#error_log\n\n\nhttp2-max-field-size\n\u00b6\n\n\nLimits the maximum size of an HPACK-compressed request header field.\n\n\nReferences:\n\n\nhttps://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size\n\n\nhttp2-max-header-size\n\u00b6\n\n\nLimits the maximum size of the entire request header list after HPACK decompression.\n\n\nReferences:\n\n\nhttps://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size\n\n\nhsts\n\u00b6\n\n\nEnables or disables the header HSTS in servers running SSL.\nHTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. 
It provides protection against protocol downgrade attacks and cookie theft.\n\n\nReferences:\n\n\n\n\nhttps://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security\n\n\nhttps://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server\n\n\n\n\nhsts-include-subdomains\n\u00b6\n\n\nEnables or disables the use of HSTS in all the subdomains of the server-name.\n\n\nhsts-max-age\n\u00b6\n\n\nSets the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.\n\n\nhsts-preload\n\u00b6\n\n\nEnables or disables the preload attribute in the HSTS feature (when it is enabled) dd\n\n\nkeep-alive\n\u00b6\n\n\nSets the time during which a keep-alive client connection will stay open on the server side. The zero value disables keep-alive client connections.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout\n\n\nkeep-alive-requests\n\u00b6\n\n\nSets the maximum number of requests that can be served through one keep-alive connection.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests\n\n\nlarge-client-header-buffers\n\u00b6\n\n\nSets the maximum number and size of buffers used for reading large client request header. 
\ndefault:\n 4 8k\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers\n\n\nlog-format-escape-json\n\u00b6\n\n\nSets if the escape parameter allows JSON (\"true\") or default characters escaping in variables (\"false\") Sets the nginx \nlog format\n.\n\n\nlog-format-upstream\n\u00b6\n\n\nSets the nginx \nlog format\n.\nExample for json output:\n\n\nconsolelog-format-upstream: '{ \"time\": \"$time_iso8601\", \"remote_addr\": \"$proxy_protocol_addr\",\"x-forward-for\": \"$proxy_add_x_forwarded_for\", \"request_id\": \"$req_id\", \"remote_user\":\"$remote_user\", \"bytes_sent\": $bytes_sent, \"request_time\": $request_time, \"status\":$status, \"vhost\": \"$host\", \"request_proto\": \"$server_protocol\", \"path\": \"$uri\",\"request_query\": \"$args\", \"request_length\": $request_length, \"duration\": $request_time,\"method\": \"$request_method\", \"http_referrer\": \"$http_referer\", \"http_user_agent\":\"$http_user_agent\" }'\n\n\nPlease check the \nlog-format\n for definition of each field.\n\n\nlog-format-stream\n\u00b6\n\n\nSets the nginx \nstream format\n.\n\n\nenable-multi-accept\n\u00b6\n\n\nIf disabled, a worker process will accept one new connection at a time. Otherwise, a worker process will accept all new connections at a time.\n\ndefault:\n true\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/ngx_core_module.html#multi_accept\n\n\nmax-worker-connections\n\u00b6\n\n\nSets the maximum number of simultaneous connections that can be opened by each \nworker process\n\n\nmap-hash-bucket-size\n\u00b6\n\n\nSets the bucket size for the \nmap variables hash tables\n. 
The details of setting up hash tables are provided in a separate \ndocument\n.\n\n\nproxy-real-ip-cidr\n\u00b6\n\n\nIf use-proxy-protocol is enabled, proxy-real-ip-cidr defines the default the IP/network address of your external load balancer.\n\n\nproxy-set-headers\n\u00b6\n\n\nSets custom headers from named configmap before sending traffic to backends. The value format is namespace/name. See \nexample\n\n\nserver-name-hash-max-size\n\u00b6\n\n\nSets the maximum size of the \nserver names hash tables\n used in server names,map directive\u2019s values, MIME types, names of request header strings, etc.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/hash.html\n\n\nserver-name-hash-bucket-size\n\u00b6\n\n\nSets the size of the bucket for the server names hash tables.\n\n\nReferences:\n\n\n\n\nhttp://nginx.org/en/docs/hash.html\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size\n\n\n\n\nproxy-headers-hash-max-size\n\u00b6\n\n\nSets the maximum size of the proxy headers hash tables.\n\n\nReferences:\n\n\n\n\nhttp://nginx.org/en/docs/hash.html\n\n\nhttps://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size\n\n\n\n\nreuse-port\n\u00b6\n\n\nInstructs NGINX to create an individual listening socket for each worker process (using the SO_REUSEPORT socket option), allowing a kernel to distribute incoming connections between worker processes\n\ndefault:\n true\n\n\nproxy-headers-hash-bucket-size\n\u00b6\n\n\nSets the size of the bucket for the proxy headers hash tables.\n\n\nReferences:\n\n\n\n\nhttp://nginx.org/en/docs/hash.html\n\n\nhttps://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size\n\n\n\n\nserver-tokens\n\u00b6\n\n\nSend NGINX Server header in responses and display NGINX version in error pages. \ndefault:\n is enabled\n\n\nssl-ciphers\n\u00b6\n\n\nSets the \nciphers\n list to enable. 
The ciphers are specified in the format understood by the OpenSSL library.\n\n\nThe default cipher list is:\n \nECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\n.\n\n\nThe ordering of a ciphersuite is very important because it decides which algorithms are going to be selected in priority. The recommendation above prioritizes algorithms that provide perfect \nforward secrecy\n.\n\n\nPlease check the \nMozilla SSL Configuration Generator\n.\n\n\nssl-ecdh-curve\n\u00b6\n\n\nSpecifies a curve for ECDHE ciphers.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve\n\n\nssl-dh-param\n\u00b6\n\n\nSets the name of the secret that contains Diffie-Hellman key to help with \"Perfect Forward Secrecy\".\n\n\nReferences:\n\n\n\n\nhttps://wiki.openssl.org/index.php/Diffie-Hellman_parameters\n\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam\n\n\nhttp://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam\n\n\n\n\nssl-protocols\n\u00b6\n\n\nSets the \nSSL protocols\n to use. The default is: \nTLSv1.2\n.\n\n\nPlease check the result of the configuration using \nhttps://ssllabs.com/ssltest/analyze.html\n or \nhttps://testssl.sh\n.\n\n\nssl-session-cache\n\u00b6\n\n\nEnables or disables the use of shared \nSSL cache\n among worker processes.\n\n\nssl-session-cache-size\n\u00b6\n\n\nSets the size of the \nSSL shared session cache\n between all worker processes.\n\n\nssl-session-tickets\n\u00b6\n\n\nEnables or disables session resumption through \nTLS session tickets\n.\n\n\nssl-session-ticket-key\n\u00b6\n\n\nSets the secret key used to encrypt and decrypt TLS session tickets. 
The value must be a valid base64 string.\nTo create a ticket: \nopenssl rand 80 | openssl enc -A -base64\n\n\nTLS session ticket-key\n, by default, a randomly generated key is used. \n\n\nssl-session-timeout\n\u00b6\n\n\nSets the time during which a client may \nreuse the session\n parameters stored in a cache.\n\n\nssl-buffer-size\n\u00b6\n\n\nSets the size of the \nSSL buffer\n used for sending data. The default of 4k helps NGINX to improve TLS Time To First Byte (TTTFB).\n\n\nReferences:\n\n\nhttps://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/\n\n\nuse-proxy-protocol\n\u00b6\n\n\nEnables or disables the \nPROXY protocol\n to receive client connection (real IP address) information passed through proxy servers and load balancers such as HAProxy and Amazon Elastic Load Balancer (ELB).\n\n\nproxy-protocol-header-timeout\n\u00b6\n\n\nSets the timeout value for receiving the proxy-protocol headers. The default of 5 seconds prevents the TLS passthrough handler from waiting indefinitely on a dropped connection.\n\ndefault:\n 5s\n\n\nuse-gzip\n\u00b6\n\n\nEnables or disables compression of HTTP responses using the \n\"gzip\" module\n.\nThe default mime type list to compress is: \napplication/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\n.\n\n\nuse-geoip\n\u00b6\n\n\nEnables or disables \n\"geoip\" module\n that creates variables with values depending on the client IP address, using the precompiled MaxMind databases.\n\ndefault:\n true\n\n\nenable-brotli\n\u00b6\n\n\nEnables or disables compression of HTTP responses using the \n\"brotli\" module\n.\nThe default mime type list to compress is: \napplication/xml+rss application/atom+xml application/javascript application/x-javascript application/json 
application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\n. \ndefault:\n is disabled\n\n\n\n\nNote:\n Brotli does not work in Safari < 11. For more information see \nhttps://caniuse.com/#feat=brotli\n\n\n\n\nbrotli-level\n\u00b6\n\n\nSets the Brotli Compression Level that will be used. \ndefault:\n 4\n\n\nbrotli-types\n\u00b6\n\n\nSets the MIME Types that will be compressed on-the-fly by brotli.\n\ndefault:\n \napplication/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\n\n\nuse-http2\n\u00b6\n\n\nEnables or disables \nHTTP/2\n support in secure connections.\n\n\ngzip-level\n\u00b6\n\n\nSets the gzip Compression Level that will be used. \ndefault:\n 5\n\n\ngzip-types\n\u00b6\n\n\nSets the MIME types in addition to \"text/html\" to compress. The special value \"*\" matches any MIME type. Responses with the \"text/html\" type are always compressed if \nuse-gzip\n is enabled.\n\n\nworker-processes\n\u00b6\n\n\nSets the number of \nworker processes\n.\nThe default of \"auto\" means number of available CPU cores.\n\n\nworker-cpu-affinity\n\u00b6\n\n\nBinds worker processes to the sets of CPUs. \nworker_cpu_affinity\n.\nBy default worker processes are not bound to any specific CPUs. The value can be:\n\n\n\n\n\"\": empty string indicates no affinity is applied.\n\n\ncpumask: e.g. \n0001 0010 0100 1000\n to bind processes to specific cpus.\n\n\nauto: binding worker processes automatically to available CPUs.\n\n\n\n\nworker-shutdown-timeout\n\u00b6\n\n\nSets a timeout for Nginx to \nwait for worker to gracefully shutdown\n. 
\ndefault:\n \"10s\"\n\n\nload-balance\n\u00b6\n\n\nSets the algorithm to use for load balancing.\nThe value can either be:\n\n\n\n\nround_robin: to use the default round robin loadbalancer\n\n\nleast_conn: to use the least connected method (\nnote\n that this is available only in non-dynamic mode: \n--enable-dynamic-configuration=false\n)\n\n\nip_hash: to use a hash of the server for routing (\nnote\n that this is available only in non-dynamic mode: \n--enable-dynamic-configuration=false\n, but alternatively you can consider using \nnginx.ingress.kubernetes.io/upstream-hash-by\n)\n\n\newma: to use the Peak EWMA method for routing (\nimplementation\n)\n\n\n\n\nThe default is \nround_robin\n.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/load_balancing.html\n\n\nvariables-hash-bucket-size\n\u00b6\n\n\nSets the bucket size for the variables hash table.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size\n\n\nvariables-hash-max-size\n\u00b6\n\n\nSets the maximum size of the variables hash table.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size\n\n\nupstream-keepalive-connections\n\u00b6\n\n\nActivates the cache for connections to upstream servers. The connections parameter sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this\nnumber is exceeded, the least recently used connections are closed. \ndefault:\n 32\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive\n\n\nlimit-conn-zone-variable\n\u00b6\n\n\nSets parameters for a shared memory zone that will keep states for various keys of \nlimit_conn_zone\n. 
The default of \"$binary_remote_addr\" variable\u2019s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses.\n\n\nproxy-stream-timeout\n\u00b6\n\n\nSets the timeout between two successive read or write operations on client or proxied server connections. If no data is transmitted within this time, the connection is closed.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout\n\n\nproxy-stream-responses\n\u00b6\n\n\nSets the number of datagrams expected from the proxied server in response to the client request if the UDP protocol is used.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses\n\n\nbind-address\n\u00b6\n\n\nSets the addresses on which the server will accept requests instead of *. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop.\n\n\nuse-forwarded-headers\n\u00b6\n\n\nIf true, NGINX passes the incoming \nX-Forwarded-*\n headers to upstreams. Use this option when NGINX is behind another L7 proxy / load balancer that is setting these headers.\n\n\nIf false, NGINX ignores incoming \nX-Forwarded-*\n headers, filling them with the request information it sees. Use this option if NGINX is exposed directly to the internet, or it's behind a L3/packet-based load balancer that doesn't alter the source IP in the packets.\n\n\nforwarded-for-header\n\u00b6\n\n\nSets the header field for identifying the originating IP address of a client. \ndefault:\n X-Forwarded-For\n\n\ncompute-full-forwarded-for\n\u00b6\n\n\nAppend the remote address to the X-Forwarded-For header instead of replacing it. 
When this option is enabled, the upstream application is responsible for extracting the client IP based on its own list of trusted proxies.\n\n\nproxy-add-original-uri-header\n\u00b6\n\n\nAdds an X-Original-Uri header with the original request URI to the backend request\n\n\ngenerate-request-id\n\u00b6\n\n\nEnsures that X-Request-ID is defaulted to a random value, if no X-Request-ID is present in the request\n\n\nenable-opentracing\n\u00b6\n\n\nEnables the nginx Opentracing extension. \ndefault:\n is disabled\n\n\nReferences:\n\n\nhttps://github.com/opentracing-contrib/nginx-opentracing\n\n\nzipkin-collector-host\n\u00b6\n\n\nSpecifies the host to use when uploading traces. It must be a valid URL.\n\n\nzipkin-collector-port\n\u00b6\n\n\nSpecifies the port to use when uploading traces. \ndefault:\n 9411\n\n\nzipkin-service-name\n\u00b6\n\n\nSpecifies the service name to use for any traces created. \ndefault:\n nginx\n\n\nzipkin-sample-rate\n\u00b6\n\n\nSpecifies sample rate for any traces created. \ndefault:\n 1.0\n\n\njaeger-collector-host\n\u00b6\n\n\nSpecifies the host to use when uploading traces. It must be a valid URL.\n\n\njaeger-collector-port\n\u00b6\n\n\nSpecifies the port to use when uploading traces. \ndefault:\n 6831\n\n\njaeger-service-name\n\u00b6\n\n\nSpecifies the service name to use for any traces created. \ndefault:\n nginx\n\n\njaeger-sampler-type\n\u00b6\n\n\nSpecifies the sampler to be used when sampling traces. The available samplers are: const, probabilistic, ratelimiting, remote. \ndefault:\n const\n\n\njaeger-sampler-param\n\u00b6\n\n\nSpecifies the argument to be passed to the sampler constructor. Must be a number.\nFor const this should be 0 to never sample and 1 to always sample. 
\ndefault:\n 1\n\n\nmain-snippet\n\u00b6\n\n\nAdds custom configuration to the main section of the nginx configuration.\n\n\nhttp-snippet\n\u00b6\n\n\nAdds custom configuration to the http section of the nginx configuration.\n\n\nserver-snippet\n\u00b6\n\n\nAdds custom configuration to all the servers in the nginx configuration.\n\n\nlocation-snippet\n\u00b6\n\n\nAdds custom configuration to all the locations in the nginx configuration.\n\n\ncustom-http-errors\n\u00b6\n\n\nEnables which HTTP codes should be passed for processing with the \nerror_page directive\n\n\nSetting at least one code also enables \nproxy_intercept_errors\n which are required to process error_page.\n\n\nExample usage: \ncustom-http-errors: 404,415\n\n\nproxy-body-size\n\u00b6\n\n\nSets the maximum allowed size of the client request body.\nSee NGINX \nclient_max_body_size\n.\n\n\nproxy-connect-timeout\n\u00b6\n\n\nSets the timeout for \nestablishing a connection with a proxied server\n. It should be noted that this timeout cannot usually exceed 75 seconds.\n\n\nproxy-read-timeout\n\u00b6\n\n\nSets the timeout in seconds for \nreading a response from the proxied server\n. The timeout is set only between two successive read operations, not for the transmission of the whole response.\n\n\nproxy-send-timeout\n\u00b6\n\n\nSets the timeout in seconds for \ntransmitting a request to the proxied server\n. The timeout is set only between two successive write operations, not for the transmission of the whole request.\n\n\nproxy-buffer-size\n\u00b6\n\n\nSets the size of the buffer used for \nreading the first part of the response\n received from the proxied server. 
This part usually contains a small response header.\n\n\nproxy-cookie-path\n\u00b6\n\n\nSets a text that \nshould be changed in the path attribute\n of the \u201cSet-Cookie\u201d header fields of a proxied server response.\n\n\nproxy-cookie-domain\n\u00b6\n\n\nSets a text that \nshould be changed in the domain attribute\n of the \u201cSet-Cookie\u201d header fields of a proxied server response.\n\n\nproxy-next-upstream\n\u00b6\n\n\nSpecifies in \nwhich cases\n a request should be passed to the next server.\n\n\nproxy-next-upstream-tries\n\u00b6\n\n\nLimit the number of \npossible tries\n a request should be passed to the next server.\n\n\nproxy-redirect-from\n\u00b6\n\n\nSets the original text that should be changed in the \"Location\" and \"Refresh\" header fields of a proxied server response. \ndefault:\n off\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect\n\n\nproxy-request-buffering\n\u00b6\n\n\nEnables or disables \nbuffering of a client request body\n.\n\n\nssl-redirect\n\u00b6\n\n\nSets the global value of redirects (301) to HTTPS if the server has a TLS certificate (defined in an Ingress rule).\n\ndefault:\n \"true\"\n\n\nwhitelist-source-range\n\u00b6\n\n\nSets the default whitelisted IPs for each \nserver\n block. This can be overwritten by an annotation on an Ingress rule.\nSee \nngx_http_access_module\n.\n\n\nskip-access-log-urls\n\u00b6\n\n\nSets a list of URLs that should not appear in the NGINX access log. This is useful with urls like \n/health\n or \nhealth-check\n that make \"complex\" reading the logs. \ndefault:\n is empty\n\n\nlimit-rate\n\u00b6\n\n\nLimits the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. 
The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate\n\n\nlimit-rate-after\n\u00b6\n\n\nSets the initial amount after which the further transmission of a response to a client will be rate limited.\n\n\nReferences:\n\n\nhttp://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after\n\n\nhttp-redirect-code\n\u00b6\n\n\nSets the HTTP status code to be used in redirects.\nSupported codes are \n301\n,\n302\n,\n307\n and \n308\n\n\ndefault:\n 308\n\n\n\n\nWhy the default code is 308?\n\n\nRFC 7238\n was created to define the 308 (Permanent Redirect) status code that is similar to 301 (Moved Permanently) but it keeps the payload in the redirect. This is important if we send a redirect in methods like POST.\n\n\n\n\nproxy-buffering\n\u00b6\n\n\nEnables or disables \nbuffering of responses from the proxied server\n.\n\n\nlimit-req-status-code\n\u00b6\n\n\nSets the \nstatus code to return in response to rejected requests\n. \ndefault:\n 503\n\n\nno-tls-redirect-locations\n\u00b6\n\n\nA comma-separated list of locations on which http requests will never get redirected to their https counterpart.\n\ndefault:\n \"/.well-known/acme-challenge\"\n\n\nno-auth-locations\n\u00b6\n\n\nA comma-separated list of locations that should not get authenticated.\n\ndefault:\n \"/.well-known/acme-challenge\"", + "title": "ConfigMaps" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#configmaps", + "text": "ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable. The ConfigMap API resource stores configuration data as key-value pairs. The data provides the configurations for system\ncomponents for the nginx-controller. Before you can begin using a config-map it must be deployed . 
In order to overwrite nginx-controller configuration values as seen in config.go ,\nyou can add key-value pairs to the data section of the config-map. For Example: data : \n map-hash-bucket-size : \"128\" \n ssl-protocols : SSLv2 Important The key and values in a ConfigMap can only be strings.\nThis means that if we want a value with boolean values we need to quote the values, like \"true\" or \"false\".\nSame for numbers, like \"100\". \"Slice\" types (defined below as []string or []int) can be provided as a comma-delimited string.", + "title": "ConfigMaps" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#configuration-options", + "text": "The following table shows a configuration option's name, type, and the default value: name type default add-headers string \"\" allow-backend-server-header bool \"false\" hide-headers string array empty access-log-path string \"/var/log/nginx/access.log\" error-log-path string \"/var/log/nginx/error.log\" enable-dynamic-tls-records bool \"true\" enable-modsecurity bool \"false\" enable-owasp-modsecurity-crs bool \"false\" client-header-buffer-size string \"1k\" client-header-timeout int 60 client-body-buffer-size string \"8k\" client-body-timeout int 60 disable-access-log bool false disable-ipv6 bool false disable-ipv6-dns bool false enable-underscores-in-headers bool false ignore-invalid-headers bool true retry-non-idempotent bool \"false\" error-log-level string \"notice\" http2-max-field-size string \"4k\" http2-max-header-size string \"16k\" hsts bool \"true\" hsts-include-subdomains bool \"true\" hsts-max-age string \"15724800\" hsts-preload bool \"false\" keep-alive int 75 keep-alive-requests int 100 large-client-header-buffers string \"4 8k\" log-format-escape-json bool \"false\" log-format-upstream string %v - [ $the_real_ip ] - $remote_user [ $time_local ] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" $request_length $request_time [ $proxy_upstream_name ] $upstream_addr 
$upstream_response_length $upstream_response_time $upstream_status log-format-stream string [$time_local] $protocol $status $bytes_sent $bytes_received $session_time enable-multi-accept bool \"true\" max-worker-connections int 16384 map-hash-bucket-size int 64 nginx-status-ipv4-whitelist []string \"127.0.0.1\" nginx-status-ipv6-whitelist []string \"::1\" proxy-real-ip-cidr []string \"0.0.0.0/0\" proxy-set-headers string \"\" server-name-hash-max-size int 1024 server-name-hash-bucket-size int proxy-headers-hash-max-size int 512 proxy-headers-hash-bucket-size int 64 reuse-port bool \"true\" server-tokens bool \"true\" ssl-ciphers string \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\" ssl-ecdh-curve string \"auto\" ssl-dh-param string \"\" ssl-protocols string \"TLSv1.2\" ssl-session-cache bool \"true\" ssl-session-cache-size string \"10m\" ssl-session-tickets bool \"true\" ssl-session-ticket-key string ssl-session-timeout string \"10m\" ssl-buffer-size string \"4k\" use-proxy-protocol bool \"false\" proxy-protocol-header-timeout string \"5s\" use-gzip bool \"true\" use-geoip bool \"true\" enable-brotli bool \"false\" brotli-level int 4 brotli-types string \"application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\" use-http2 bool \"true\" gzip-level int 5 gzip-types string \"application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json 
application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component\" worker-processes string worker-cpu-affinity string \"\" worker-shutdown-timeout string \"10s\" load-balance string \"round_robin\" variables-hash-bucket-size int 128 variables-hash-max-size int 2048 upstream-keepalive-connections int 32 limit-conn-zone-variable string \"$binary_remote_addr\" proxy-stream-timeout string \"600s\" proxy-stream-responses int 1 bind-address []string \"\" use-forwarded-headers bool \"true\" forwarded-for-header string \"X-Forwarded-For\" compute-full-forwarded-for bool \"false\" proxy-add-original-uri-header bool \"true\" generate-request-id bool \"true\" enable-opentracing bool \"false\" zipkin-collector-host string \"\" zipkin-collector-port int 9411 zipkin-service-name string \"nginx\" zipkin-sample-rate float 1.0 jaeger-collector-host string \"\" jaeger-collector-port int 6831 jaeger-service-name string \"nginx\" jaeger-sampler-type string \"const\" jaeger-sampler-param string \"1\" main-snippet string \"\" http-snippet string \"\" server-snippet string \"\" location-snippet string \"\" custom-http-errors []int []int{} proxy-body-size string \"1m\" proxy-connect-timeout int 5 proxy-read-timeout int 60 proxy-send-timeout int 60 proxy-buffer-size string \"4k\" proxy-cookie-path string \"off\" proxy-cookie-domain string \"off\" proxy-next-upstream string \"error timeout\" proxy-next-upstream-tries int 3 proxy-redirect-from string \"off\" proxy-request-buffering string \"on\" ssl-redirect bool \"true\" whitelist-source-range []string []string{} skip-access-log-urls []string []string{} limit-rate int 0 limit-rate-after int 0 http-redirect-code int 308 proxy-buffering string \"off\" limit-req-status-code int 503 no-tls-redirect-locations string \"/.well-known/acme-challenge\" no-auth-locations string \"/.well-known/acme-challenge\"", + "title": "Configuration options" + }, + { + "location": 
"/user-guide/nginx-configuration/configmap/#add-headers", + "text": "Sets custom headers from named configmap before sending traffic to the client. See proxy-set-headers . example", + "title": "add-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#allow-backend-server-header", + "text": "Enables the return of the header Server from the backend instead of the generic nginx string. default: is disabled", + "title": "allow-backend-server-header" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#hide-headers", + "text": "Sets additional header that will not be passed from the upstream server to the client response. default: empty References: http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header", + "title": "hide-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#access-log-path", + "text": "Access log path. Goes to /var/log/nginx/access.log by default. Note: the file /var/log/nginx/access.log is a symlink to /dev/stdout", + "title": "access-log-path" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#error-log-path", + "text": "Error log path. Goes to /var/log/nginx/error.log by default. Note: the file /var/log/nginx/error.log is a symlink to /dev/stderr References: http://nginx.org/en/docs/ngx_core_module.html#error_log", + "title": "error-log-path" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-dynamic-tls-records", + "text": "Enables dynamically sized TLS records to improve time-to-first-byte. default: is enabled References: https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency", + "title": "enable-dynamic-tls-records" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-modsecurity", + "text": "Enables the modsecurity module for NGINX. 
default: is disabled", + "title": "enable-modsecurity" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-owasp-modsecurity-crs", + "text": "Enables the OWASP ModSecurity Core Rule Set (CRS). default: is disabled", + "title": "enable-owasp-modsecurity-crs" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#client-header-buffer-size", + "text": "Allows to configure a custom buffer size for reading client request header. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size", + "title": "client-header-buffer-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#client-header-timeout", + "text": "Defines a timeout for reading client request header, in seconds. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout", + "title": "client-header-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#client-body-buffer-size", + "text": "Sets buffer size for reading client request body. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size", + "title": "client-body-buffer-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#client-body-timeout", + "text": "Defines a timeout for reading client request body, in seconds. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout", + "title": "client-body-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#disable-access-log", + "text": "Disables the Access Log from the entire Ingress Controller. default: '\"false\"' References: http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log", + "title": "disable-access-log" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#disable-ipv6", + "text": "Disable listening on IPV6. 
default: is disabled", + "title": "disable-ipv6" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#disable-ipv6-dns", + "text": "Disable IPV6 for nginx DNS resolver. default: is disabled", + "title": "disable-ipv6-dns" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-underscores-in-headers", + "text": "Enables underscores in header names. default: is disabled", + "title": "enable-underscores-in-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ignore-invalid-headers", + "text": "Set if header fields with invalid names should be ignored. default: is enabled", + "title": "ignore-invalid-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#retry-non-idempotent", + "text": "Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value \"true\".", + "title": "retry-non-idempotent" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#error-log-level", + "text": "Configures the logging level of errors. Log levels above are listed in the order of increasing severity. References: http://nginx.org/en/docs/ngx_core_module.html#error_log", + "title": "error-log-level" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#http2-max-field-size", + "text": "Limits the maximum size of an HPACK-compressed request header field. References: https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size", + "title": "http2-max-field-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#http2-max-header-size", + "text": "Limits the maximum size of the entire request header list after HPACK decompression. 
References: https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size", + "title": "http2-max-header-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#hsts", + "text": "Enables or disables the header HSTS in servers running SSL.\nHTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) that tells browsers that it should only be communicated with using HTTPS, instead of using HTTP. It provides protection against protocol downgrade attacks and cookie theft. References: https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security https://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server", + "title": "hsts" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#hsts-include-subdomains", + "text": "Enables or disables the use of HSTS in all the subdomains of the server-name.", + "title": "hsts-include-subdomains" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#hsts-max-age", + "text": "Sets the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.", + "title": "hsts-max-age" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#hsts-preload", + "text": "Enables or disables the preload attribute in the HSTS feature (when it is enabled)", + "title": "hsts-preload" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#keep-alive", + "text": "Sets the time during which a keep-alive client connection will stay open on the server side. The zero value disables keep-alive client connections. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout", + "title": "keep-alive" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#keep-alive-requests", + "text": "Sets the maximum number of requests that can be served through one keep-alive connection. 
References: http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests", + "title": "keep-alive-requests" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#large-client-header-buffers", + "text": "Sets the maximum number and size of buffers used for reading large client request header. default: 4 8k References: http://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers", + "title": "large-client-header-buffers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#log-format-escape-json", + "text": "Sets if the escape parameter allows JSON (\"true\") or default characters escaping in variables (\"false\") Sets the nginx log format .", + "title": "log-format-escape-json" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#log-format-upstream", + "text": "Sets the nginx log format .\nExample for json output: consolelog-format-upstream: '{ \"time\": \"$time_iso8601\", \"remote_addr\": \"$proxy_protocol_addr\",\"x-forward-for\": \"$proxy_add_x_forwarded_for\", \"request_id\": \"$req_id\", \"remote_user\":\"$remote_user\", \"bytes_sent\": $bytes_sent, \"request_time\": $request_time, \"status\":$status, \"vhost\": \"$host\", \"request_proto\": \"$server_protocol\", \"path\": \"$uri\",\"request_query\": \"$args\", \"request_length\": $request_length, \"duration\": $request_time,\"method\": \"$request_method\", \"http_referrer\": \"$http_referer\", \"http_user_agent\":\"$http_user_agent\" }' Please check the log-format for definition of each field.", + "title": "log-format-upstream" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#log-format-stream", + "text": "Sets the nginx stream format .", + "title": "log-format-stream" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-multi-accept", + "text": "If disabled, a worker process will accept one new connection at a time. Otherwise, a worker process will accept all new connections at a time. 
default: true References: http://nginx.org/en/docs/ngx_core_module.html#multi_accept", + "title": "enable-multi-accept" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#max-worker-connections", + "text": "Sets the maximum number of simultaneous connections that can be opened by each worker process", + "title": "max-worker-connections" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#map-hash-bucket-size", + "text": "Sets the bucket size for the map variables hash tables . The details of setting up hash tables are provided in a separate document .", + "title": "map-hash-bucket-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-real-ip-cidr", + "text": "If use-proxy-protocol is enabled, proxy-real-ip-cidr defines the default the IP/network address of your external load balancer.", + "title": "proxy-real-ip-cidr" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-set-headers", + "text": "Sets custom headers from named configmap before sending traffic to backends. The value format is namespace/name. See example", + "title": "proxy-set-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#server-name-hash-max-size", + "text": "Sets the maximum size of the server names hash tables used in server names,map directive\u2019s values, MIME types, names of request header strings, etc. References: http://nginx.org/en/docs/hash.html", + "title": "server-name-hash-max-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#server-name-hash-bucket-size", + "text": "Sets the size of the bucket for the server names hash tables. 
References: http://nginx.org/en/docs/hash.html http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size", + "title": "server-name-hash-bucket-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-headers-hash-max-size", + "text": "Sets the maximum size of the proxy headers hash tables. References: http://nginx.org/en/docs/hash.html https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size", + "title": "proxy-headers-hash-max-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#reuse-port", + "text": "Instructs NGINX to create an individual listening socket for each worker process (using the SO_REUSEPORT socket option), allowing a kernel to distribute incoming connections between worker processes default: true", + "title": "reuse-port" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-headers-hash-bucket-size", + "text": "Sets the size of the bucket for the proxy headers hash tables. References: http://nginx.org/en/docs/hash.html https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size", + "title": "proxy-headers-hash-bucket-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#server-tokens", + "text": "Send NGINX Server header in responses and display NGINX version in error pages. default: is enabled", + "title": "server-tokens" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-ciphers", + "text": "Sets the ciphers list to enable. The ciphers are specified in the format understood by the OpenSSL library. The default cipher list is:\n ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256 . 
The ordering of a ciphersuite is very important because it decides which algorithms are going to be selected in priority. The recommendation above prioritizes algorithms that provide perfect forward secrecy . Please check the Mozilla SSL Configuration Generator .", + "title": "ssl-ciphers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-ecdh-curve", + "text": "Specifies a curve for ECDHE ciphers. References: http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve", + "title": "ssl-ecdh-curve" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-dh-param", + "text": "Sets the name of the secret that contains Diffie-Hellman key to help with \"Perfect Forward Secrecy\". References: https://wiki.openssl.org/index.php/Diffie-Hellman_parameters https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam", + "title": "ssl-dh-param" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-protocols", + "text": "Sets the SSL protocols to use. The default is: TLSv1.2 . 
Please check the result of the configuration using https://ssllabs.com/ssltest/analyze.html or https://testssl.sh .", + "title": "ssl-protocols" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-session-cache", + "text": "Enables or disables the use of shared SSL cache among worker processes.", + "title": "ssl-session-cache" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-session-cache-size", + "text": "Sets the size of the SSL shared session cache between all worker processes.", + "title": "ssl-session-cache-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-session-tickets", + "text": "Enables or disables session resumption through TLS session tickets .", + "title": "ssl-session-tickets" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-session-ticket-key", + "text": "Sets the secret key used to encrypt and decrypt TLS session tickets. The value must be a valid base64 string.\nTo create a ticket: openssl rand 80 | openssl enc -A -base64 TLS session ticket-key , by default, a randomly generated key is used.", + "title": "ssl-session-ticket-key" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-session-timeout", + "text": "Sets the time during which a client may reuse the session parameters stored in a cache.", + "title": "ssl-session-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-buffer-size", + "text": "Sets the size of the SSL buffer used for sending data. The default of 4k helps NGINX to improve TLS Time To First Byte (TTTFB). 
References: https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/", + "title": "ssl-buffer-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#use-proxy-protocol", + "text": "Enables or disables the PROXY protocol to receive client connection (real IP address) information passed through proxy servers and load balancers such as HAProxy and Amazon Elastic Load Balancer (ELB).", + "title": "use-proxy-protocol" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-protocol-header-timeout", + "text": "Sets the timeout value for receiving the proxy-protocol headers. The default of 5 seconds prevents the TLS passthrough handler from waiting indefinitely on a dropped connection. default: 5s", + "title": "proxy-protocol-header-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#use-gzip", + "text": "Enables or disables compression of HTTP responses using the \"gzip\" module .\nThe default mime type list to compress is: application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component .", + "title": "use-gzip" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#use-geoip", + "text": "Enables or disables \"geoip\" module that creates variables with values depending on the client IP address, using the precompiled MaxMind databases. 
default: true", + "title": "use-geoip" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-brotli", + "text": "Enables or disables compression of HTTP responses using the \"brotli\" module .\nThe default mime type list to compress is: application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component . default: is disabled Note: Brotli does not works in Safari < 11. For more information see https://caniuse.com/#feat=brotli", + "title": "enable-brotli" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#brotli-level", + "text": "Sets the Brotli Compression Level that will be used. default: 4", + "title": "brotli-level" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#brotli-types", + "text": "Sets the MIME Types that will be compressed on-the-fly by brotli. default: application/xml+rss application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component", + "title": "brotli-types" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#use-http2", + "text": "Enables or disables HTTP/2 support in secure connections.", + "title": "use-http2" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#gzip-level", + "text": "Sets the gzip Compression Level that will be used. default: 5", + "title": "gzip-level" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#gzip-types", + "text": "Sets the MIME types in addition to \"text/html\" to compress. 
The special value \"*\" matches any MIME type. Responses with the \"text/html\" type are always compressed if use-gzip is enabled.", + "title": "gzip-types" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#worker-processes", + "text": "Sets the number of worker processes .\nThe default of \"auto\" means number of available CPU cores.", + "title": "worker-processes" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#worker-cpu-affinity", + "text": "Binds worker processes to the sets of CPUs. worker_cpu_affinity .\nBy default worker processes are not bound to any specific CPUs. The value can be: \"\": empty string indicate no affinity is applied. cpumask: e.g. 0001 0010 0100 1000 to bind processes to specific cpus. auto: binding worker processes automatically to available CPUs.", + "title": "worker-cpu-affinity" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#worker-shutdown-timeout", + "text": "Sets a timeout for Nginx to wait for worker to gracefully shutdown . default: \"10s\"", + "title": "worker-shutdown-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#load-balance", + "text": "Sets the algorithm to use for load balancing.\nThe value can either be: round_robin: to use the default round robin loadbalancer least_conn: to use the least connected method ( note that this is available only in non-dynamic mode: --enable-dynamic-configuration=false ) ip_hash: to use a hash of the server for routing ( note that this is available only in non-dynamic mode: --enable-dynamic-configuration=false , but alternatively you can consider using nginx.ingress.kubernetes.io/upstream-hash-by ) ewma: to use the Peak EWMA method for routing ( implementation ) The default is round_robin . 
References: http://nginx.org/en/docs/http/load_balancing.html", + "title": "load-balance" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#variables-hash-bucket-size", + "text": "Sets the bucket size for the variables hash table. References: http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_bucket_size", + "title": "variables-hash-bucket-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#variables-hash-max-size", + "text": "Sets the maximum size of the variables hash table. References: http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size", + "title": "variables-hash-max-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#upstream-keepalive-connections", + "text": "Activates the cache for connections to upstream servers. The connections parameter sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. When this\nnumber is exceeded, the least recently used connections are closed. default: 32 References: http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive", + "title": "upstream-keepalive-connections" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#limit-conn-zone-variable", + "text": "Sets parameters for a shared memory zone that will keep states for various keys of limit_conn_zone . The default of \"$binary_remote_addr\" variable\u2019s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses.", + "title": "limit-conn-zone-variable" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-stream-timeout", + "text": "Sets the timeout between two successive read or write operations on client or proxied server connections. If no data is transmitted within this time, the connection is closed. 
References: http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout", + "title": "proxy-stream-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-stream-responses", + "text": "Sets the number of datagrams expected from the proxied server in response to the client request if the UDP protocol is used. References: http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses", + "title": "proxy-stream-responses" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#bind-address", + "text": "Sets the addresses on which the server will accept requests instead of *. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop.", + "title": "bind-address" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#use-forwarded-headers", + "text": "If true, NGINX passes the incoming X-Forwarded-* headers to upstreams. Use this option when NGINX is behind another L7 proxy / load balancer that is setting these headers. If false, NGINX ignores incoming X-Forwarded-* headers, filling them with the request information it sees. Use this option if NGINX is exposed directly to the internet, or it's behind a L3/packet-based load balancer that doesn't alter the source IP in the packets.", + "title": "use-forwarded-headers" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#forwarded-for-header", + "text": "Sets the header field for identifying the originating IP address of a client. default: X-Forwarded-For", + "title": "forwarded-for-header" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#compute-full-forwarded-for", + "text": "Append the remote address to the X-Forwarded-For header instead of replacing it. 
When this option is enabled, the upstream application is responsible for extracting the client IP based on its own list of trusted proxies.", + "title": "compute-full-forwarded-for" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-add-original-uri-header", + "text": "Adds an X-Original-Uri header with the original request URI to the backend request", + "title": "proxy-add-original-uri-header" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#generate-request-id", + "text": "Ensures that X-Request-ID is defaulted to a random value, if no X-Request-ID is present in the request", + "title": "generate-request-id" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#enable-opentracing", + "text": "Enables the nginx Opentracing extension. default: is disabled References: https://github.com/opentracing-contrib/nginx-opentracing", + "title": "enable-opentracing" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#zipkin-collector-host", + "text": "Specifies the host to use when uploading traces. It must be a valid URL.", + "title": "zipkin-collector-host" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#zipkin-collector-port", + "text": "Specifies the port to use when uploading traces. default: 9411", + "title": "zipkin-collector-port" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#zipkin-service-name", + "text": "Specifies the service name to use for any traces created. default: nginx", + "title": "zipkin-service-name" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#zipkin-sample-rate", + "text": "Specifies sample rate for any traces created. default: 1.0", + "title": "zipkin-sample-rate" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#jaeger-collector-host", + "text": "Specifies the host to use when uploading traces. 
It must be a valid URL.", + "title": "jaeger-collector-host" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#jaeger-collector-port", + "text": "Specifies the port to use when uploading traces. default: 6831", + "title": "jaeger-collector-port" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#jaeger-service-name", + "text": "Specifies the service name to use for any traces created. default: nginx", + "title": "jaeger-service-name" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#jaeger-sampler-type", + "text": "Specifies the sampler to be used when sampling traces. The available samplers are: const, probabilistic, ratelimiting, remote. default: const", + "title": "jaeger-sampler-type" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#jaeger-sampler-param", + "text": "Specifies the argument to be passed to the sampler constructor. Must be a number.\nFor const this should be 0 to never sample and 1 to always sample. default: 1", + "title": "jaeger-sampler-param" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#main-snippet", + "text": "Adds custom configuration to the main section of the nginx configuration.", + "title": "main-snippet" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#http-snippet", + "text": "Adds custom configuration to the http section of the nginx configuration.", + "title": "http-snippet" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#server-snippet", + "text": "Adds custom configuration to all the servers in the nginx configuration.", + "title": "server-snippet" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#location-snippet", + "text": "Adds custom configuration to all the locations in the nginx configuration.", + "title": "location-snippet" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#custom-http-errors", + "text": "Enables which HTTP codes should be passed for processing with 
the error_page directive Setting at least one code also enables proxy_intercept_errors which are required to process error_page. Example usage: custom-http-errors: 404,415", + "title": "custom-http-errors" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-body-size", + "text": "Sets the maximum allowed size of the client request body.\nSee NGINX client_max_body_size .", + "title": "proxy-body-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-connect-timeout", + "text": "Sets the timeout for establishing a connection with a proxied server . It should be noted that this timeout cannot usually exceed 75 seconds.", + "title": "proxy-connect-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-read-timeout", + "text": "Sets the timeout in seconds for reading a response from the proxied server . The timeout is set only between two successive read operations, not for the transmission of the whole response.", + "title": "proxy-read-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-send-timeout", + "text": "Sets the timeout in seconds for transmitting a request to the proxied server . The timeout is set only between two successive write operations, not for the transmission of the whole request.", + "title": "proxy-send-timeout" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-buffer-size", + "text": "Sets the size of the buffer used for reading the first part of the response received from the proxied server. 
This part usually contains a small response header.", + "title": "proxy-buffer-size" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-cookie-path", + "text": "Sets a text that should be changed in the path attribute of the \u201cSet-Cookie\u201d header fields of a proxied server response.", + "title": "proxy-cookie-path" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-cookie-domain", + "text": "Sets a text that should be changed in the domain attribute of the \u201cSet-Cookie\u201d header fields of a proxied server response.", + "title": "proxy-cookie-domain" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-next-upstream", + "text": "Specifies in which cases a request should be passed to the next server.", + "title": "proxy-next-upstream" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-next-upstream-tries", + "text": "Limit the number of possible tries a request should be passed to the next server.", + "title": "proxy-next-upstream-tries" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-redirect-from", + "text": "Sets the original text that should be changed in the \"Location\" and \"Refresh\" header fields of a proxied server response. default: off References: http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect", + "title": "proxy-redirect-from" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-request-buffering", + "text": "Enables or disables buffering of a client request body .", + "title": "proxy-request-buffering" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#ssl-redirect", + "text": "Sets the global value of redirects (301) to HTTPS if the server has a TLS certificate (defined in an Ingress rule). 
default: \"true\"", + "title": "ssl-redirect" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#whitelist-source-range", + "text": "Sets the default whitelisted IPs for each server block. This can be overwritten by an annotation on an Ingress rule.\nSee ngx_http_access_module .", + "title": "whitelist-source-range" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#skip-access-log-urls", + "text": "Sets a list of URLs that should not appear in the NGINX access log. This is useful with urls like /health or health-check that make \"complex\" reading the logs. default: is empty", + "title": "skip-access-log-urls" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#limit-rate", + "text": "Limits the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate", + "title": "limit-rate" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#limit-rate-after", + "text": "Sets the initial amount after which the further transmission of a response to a client will be rate limited. References: http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after", + "title": "limit-rate-after" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#http-redirect-code", + "text": "Sets the HTTP status code to be used in redirects.\nSupported codes are 301 , 302 , 307 and 308 default: 308 Why the default code is 308? RFC 7238 was created to define the 308 (Permanent Redirect) status code that is similar to 301 (Moved Permanently) but it keeps the payload in the redirect. 
This is important if the we send a redirect in methods like POST.", + "title": "http-redirect-code" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#proxy-buffering", + "text": "Enables or disables buffering of responses from the proxied server .", + "title": "proxy-buffering" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#limit-req-status-code", + "text": "Sets the status code to return in response to rejected requests . default: 503", + "title": "limit-req-status-code" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#no-tls-redirect-locations", + "text": "A comma-separated list of locations on which http requests will never get redirected to their https counterpart. default: \"/.well-known/acme-challenge\"", + "title": "no-tls-redirect-locations" + }, + { + "location": "/user-guide/nginx-configuration/configmap/#no-auth-locations", + "text": "A comma-separated list of locations that should not get authenticated. default: \"/.well-known/acme-challenge\"", + "title": "no-auth-locations" + }, + { + "location": "/user-guide/nginx-configuration/custom-template/", + "text": "Custom NGINX template\n\u00b6\n\n\nThe NGINX template is located in the file \n/etc/nginx/template/nginx.tmpl\n.\n\n\nUsing a \nVolume\n it is possible to use a custom template. \nThis includes using a \nConfigmap\n as source of the template\n\n\n \nvolumeMounts\n:\n\n \n-\n \nmountPath\n:\n \n/etc/nginx/template\n\n \nname\n:\n \nnginx-template-volume\n\n \nreadOnly\n:\n \ntrue\n\n \n \nvolumes\n:\n\n \n-\n \nname\n:\n \nnginx-template-volume\n\n \nconfigMap\n:\n\n \nname\n:\n \nnginx-template\n\n \nitems\n:\n\n \n-\n \nkey\n:\n \nnginx.tmpl\n\n \npath\n:\n \nnginx.tmpl\n\n\n\n\n\n\nPlease note the template is tied to the Go code. 
Do not change names in the variable \n$cfg\n.\n\n\nFor more information about the template syntax please check the \nGo template package\n.\nIn addition to the built-in functions provided by the Go package the following functions are also available:\n\n\n\n\nempty: returns true if the specified parameter (string) is empty\n\n\ncontains: \nstrings.Contains\n\n\nhasPrefix: \nstrings.HasPrefix\n\n\nhasSuffix: \nstrings.HasSuffix\n\n\ntoUpper: \nstrings.ToUpper\n\n\ntoLower: \nstrings.ToLower\n\n\nbuildLocation: helps to build the NGINX Location section in each server\n\n\nbuildProxyPass: builds the reverse proxy configuration\n\n\nbuildRateLimit: helps to build a limit zone inside a location if contains a rate limit annotation\n\n\n\n\nTODO:\n\n\n\n\nbuildAuthLocation:\n\n\nbuildAuthResponseHeaders:\n\n\nbuildResolvers:\n\n\nbuildLogFormatUpstream:\n\n\nbuildDenyVariable:\n\n\nbuildUpstreamName:\n\n\nbuildForwardedFor:\n\n\nbuildAuthSignURL:\n\n\nbuildNextUpstream:\n\n\nfilterRateLimits:\n\n\nformatIP:\n\n\ngetenv:\n\n\ngetIngressInformation:\n\n\nserverConfig:\n\n\nisLocationAllowed:\n\n\nisValidClientBodyBufferSize:", + "title": "Custom NGINX template" + }, + { + "location": "/user-guide/nginx-configuration/custom-template/#custom-nginx-template", + "text": "The NGINX template is located in the file /etc/nginx/template/nginx.tmpl . Using a Volume it is possible to use a custom template. \nThis includes using a Configmap as source of the template volumeMounts : \n - mountPath : /etc/nginx/template \n name : nginx-template-volume \n readOnly : true \n volumes : \n - name : nginx-template-volume \n configMap : \n name : nginx-template \n items : \n - key : nginx.tmpl \n path : nginx.tmpl Please note the template is tied to the Go code. Do not change names in the variable $cfg . 
For more information about the template syntax please check the Go template package .\nIn addition to the built-in functions provided by the Go package the following functions are also available: empty: returns true if the specified parameter (string) is empty contains: strings.Contains hasPrefix: strings.HasPrefix hasSuffix: strings.HasSuffix toUpper: strings.ToUpper toLower: strings.ToLower buildLocation: helps to build the NGINX Location section in each server buildProxyPass: builds the reverse proxy configuration buildRateLimit: helps to build a limit zone inside a location if contains a rate limit annotation TODO: buildAuthLocation: buildAuthResponseHeaders: buildResolvers: buildLogFormatUpstream: buildDenyVariable: buildUpstreamName: buildForwardedFor: buildAuthSignURL: buildNextUpstream: filterRateLimits: formatIP: getenv: getIngressInformation: serverConfig: isLocationAllowed: isValidClientBodyBufferSize:", + "title": "Custom NGINX template" + }, + { + "location": "/user-guide/nginx-configuration/log-format/", + "text": "Log format\n\u00b6\n\n\nThe default configuration uses a custom logging format to add additional information about upstreams, response time and status.\n\n\nlog_format upstreaminfo\n\n\n '\n{{\n \nif\n \n$\ncfg.useProxyProtocol\n \n}}\n$proxy_protocol_addr\n{{\n \nelse\n \n}}\n$remote_addr\n{{\n \nend\n \n}}\n - '\n\n\n '[$the_real_ip] - $remote_user [$time_local] \"$request\" '\n\n\n '$status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" '\n\n\n '$request_length $request_time [$proxy_upstream_name] $upstream_addr '\n\n\n '$upstream_response_length $upstream_response_time $upstream_status';\n\n\n\n\n\n\n\n\n\n\n\n\nPlaceholder\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\n$proxy_protocol_addr\n\n\nremote address if proxy protocol is enabled\n\n\n\n\n\n\n$remote_addr\n\n\nremote address if proxy protocol is disabled (default)\n\n\n\n\n\n\n$the_real_ip\n\n\nthe source IP address of the client\n\n\n\n\n\n\n$remote_user\n\n\nuser name 
supplied with the Basic authentication\n\n\n\n\n\n\n$time_local\n\n\nlocal time in the Common Log Format\n\n\n\n\n\n\n$request\n\n\nfull original request line\n\n\n\n\n\n\n$status\n\n\nresponse status\n\n\n\n\n\n\n$body_bytes_sent\n\n\nnumber of bytes sent to a client, not counting the response header\n\n\n\n\n\n\n$http_referer\n\n\nvalue of the Referer header\n\n\n\n\n\n\n$http_user_agent\n\n\nvalue of User-Agent header\n\n\n\n\n\n\n$request_length\n\n\nrequest length (including request line, header, and request body)\n\n\n\n\n\n\n$request_time\n\n\ntime elapsed since the first bytes were read from the client\n\n\n\n\n\n\n$proxy_upstream_name\n\n\nname of the upstream. The format is \nupstream---\n\n\n\n\n\n\n$upstream_addr\n\n\nthe IP address and port (or the path to the domain socket) of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas.\n\n\n\n\n\n\n$upstream_response_length\n\n\nthe length of the response obtained from the upstream server\n\n\n\n\n\n\n$upstream_response_time\n\n\ntime spent on receiving the response from the upstream server as seconds with millisecond resolution\n\n\n\n\n\n\n$upstream_status\n\n\nstatus code of the response obtained from the upstream server\n\n\n\n\n\n\n\n\nAdditional available variables:\n\n\n\n\n\n\n\n\nPlaceholder\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\n$namespace\n\n\nnamespace of the ingress\n\n\n\n\n\n\n$ingress_name\n\n\nname of the ingress\n\n\n\n\n\n\n$service_name\n\n\nname of the service\n\n\n\n\n\n\n$service_port\n\n\nport of the service\n\n\n\n\n\n\n\n\nSources:\n\n\n\n\nUpstream variables\n\n\nEmbedded variables", + "title": "Log format" + }, + { + "location": "/user-guide/nginx-configuration/log-format/#log-format", + "text": "The default configuration uses a custom logging format to add additional information about upstreams, response time and status. 
log_format upstreaminfo ' {{ if $ cfg.useProxyProtocol }} $proxy_protocol_addr {{ else }} $remote_addr {{ end }} - ' '[$the_real_ip] - $remote_user [$time_local] \"$request\" ' '$status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" ' '$request_length $request_time [$proxy_upstream_name] $upstream_addr ' '$upstream_response_length $upstream_response_time $upstream_status'; Placeholder Description $proxy_protocol_addr remote address if proxy protocol is enabled $remote_addr remote address if proxy protocol is disabled (default) $the_real_ip the source IP address of the client $remote_user user name supplied with the Basic authentication $time_local local time in the Common Log Format $request full original request line $status response status $body_bytes_sent number of bytes sent to a client, not counting the response header $http_referer value of the Referer header $http_user_agent value of User-Agent header $request_length request length (including request line, header, and request body) $request_time time elapsed since the first bytes were read from the client $proxy_upstream_name name of the upstream. The format is upstream--- $upstream_addr the IP address and port (or the path to the domain socket) of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas. 
$upstream_response_length the length of the response obtained from the upstream server $upstream_response_time time spent on receiving the response from the upstream server as seconds with millisecond resolution $upstream_status status code of the response obtained from the upstream server Additional available variables: Placeholder Description $namespace namespace of the ingress $ingress_name name of the ingress $service_name name of the service $service_port port of the service Sources: Upstream variables Embedded variables", + "title": "Log format" + }, + { + "location": "/user-guide/cli-arguments/", + "text": "Command line arguments\n\u00b6\n\n\nThe following command line arguments are accepted by the Ingress controller executable.\n\n\nThey are set in the container spec of the \nnginx-ingress-controller\n Deployment manifest (see \ndeploy/with-rbac.yaml\n or \ndeploy/without-rbac.yaml\n).\n\n\n\n\n\n\n\n\nArgument\n\n\nDescription\n\n\n\n\n\n\n\n\n\n\n--alsologtostderr\n\n\nlog to standard error as well as files\n\n\n\n\n\n\n--annotations-prefix string\n\n\nPrefix of the Ingress annotations specific to the NGINX controller. (default \"nginx.ingress.kubernetes.io\")\n\n\n\n\n\n\n--apiserver-host string\n\n\nAddress of the Kubernetes API server. Takes the form \"protocol://address:port\". If not specified, it is assumed the program runs inside a Kubernetes cluster and local discovery is attempted.\n\n\n\n\n\n\n--configmap string\n\n\nName of the ConfigMap containing custom global configurations for the controller.\n\n\n\n\n\n\n--default-backend-service string\n\n\nService used to serve HTTP requests not matching any known server name (catch-all). Takes the form \"namespace/name\". The controller configures NGINX to forward requests to the first port of this Service.\n\n\n\n\n\n\n--default-server-port int\n\n\nPort to use for exposing the default server (catch-all). 
(default 8181)\n\n\n\n\n\n\n--default-ssl-certificate string\n\n\nSecret containing a SSL certificate to be used by the default HTTPS server (catch-all). Takes the form \"namespace/name\".\n\n\n\n\n\n\n--election-id string\n\n\nElection id to use for Ingress status updates. (default \"ingress-controller-leader\")\n\n\n\n\n\n\n--enable-dynamic-configuration\n\n\nDynamically refresh backends on topology changes instead of reloading NGINX. Feature backed by OpenResty Lua libraries. (enabled by default)\n\n\n\n\n\n\n--enable-dynamic-certificates\n\n\nDynamically serves certificates instead of reloading NGINX when certificates are created, updated, or deleted. Currently does not support OCSP stapling, so --enable-ssl-chain-completion must be turned off. Assuming the certificate is generated with a 2048 bit RSA key/cert pair, this feature can store roughly 5000 certificates. This is an experiemental feature that currently is not ready for production use. Feature backed by OpenResty Lua libraries. (disabled by default)\n\n\n\n\n\n\n--enable-ssl-chain-completion\n\n\nAutocomplete SSL certificate chains with missing intermediate CA certificates. A valid certificate chain is required to enable OCSP stapling. Certificates uploaded to Kubernetes must have the \"Authority Information Access\" X.509 v3 extension for this to succeed. (default true)\n\n\n\n\n\n\n--enable-ssl-passthrough\n\n\nEnable SSL Passthrough.\n\n\n\n\n\n\n--force-namespace-isolation\n\n\nForce namespace isolation. Prevents Ingress objects from referencing Secrets and ConfigMaps located in a different namespace than their own. May be used together with watch-namespace.\n\n\n\n\n\n\n--health-check-path string\n\n\nURL path of the health check endpoint. Configured inside the NGINX status server. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. (default \"/healthz\")\n\n\n\n\n\n\n--healthz-port int\n\n\nPort to use for the healthz endpoint. 
(default 10254)\n\n\n\n\n\n\n--http-port int\n\n\nPort to use for servicing HTTP traffic. (default 80)\n\n\n\n\n\n\n--https-port int\n\n\nPort to use for servicing HTTPS traffic. (default 443)\n\n\n\n\n\n\n--ingress-class string\n\n\nName of the ingress class this controller satisfies. The class of an Ingress object is set using the annotation \"kubernetes.io/ingress.class\". All ingress classes are satisfied if this parameter is left empty.\n\n\n\n\n\n\n--kubeconfig string\n\n\nPath to a kubeconfig file containing authorization and API server information.\n\n\n\n\n\n\n--log_backtrace_at traceLocation\n\n\nwhen logging hits line file:N, emit a stack trace (default :0)\n\n\n\n\n\n\n--log_dir string\n\n\nIf non-empty, write log files in this directory\n\n\n\n\n\n\n--logtostderr\n\n\nlog to standard error instead of files (default true)\n\n\n\n\n\n\n--profiling\n\n\nEnable profiling via web interface host:port/debug/pprof/ (default true)\n\n\n\n\n\n\n--publish-service string\n\n\nService fronting the Ingress controller. Takes the form \"namespace/name\". When used together with update-status, the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies.\n\n\n\n\n\n\n--publish-status-address string\n\n\nCustomized address to set as the load-balancer status of Ingress objects this controller satisfies. Requires the update-status parameter.\n\n\n\n\n\n\n--report-node-internal-ip-address\n\n\nSet the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter.\n\n\n\n\n\n\n--sort-backends\n\n\nSort servers inside NGINX upstreams.\n\n\n\n\n\n\n--ssl-passthrough-proxy-port int\n\n\nPort to use internally for SSL Passthrough. (default 442)\n\n\n\n\n\n\n--status-port int\n\n\nPort to use for exposing NGINX status pages. 
(default 18080)\n\n\n\n\n\n\n--stderrthreshold severity\n\n\nlogs at or above this threshold go to stderr (default 2)\n\n\n\n\n\n\n--sync-period duration\n\n\nPeriod at which the controller forces the repopulation of its local object stores. (default is 0)\n\n\n\n\n\n\n--sync-rate-limit float32\n\n\nDefine the sync frequency upper limit (default 0.3)\n\n\n\n\n\n\n--tcp-services-configmap string\n\n\nName of the ConfigMap containing the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form \"namespace/name:port\", where \"port\" can either be a port number or name. TCP ports 80 and 443 are reserved by the controller for servicing HTTP traffic.\n\n\n\n\n\n\n--udp-services-configmap string\n\n\nName of the ConfigMap containing the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form \"namespace/name:port\", where \"port\" can either be a port name or number.\n\n\n\n\n\n\n--update-status\n\n\nUpdate the load-balancer status of Ingress objects this controller satisfies. Requires setting the publish-service parameter to a valid Service reference. (default true)\n\n\n\n\n\n\n--update-status-on-shutdown\n\n\nUpdate the load-balancer status of Ingress objects when the controller shuts down. Requires the update-status parameter. (default true)\n\n\n\n\n\n\n--v Level\n\n\nlog level for V logs\n\n\n\n\n\n\n--version\n\n\nShow release information about the NGINX Ingress controller and exit.\n\n\n\n\n\n\n--vmodule moduleSpec\n\n\ncomma-separated list of pattern=N settings for file-filtered logging\n\n\n\n\n\n\n--watch-namespace string\n\n\nNamespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. 
All namespaces are watched if this parameter is left empty.", + "title": "Command line arguments" + }, + { + "location": "/user-guide/cli-arguments/#command-line-arguments", + "text": "The following command line arguments are accepted by the Ingress controller executable. They are set in the container spec of the nginx-ingress-controller Deployment manifest (see deploy/with-rbac.yaml or deploy/without-rbac.yaml ). Argument Description --alsologtostderr log to standard error as well as files --annotations-prefix string Prefix of the Ingress annotations specific to the NGINX controller. (default \"nginx.ingress.kubernetes.io\") --apiserver-host string Address of the Kubernetes API server. Takes the form \"protocol://address:port\". If not specified, it is assumed the program runs inside a Kubernetes cluster and local discovery is attempted. --configmap string Name of the ConfigMap containing custom global configurations for the controller. --default-backend-service string Service used to serve HTTP requests not matching any known server name (catch-all). Takes the form \"namespace/name\". The controller configures NGINX to forward requests to the first port of this Service. --default-server-port int Port to use for exposing the default server (catch-all). (default 8181) --default-ssl-certificate string Secret containing a SSL certificate to be used by the default HTTPS server (catch-all). Takes the form \"namespace/name\". --election-id string Election id to use for Ingress status updates. (default \"ingress-controller-leader\") --enable-dynamic-configuration Dynamically refresh backends on topology changes instead of reloading NGINX. Feature backed by OpenResty Lua libraries. (enabled by default) --enable-dynamic-certificates Dynamically serves certificates instead of reloading NGINX when certificates are created, updated, or deleted. Currently does not support OCSP stapling, so --enable-ssl-chain-completion must be turned off. 
Assuming the certificate is generated with a 2048 bit RSA key/cert pair, this feature can store roughly 5000 certificates. This is an experimental feature that currently is not ready for production use. Feature backed by OpenResty Lua libraries. (disabled by default) --enable-ssl-chain-completion Autocomplete SSL certificate chains with missing intermediate CA certificates. A valid certificate chain is required to enable OCSP stapling. Certificates uploaded to Kubernetes must have the \"Authority Information Access\" X.509 v3 extension for this to succeed. (default true) --enable-ssl-passthrough Enable SSL Passthrough. --force-namespace-isolation Force namespace isolation. Prevents Ingress objects from referencing Secrets and ConfigMaps located in a different namespace than their own. May be used together with watch-namespace. --health-check-path string URL path of the health check endpoint. Configured inside the NGINX status server. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. (default \"/healthz\") --healthz-port int Port to use for the healthz endpoint. (default 10254) --http-port int Port to use for servicing HTTP traffic. (default 80) --https-port int Port to use for servicing HTTPS traffic. (default 443) --ingress-class string Name of the ingress class this controller satisfies. The class of an Ingress object is set using the annotation \"kubernetes.io/ingress.class\". All ingress classes are satisfied if this parameter is left empty. --kubeconfig string Path to a kubeconfig file containing authorization and API server information. 
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files (default true) --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) --publish-service string Service fronting the Ingress controller. Takes the form \"namespace/name\". When used together with update-status, the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies. --publish-status-address string Customized address to set as the load-balancer status of Ingress objects this controller satisfies. Requires the update-status parameter. --report-node-internal-ip-address Set the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter. --sort-backends Sort servers inside NGINX upstreams. --ssl-passthrough-proxy-port int Port to use internally for SSL Passthrough. (default 442) --status-port int Port to use for exposing NGINX status pages. (default 18080) --stderrthreshold severity logs at or above this threshold go to stderr (default 2) --sync-period duration Period at which the controller forces the repopulation of its local object stores. (default is 0) --sync-rate-limit float32 Define the sync frequency upper limit (default 0.3) --tcp-services-configmap string Name of the ConfigMap containing the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form \"namespace/name:port\", where \"port\" can either be a port number or name. TCP ports 80 and 443 are reserved by the controller for servicing HTTP traffic. --udp-services-configmap string Name of the ConfigMap containing the definition of the UDP services to expose. The key in the map indicates the external port to be used. 
The value is a reference to a Service in the form \"namespace/name:port\", where \"port\" can either be a port name or number. --update-status Update the load-balancer status of Ingress objects this controller satisfies. Requires setting the publish-service parameter to a valid Service reference. (default true) --update-status-on-shutdown Update the load-balancer status of Ingress objects when the controller shuts down. Requires the update-status parameter. (default true) --v Level log level for V logs --version Show release information about the NGINX Ingress controller and exit. --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging --watch-namespace string Namespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. All namespaces are watched if this parameter is left empty.", + "title": "Command line arguments" + }, + { + "location": "/user-guide/custom-errors/", + "text": "Custom errors\n\u00b6\n\n\nWhen the \ncustom-http-errors\n option is enabled, the Ingress controller configures NGINX so\nthat it passes several HTTP headers down to its \ndefault-backend\n in case of error:\n\n\n\n\n\n\n\n\nHeader\n\n\nValue\n\n\n\n\n\n\n\n\n\n\nX-Code\n\n\nHTTP status code returned by the request\n\n\n\n\n\n\nX-Format\n\n\nValue of the \nAccept\n header sent by the client\n\n\n\n\n\n\nX-Original-URI\n\n\nURI that caused the error\n\n\n\n\n\n\nX-Namespace\n\n\nNamespace where the backend Service is located\n\n\n\n\n\n\nX-Ingress-Name\n\n\nName of the Ingress where the backend is defined\n\n\n\n\n\n\nX-Service-Name\n\n\nName of the Service backing the backend\n\n\n\n\n\n\nX-Service-Port\n\n\nPort number of the Service backing the backend\n\n\n\n\n\n\n\n\nA custom error backend can use this information to return the best possible representation of an error page. 
For\nexample, if the value of the \nAccept\n header sent by the client was \napplication/json\n, a carefully crafted backend\ncould decide to return the error payload as a JSON document instead of HTML.\n\n\n\n\nImportant\n\n\nThe custom backend is expected to return the correct HTTP status code instead of \n200\n.\nNGINX does not change the response from the custom default backend.\n\n\n\n\nAn example of such custom backend is available inside the source repository at \nimages/custom-error-pages\n.\n\n\nSee also the \nCustom errors\n example.", + "title": "Custom errors" + }, + { + "location": "/user-guide/custom-errors/#custom-errors", + "text": "When the custom-http-errors option is enabled, the Ingress controller configures NGINX so\nthat it passes several HTTP headers down to its default-backend in case of error: Header Value X-Code HTTP status code returned by the request X-Format Value of the Accept header sent by the client X-Original-URI URI that caused the error X-Namespace Namespace where the backend Service is located X-Ingress-Name Name of the Ingress where the backend is defined X-Service-Name Name of the Service backing the backend X-Service-Port Port number of the Service backing the backend A custom error backend can use this information to return the best possible representation of an error page. For\nexample, if the value of the Accept header sent by the client was application/json , a carefully crafted backend\ncould decide to return the error payload as a JSON document instead of HTML. Important The custom backend is expected to return the correct HTTP status code instead of 200 .\nNGINX does not change the response from the custom default backend. An example of such custom backend is available inside the source repository at images/custom-error-pages . 
See also the Custom errors example.", + "title": "Custom errors" + }, + { + "location": "/user-guide/default-backend/", + "text": "Default backend\n\u00b6\n\n\nThe default backend is a service which handles all URL paths and hosts the nginx controller doesn't understand\n(i.e., all the requests that are not mapped with an Ingress).\n\n\nBasically a default backend exposes two URLs:\n\n\n\n\n/healthz\n that returns 200\n\n\n/\n that returns 404\n\n\n\n\n\n\nExample\n\n\nThe sub-directory \n/images/404-server\n\nprovides a service which satisfies the requirements for a default backend.\n\n\n\n\n\n\nExample\n\n\nThe sub-directory \n/images/custom-error-pages\n\nprovides an additional service for the purpose of customizing the error pages served via the default backend.", + "title": "Default backend" + }, + { + "location": "/user-guide/default-backend/#default-backend", + "text": "The default backend is a service which handles all URL paths and hosts the nginx controller doesn't understand\n(i.e., all the requests that are not mapped with an Ingress). Basically a default backend exposes two URLs: /healthz that returns 200 / that returns 404 Example The sub-directory /images/404-server \nprovides a service which satisfies the requirements for a default backend. Example The sub-directory /images/custom-error-pages \nprovides an additional service for the purpose of customizing the error pages served via the default backend.", + "title": "Default backend" + }, + { + "location": "/user-guide/exposing-tcp-udp-services/", + "text": "Exposing TCP and UDP services\n\u00b6\n\n\nIngress does not support TCP or UDP services. For this reason this Ingress controller uses the flags \n--tcp-services-configmap\n and \n--udp-services-configmap\n to point to an existing config map where the key is the external port to use and the value indicates the service to expose using the format:\n\n::[PROXY]:[PROXY]\n\n\nIt is also possible to use a number or the name of the port. 
The two last fields are optional.\nAdding \nPROXY\n in either or both of the two last fields we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/).\n\n\nThe next example shows how to expose the service \nexample-go\n running in the namespace \ndefault\n in the port \n8080\n using the port \n9000\n\n\napiVersion\n:\n \nv1\n\n\nkind\n:\n \nConfigMap\n\n\nmetadata\n:\n\n \nname\n:\n \ntcp-services\n\n \nnamespace\n:\n \ningress-nginx\n\n\ndata\n:\n\n \n9000\n:\n \n\"default/example-go:8080\"\n\n\n\n\n\n\nSince 1.9.13 NGINX provides \nUDP Load Balancing\n.\nThe next example shows how to expose the service \nkube-dns\n running in the namespace \nkube-system\n in the port \n53\n using the port \n53\n\n\napiVersion\n:\n \nv1\n\n\nkind\n:\n \nConfigMap\n\n\nmetadata\n:\n\n \nname\n:\n \nudp-services\n\n \nnamespace\n:\n \ningress-nginx\n\n\ndata\n:\n\n\n \u00a0\n53\n:\n \n\"kube-system/kube-dns:53\"", + "title": "Exposing TCP and UDP services" + }, + { + "location": "/user-guide/exposing-tcp-udp-services/#exposing-tcp-and-udp-services", + "text": "Ingress does not support TCP or UDP services. For this reason this Ingress controller uses the flags --tcp-services-configmap and --udp-services-configmap to point to an existing config map where the key is the external port to use and the value indicates the service to expose using the format: ::[PROXY]:[PROXY] It is also possible to use a number or the name of the port. The two last fields are optional.\nAdding PROXY in either or both of the two last fields we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/). 
The next example shows how to expose the service example-go running in the namespace default in the port 8080 using the port 9000 apiVersion : v1 kind : ConfigMap metadata : \n name : tcp-services \n namespace : ingress-nginx data : \n 9000 : \"default/example-go:8080\" Since 1.9.13 NGINX provides UDP Load Balancing .\nThe next example shows how to expose the service kube-dns running in the namespace kube-system in the port 53 using the port 53 apiVersion : v1 kind : ConfigMap metadata : \n name : udp-services \n namespace : ingress-nginx data : \u00a0 53 : \"kube-system/kube-dns:53\"", + "title": "Exposing TCP and UDP services" + }, + { + "location": "/user-guide/external-articles/", + "text": "External Articles\n\u00b6\n\n\n\n\nPain(less) NGINX Ingress\n\n\nAccessing Kubernetes Pods from Outside of the Cluster\n\n\nKubernetes - Redirect HTTP to HTTPS with ELB and the nginx ingress controller\n\n\nConfigure Nginx Ingress Controller for TLS termination on Kubernetes on Azure", + "title": "External Articles" + }, + { + "location": "/user-guide/external-articles/#external-articles", + "text": "Pain(less) NGINX Ingress Accessing Kubernetes Pods from Outside of the Cluster Kubernetes - Redirect HTTP to HTTPS with ELB and the nginx ingress controller Configure Nginx Ingress Controller for TLS termination on Kubernetes on Azure", + "title": "External Articles" + }, + { + "location": "/user-guide/miscellaneous/", + "text": "Miscellaneous\n\u00b6\n\n\nSource IP address\n\u00b6\n\n\nBy default NGINX uses the content of the header \nX-Forwarded-For\n as the source of truth to get information about the client IP address. 
This works without issues in L7 \nif we configure the setting \nproxy-real-ip-cidr\n with the correct information of the IP/network address of trusted external load balancer.\n\n\nIf the ingress controller is running in AWS we need to use the VPC IPv4 CIDR.\n\n\nAnother option is to enable proxy protocol using \nuse-proxy-protocol: \"true\"\n.\n\n\nIn this mode NGINX does not use the content of the header to get the source IP address of the connection.\n\n\nProxy Protocol\n\u00b6\n\n\nIf you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you could use the \nProxy Protocol\n for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself.\n\n\nAmongst others \nELBs in AWS\n and \nHAProxy\n support Proxy Protocol.\n\n\nWebsockets\n\u00b6\n\n\nSupport for websockets is provided by NGINX out of the box. No special configuration required.\n\n\nThe only requirement to avoid the close of connections is the increase of the values of \nproxy-read-timeout\n and \nproxy-send-timeout\n.\n\n\nThe default value of this settings is \n60 seconds\n.\n\n\nA more adequate value to support websockets is a value higher than one hour (\n3600\n).\n\n\n\n\nImportant\n\n\nIf the NGINX ingress controller is exposed with a service \ntype=LoadBalancer\n make sure the protocol between the loadbalancer and NGINX is TCP.\n\n\n\n\nOptimizing TLS Time To First Byte (TTTFB)\n\u00b6\n\n\nNGINX provides the configuration option \nssl_buffer_size\n to allow the optimization of the TLS record size.\n\n\nThis improves the \nTLS Time To First Byte\n (TTTFB).\nThe default value in the Ingress controller is \n4k\n (NGINX default is \n16k\n).\n\n\nRetries in non-idempotent methods\n\u00b6\n\n\nSince 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error.\nThe previous behavior can be restored using 
\nretry-non-idempotent=true\n in the configuration ConfigMap.\n\n\nLimitations\n\u00b6\n\n\n\n\nIngress rules for TLS require the definition of the field \nhost\n\n\n\n\nWhy endpoints and not services\n\u00b6\n\n\nThe NGINX ingress controller does not use \nServices\n to route traffic to the pods. Instead it uses the Endpoints API in order to bypass \nkube-proxy\n to allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.", + "title": "Miscellaneous" + }, + { + "location": "/user-guide/miscellaneous/#miscellaneous", + "text": "", + "title": "Miscellaneous" + }, + { + "location": "/user-guide/miscellaneous/#source-ip-address", + "text": "By default NGINX uses the content of the header X-Forwarded-For as the source of truth to get information about the client IP address. This works without issues in L7 if we configure the setting proxy-real-ip-cidr with the correct information of the IP/network address of trusted external load balancer. If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. Another option is to enable proxy protocol using use-proxy-protocol: \"true\" . In this mode NGINX does not use the content of the header to get the source IP address of the connection.", + "title": "Source IP address" + }, + { + "location": "/user-guide/miscellaneous/#proxy-protocol", + "text": "If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you could use the Proxy Protocol for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself. Amongst others ELBs in AWS and HAProxy support Proxy Protocol.", + "title": "Proxy Protocol" + }, + { + "location": "/user-guide/miscellaneous/#websockets", + "text": "Support for websockets is provided by NGINX out of the box. 
No special configuration required. The only requirement to avoid the close of connections is the increase of the values of proxy-read-timeout and proxy-send-timeout . The default value of this settings is 60 seconds . A more adequate value to support websockets is a value higher than one hour ( 3600 ). Important If the NGINX ingress controller is exposed with a service type=LoadBalancer make sure the protocol between the loadbalancer and NGINX is TCP.", + "title": "Websockets" + }, + { + "location": "/user-guide/miscellaneous/#optimizing-tls-time-to-first-byte-tttfb", + "text": "NGINX provides the configuration option ssl_buffer_size to allow the optimization of the TLS record size. This improves the TLS Time To First Byte (TTTFB).\nThe default value in the Ingress controller is 4k (NGINX default is 16k ).", + "title": "Optimizing TLS Time To First Byte (TTTFB)" + }, + { + "location": "/user-guide/miscellaneous/#retries-in-non-idempotent-methods", + "text": "Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error.\nThe previous behavior can be restored using retry-non-idempotent=true in the configuration ConfigMap.", + "title": "Retries in non-idempotent methods" + }, + { + "location": "/user-guide/miscellaneous/#limitations", + "text": "Ingress rules for TLS require the definition of the field host", + "title": "Limitations" + }, + { + "location": "/user-guide/miscellaneous/#why-endpoints-and-not-services", + "text": "The NGINX ingress controller does not use Services to route traffic to the pods. Instead it uses the Endpoints API in order to bypass kube-proxy to allow NGINX features like session affinity and custom load balancing algorithms. 
It also removes some overhead, such as conntrack entries for iptables DNAT.", + "title": "Why endpoints and not services" + }, + { + "location": "/user-guide/monitoring/", + "text": "Prometheus and Grafana installation\n\u00b6\n\n\nThis tutorial will show you how to install \nPrometheus\n and \nGrafana\n for scraping the metrics of the NGINX Ingress controller.\n\n\n\n\nImportant\n\n\nThis example uses \nemptyDir\n volumes for Prometheus and Grafana. This means once the pod gets terminated you will lose all the data.\n\n\n\n\nBefore You Begin\n\u00b6\n\n\nThe NGINX Ingress controller should already be deployed according to the deployment instructions \nhere\n.\n\n\nNote that the yaml files used in this tutorial are stored in the \ndeploy/monitoring\n folder of the GitHub repository \nkubernetes/ingress-nginx\n.\n\n\nDeploy and configure Prometheus Server\n\u00b6\n\n\nThe Prometheus server must be configured so that it can discover endpoints of services. If a Prometheus server is already running in the cluster and if it is configured in a way that it can find the ingress controller pods, no extra configuration is needed.\n\n\nIf there is no existing Prometheus server running, the rest of this tutorial will guide you through the steps needed to deploy a properly configured Prometheus server.\n\n\nRunning the following command deploys the prometheus configuration in Kubernetes:\n\n\nkubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/configuration.yaml\n\n\nconfigmap \"prometheus-configuration\" created\n\n\n\n\n\n\nRunning the following command deploys prometheus in Kubernetes:\n\n\nkubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/prometheus.yaml\n\n\nclusterrole \"prometheus-server\" created\n\n\nserviceaccount \"prometheus-server\" created\n\n\nclusterrolebinding \"prometheus-server\" created\n\n\ndeployment \"prometheus-server\" created\n\n\nservice 
\"prometheus-service\" created\n\n\n\n\n\n\nPrometheus Dashboard\n\u00b6\n\n\nOpen Prometheus dashboard in a web browser:\n\n\nkubectl get svc -n ingress-nginx\n\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\ndefault-http-backend ClusterIP 10.103.59.201 80/TCP 3d\n\n\ningress-nginx NodePort 10.97.44.72 80:30100/TCP,443:30154/TCP,10254:32049/TCP 5h\n\n\nprometheus NodePort 10.98.233.86 9090:32630/TCP 1m\n\n\n\n\n\n\nObtain the IP address of the nodes in the running cluster:\n\n\nkubectl get nodes -o wide\n\n\n\n\n\n\nIn some cases where the node only have internal IP adresses we need to execute:\n\n\nkubectl get nodes --selector=kubernetes.io/role!=master -o jsonpath={.items[*].status.addresses[?\\(@.type==\\\"InternalIP\\\"\\)].address}\n\n\n10.192.0.2 10.192.0.3 10.192.0.4\n\n\n\n\n\n\nOpen your browser and visit the following URL: \nhttp://{node IP address}:{prometheus-svc-nodeport}\n to load the Prometheus Dashboard.\n\n\nAccording to the above example, this URL will be http://10.192.0.3:32630\n\n\n\n\nGrafana\n\u00b6\n\n\nkubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/grafana.yaml\n\n\n\n\n\n\nkubectl get svc -n ingress-nginx\n\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\ndefault-http-backend ClusterIP 10.103.59.201 80/TCP 3d\n\n\ningress-nginx NodePort 10.97.44.72 80:30100/TCP,443:30154/TCP,10254:32049/TCP 5h\n\n\nprometheus NodePort 10.98.233.86 9090:32630/TCP 10m\n\n\ngrafana NodePort 10.98.233.86 9090:31086/TCP 10m\n\n\n\n\n\n\nOpen your browser and visit the following URL: \nhttp://{node IP address}:{grafana-svc-nodeport}\n to load the Grafana Dashboard.\nAccording to the above example, this URL will be http://10.192.0.3:31086\n\n\nThe username and password is \nadmin\n\n\nAfter the login you can import the Grafana dashboard from \nhttps://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards", + "title": "Prometheus and Grafana installation" + }, + { + "location": 
"/user-guide/monitoring/#prometheus-and-grafana-installation", + "text": "This tutorial will show you how to install Prometheus and Grafana for scraping the metrics of the NGINX Ingress controller. Important This example uses emptyDir volumes for Prometheus and Grafana. This means once the pod gets terminated you will lose all the data.", + "title": "Prometheus and Grafana installation" + }, + { + "location": "/user-guide/monitoring/#before-you-begin", + "text": "The NGINX Ingress controller should already be deployed according to the deployment instructions here . Note that the yaml files used in this tutorial are stored in the deploy/monitoring folder of the GitHub repository kubernetes/ingress-nginx .", + "title": "Before You Begin" + }, + { + "location": "/user-guide/monitoring/#deploy-and-configure-prometheus-server", + "text": "The Prometheus server must be configured so that it can discover endpoints of services. If a Prometheus server is already running in the cluster and if it is configured in a way that it can find the ingress controller pods, no extra configuration is needed. If there is no existing Prometheus server running, the rest of this tutorial will guide you through the steps needed to deploy a properly configured Prometheus server. 
Running the following command deploys the prometheus configuration in Kubernetes: kubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/configuration.yaml configmap \"prometheus-configuration\" created Running the following command deploys prometheus in Kubernetes: kubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/prometheus.yaml clusterrole \"prometheus-server\" created serviceaccount \"prometheus-server\" created clusterrolebinding \"prometheus-server\" created deployment \"prometheus-server\" created service \"prometheus-service\" created", + "title": "Deploy and configure Prometheus Server" + }, + { + "location": "/user-guide/monitoring/#prometheus-dashboard", + "text": "Open Prometheus dashboard in a web browser: kubectl get svc -n ingress-nginx NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-http-backend ClusterIP 10.103.59.201 80/TCP 3d ingress-nginx NodePort 10.97.44.72 80:30100/TCP,443:30154/TCP,10254:32049/TCP 5h prometheus NodePort 10.98.233.86 9090:32630/TCP 1m Obtain the IP address of the nodes in the running cluster: kubectl get nodes -o wide In some cases where the nodes only have internal IP addresses we need to execute: kubectl get nodes --selector=kubernetes.io/role!=master -o jsonpath={.items[*].status.addresses[?\\(@.type==\\\"InternalIP\\\"\\)].address} 10.192.0.2 10.192.0.3 10.192.0.4 Open your browser and visit the following URL: http://{node IP address}:{prometheus-svc-nodeport} to load the Prometheus Dashboard. 
According to the above example, this URL will be http://10.192.0.3:32630", + "title": "Prometheus Dashboard" + }, + { + "location": "/user-guide/monitoring/#grafana", + "text": "kubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/monitoring/grafana.yaml kubectl get svc -n ingress-nginx NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-http-backend ClusterIP 10.103.59.201 80/TCP 3d ingress-nginx NodePort 10.97.44.72 80:30100/TCP,443:30154/TCP,10254:32049/TCP 5h prometheus NodePort 10.98.233.86 9090:32630/TCP 10m grafana NodePort 10.98.233.86 9090:31086/TCP 10m Open your browser and visit the following URL: http://{node IP address}:{grafana-svc-nodeport} to load the Grafana Dashboard.\nAccording to the above example, this URL will be http://10.192.0.3:31086 The username and password is admin After the login you can import the Grafana dashboard from https://github.com/kubernetes/ingress-nginx/tree/master/deploy/grafana/dashboards", + "title": "Grafana" + }, + { + "location": "/user-guide/multiple-ingress/", + "text": "Multiple Ingress controllers\n\u00b6\n\n\nIf you're running multiple ingress controllers, or running on a cloud provider that natively handles ingress such as GKE,\nyou need to specify the annotation \nkubernetes.io/ingress.class: \"nginx\"\n in all ingresses that you would like the ingress-nginx controller to claim.\n\n\nFor instance,\n\n\nmetadata\n:\n\n \nname\n:\n \nfoo\n\n \nannotations\n:\n\n \nkubernetes.io/ingress.class\n:\n \n\"gce\"\n\n\n\n\n\n\nwill target the GCE controller, forcing the nginx controller to ignore it, while an annotation like\n\n\nmetadata\n:\n\n \nname\n:\n \nfoo\n\n \nannotations\n:\n\n \nkubernetes.io/ingress.class\n:\n \n\"nginx\"\n\n\n\n\n\n\nwill target the nginx controller, forcing the GCE controller to ignore it.\n\n\nTo reiterate, setting the annotation to any value which does not match a valid ingress class will force the NGINX Ingress controller to ignore your Ingress.\nIf 
you are only running a single NGINX ingress controller, this can be achieved by setting the annotation to any value except \"nginx\" or an empty string.\n\n\nDo this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.\n\n\nMultiple ingress-nginx controllers\n\u00b6\n\n\nThis mechanism also provides users the ability to run \nmultiple\n NGINX ingress controllers (e.g. one which serves public traffic, one which serves \"internal\" traffic).\nTo do this, the option \n--ingress-class\n must be changed to a value unique for the cluster within the definition of the replication controller.\nHere is a partial example:\n\n\nspec\n:\n\n \ntemplate\n:\n\n \nspec\n:\n\n \ncontainers\n:\n\n \n-\n \nname\n:\n \nnginx-ingress-internal-controller\n\n \nargs\n:\n\n \n-\n \n/nginx-ingress-controller\n\n \n-\n \n'--default-backend-service=ingress/nginx-ingress-default-backend'\n\n \n-\n \n'--election-id=ingress-controller-leader-internal'\n\n \n-\n \n'--ingress-class=nginx-internal'\n\n \n-\n \n'--configmap=ingress/nginx-ingress-internal-controller'\n\n\n\n\n\n\n\n\nImportant\n\n\nDeploying multiple Ingress controllers, of different types (e.g., \ningress-nginx\n & \ngce\n), and not specifying a class annotation will\nresult in both or all controllers fighting to satisfy the Ingress, and all of them racing to update Ingress status field in confusing ways.\n\n\nWhen running multiple ingress-nginx controllers, it will only process an unset class annotation if one of the controllers uses the default\n\n--ingress-class\n value (see \nIsValid\n method in \ninternal/ingress/annotations/class/main.go\n), otherwise the class annotation become required.", + "title": "Multiple Ingress controllers" + }, + { + "location": "/user-guide/multiple-ingress/#multiple-ingress-controllers", + "text": "If you're running multiple ingress controllers, or running on a cloud provider that natively handles ingress such as GKE,\nyou need to specify the annotation 
kubernetes.io/ingress.class: \"nginx\" in all ingresses that you would like the ingress-nginx controller to claim. For instance, metadata : \n name : foo \n annotations : \n kubernetes.io/ingress.class : \"gce\" will target the GCE controller, forcing the nginx controller to ignore it, while an annotation like metadata : \n name : foo \n annotations : \n kubernetes.io/ingress.class : \"nginx\" will target the nginx controller, forcing the GCE controller to ignore it. To reiterate, setting the annotation to any value which does not match a valid ingress class will force the NGINX Ingress controller to ignore your Ingress.\nIf you are only running a single NGINX ingress controller, this can be achieved by setting the annotation to any value except \"nginx\" or an empty string. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.", + "title": "Multiple Ingress controllers" + }, + { + "location": "/user-guide/multiple-ingress/#multiple-ingress-nginx-controllers", + "text": "This mechanism also provides users the ability to run multiple NGINX ingress controllers (e.g. 
one which serves public traffic, one which serves \"internal\" traffic).\nTo do this, the option --ingress-class must be changed to a value unique for the cluster within the definition of the replication controller.\nHere is a partial example: spec : \n template : \n spec : \n containers : \n - name : nginx-ingress-internal-controller \n args : \n - /nginx-ingress-controller \n - '--default-backend-service=ingress/nginx-ingress-default-backend' \n - '--election-id=ingress-controller-leader-internal' \n - '--ingress-class=nginx-internal' \n - '--configmap=ingress/nginx-ingress-internal-controller' Important Deploying multiple Ingress controllers, of different types (e.g., ingress-nginx & gce ), and not specifying a class annotation will\nresult in both or all controllers fighting to satisfy the Ingress, and all of them racing to update Ingress status field in confusing ways. When running multiple ingress-nginx controllers, it will only process an unset class annotation if one of the controllers uses the default --ingress-class value (see IsValid method in internal/ingress/annotations/class/main.go ), otherwise the class annotation become required.", + "title": "Multiple ingress-nginx controllers" + }, + { + "location": "/user-guide/tls/", + "text": "TLS/HTTPS\n\u00b6\n\n\nTLS Secrets\n\u00b6\n\n\nAnytime we reference a TLS secret, we mean a PEM-encoded X.509, RSA (2048) secret.\n\n\nYou can generate a self-signed certificate and private key with with:\n\n\n$ openssl req -x509 -nodes -days \n365\n -newkey rsa:2048 -keyout \n${\nKEY_FILE\n}\n -out \n${\nCERT_FILE\n}\n -subj \n\"/CN=\n${\nHOST\n}\n/O=\n${\nHOST\n}\n\"\n`\n\n\n\n\n\n\nThen create the secret in the cluster via:\n\n\nkubectl create secret tls \n${\nCERT_NAME\n}\n --key \n${\nKEY_FILE\n}\n --cert \n${\nCERT_FILE\n}\n\n\n\n\n\n\nThe resulting secret will be of type \nkubernetes.io/tls\n.\n\n\nDefault SSL Certificate\n\u00b6\n\n\nNGINX provides the option to configure a server as a catch-all 
with\n\nserver_name\n\nfor requests that do not match any of the configured server names.\nThis configuration works without out-of-the-box for HTTP traffic.\nFor HTTPS, a certificate is naturally required.\n\n\nFor this reason the Ingress controller provides the flag \n--default-ssl-certificate\n.\nThe secret referred to by this flag contains the default certificate to be used when\naccessing the catch-all server.\nIf this flag is not provided NGINX will use a self-signed certificate.\n\n\nFor instance, if you have a TLS secret \nfoo-tls\n in the \ndefault\n namespace,\nadd \n--default-ssl-certificate=default/foo-tls\n in the \nnginx-controller\n deployment.\n\n\nSSL Passthrough\n\u00b6\n\n\nThe flag \n--enable-ssl-passthrough\n enables the SSL passthrough feature.\nBy default this feature is disabled.\n\n\nThis is required to enable passthrough backends in Ingress configurations.\n\n\nTODO: Improve this documentation.\n\n\nHTTP Strict Transport Security\n\u00b6\n\n\nHTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified\nthrough the use of a special response header. Once a supported browser receives\nthis header that browser will prevent any communications from being sent over\nHTTP to the specified domain and will instead send all communications over HTTPS.\n\n\nHSTS is enabled by default.\n\n\nTo disable this behavior use \nhsts: \"false\"\n in the configuration \nConfigMap\n.\n\n\nServer-side HTTPS enforcement through redirect\n\u00b6\n\n\nBy default the controller redirects HTTP clients to the HTTPS port\n443 using a 308 Permanent Redirect response if TLS is enabled for that Ingress.\n\n\nThis can be disabled globally using \nssl-redirect: \"false\"\n in the NGINX \nconfig map\n,\nor per-Ingress with the \nnginx.ingress.kubernetes.io/ssl-redirect: \"false\"\n\nannotation in the particular resource.\n\n\n\n\nTip\n\n\nWhen using SSL offloading outside of cluster (e.g. 
AWS ELB) it may be useful to enforce a\nredirect to HTTPS even when there is no TLS certificate available.\nThis can be achieved by using the \nnginx.ingress.kubernetes.io/force-ssl-redirect: \"true\"\n\nannotation in the particular resource.\n\n\n\n\nAutomated Certificate Management with Kube-Lego\n\u00b6\n\n\n\n\nTip\n\n\nKube-Lego has reached end-of-life and is being\nreplaced by \ncert-manager\n.\n\n\n\n\nKube-Lego\n automatically requests missing or expired certificates from \nLet's Encrypt\n\nby monitoring ingress resources and their referenced secrets.\n\n\nTo enable this for an ingress resource you have to add an annotation:\n\n\nkubectl annotate ing ingress-demo kubernetes.io/tls-acme=\"true\"\n\n\n\n\n\n\nTo setup Kube-Lego you can take a look at this \nfull example\n.\nThe first version to fully support Kube-Lego is Nginx Ingress controller 0.8.\n\n\nDefault TLS Version and Ciphers\n\u00b6\n\n\nTo provide the most secure baseline configuration possible,\n\n\nnginx-ingress defaults to using TLS 1.2 only and a \nsecure set of TLS ciphers\n.\n\n\nLegacy TLS\n\u00b6\n\n\nThe default configuration, though secure, does not support some older browsers and operating systems.\n\n\nFor instance, TLS 1.1+ is only enabled by default from Android 5.0 on. 
At the time of writing,\nMay 2018, \napproximately 15% of Android devices\n\nare not compatible with nginx-ingress's default configuration.\n\n\nTo change this default behavior, use a \nConfigMap\n.\n\n\nA sample ConfigMap fragment to allow these older clients to connect could look something like the following:\n\n\nkind\n:\n \nConfigMap\n\n\napiVersion\n:\n \nv1\n\n\nmetadata\n:\n\n \nname\n:\n \nnginx\n-\nconfig\n\n\ndata\n:\n\n \nssl\n-\nciphers\n:\n \n\"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA\"\n\n \nssl\n-\nprotocols\n:\n \n\"TLSv1 TLSv1.1 TLSv1.2\"", + "title": "TLS/HTTPS" + }, + { + "location": "/user-guide/tls/#tlshttps", + "text": "", + "title": "TLS/HTTPS" + }, + { + "location": "/user-guide/tls/#tls-secrets", + "text": "Anytime we reference a TLS secret, we mean a PEM-encoded X.509, RSA (2048) secret. 
You can generate a self-signed certificate and private key with: $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${ KEY_FILE } -out ${ CERT_FILE } -subj \"/CN= ${ HOST } /O= ${ HOST } \" ` Then create the secret in the cluster via: kubectl create secret tls ${ CERT_NAME } --key ${ KEY_FILE } --cert ${ CERT_FILE } The resulting secret will be of type kubernetes.io/tls .", + "title": "TLS Secrets" + }, + { + "location": "/user-guide/tls/#default-ssl-certificate", + "text": "NGINX provides the option to configure a server as a catch-all with server_name \nfor requests that do not match any of the configured server names.\nThis configuration works out-of-the-box for HTTP traffic.\nFor HTTPS, a certificate is naturally required. For this reason the Ingress controller provides the flag --default-ssl-certificate .\nThe secret referred to by this flag contains the default certificate to be used when\naccessing the catch-all server.\nIf this flag is not provided NGINX will use a self-signed certificate. For instance, if you have a TLS secret foo-tls in the default namespace,\nadd --default-ssl-certificate=default/foo-tls in the nginx-controller deployment.", + "title": "Default SSL Certificate" + }, + { + "location": "/user-guide/tls/#ssl-passthrough", + "text": "The flag --enable-ssl-passthrough enables the SSL passthrough feature.\nBy default this feature is disabled. This is required to enable passthrough backends in Ingress configurations. TODO: Improve this documentation.", + "title": "SSL Passthrough" + }, + { + "location": "/user-guide/tls/#http-strict-transport-security", + "text": "HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified\nthrough the use of a special response header. Once a supported browser receives\nthis header that browser will prevent any communications from being sent over\nHTTP to the specified domain and will instead send all communications over HTTPS. HSTS is enabled by default. 
To disable this behavior use hsts: \"false\" in the configuration ConfigMap .", + "title": "HTTP Strict Transport Security" + }, + { + "location": "/user-guide/tls/#server-side-https-enforcement-through-redirect", + "text": "By default the controller redirects HTTP clients to the HTTPS port\n443 using a 308 Permanent Redirect response if TLS is enabled for that Ingress. This can be disabled globally using ssl-redirect: \"false\" in the NGINX config map ,\nor per-Ingress with the nginx.ingress.kubernetes.io/ssl-redirect: \"false\" \nannotation in the particular resource. Tip When using SSL offloading outside of cluster (e.g. AWS ELB) it may be useful to enforce a\nredirect to HTTPS even when there is no TLS certificate available.\nThis can be achieved by using the nginx.ingress.kubernetes.io/force-ssl-redirect: \"true\" \nannotation in the particular resource.", + "title": "Server-side HTTPS enforcement through redirect" + }, + { + "location": "/user-guide/tls/#automated-certificate-management-with-kube-lego", + "text": "Tip Kube-Lego has reached end-of-life and is being\nreplaced by cert-manager . Kube-Lego automatically requests missing or expired certificates from Let's Encrypt \nby monitoring ingress resources and their referenced secrets. 
To enable this for an ingress resource you have to add an annotation: kubectl annotate ing ingress-demo kubernetes.io/tls-acme=\"true\" To setup Kube-Lego you can take a look at this full example .\nThe first version to fully support Kube-Lego is Nginx Ingress controller 0.8.", + "title": "Automated Certificate Management with Kube-Lego" + }, + { + "location": "/user-guide/tls/#default-tls-version-and-ciphers", + "text": "To provide the most secure baseline configuration possible, nginx-ingress defaults to using TLS 1.2 only and a secure set of TLS ciphers .", + "title": "Default TLS Version and Ciphers" + }, + { + "location": "/user-guide/tls/#legacy-tls", + "text": "The default configuration, though secure, does not support some older browsers and operating systems. For instance, TLS 1.1+ is only enabled by default from Android 5.0 on. At the time of writing,\nMay 2018, approximately 15% of Android devices \nare not compatible with nginx-ingress's default configuration. To change this default behavior, use a ConfigMap . 
A sample ConfigMap fragment to allow these older clients to connect could look something like the following: kind : ConfigMap apiVersion : v1 metadata : \n name : nginx - config data : \n ssl - ciphers : \"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA\" \n ssl - protocols : \"TLSv1 TLSv1.1 TLSv1.2\"", + "title": "Legacy TLS" + }, + { + "location": "/user-guide/third-party-addons/modsecurity/", + "text": "ModSecurity Web Application Firewall\n\u00b6\n\n\nModSecurity is an open source, cross platform web application firewall (WAF) engine for Apache, IIS and Nginx that is developed by Trustwave's SpiderLabs. It has a robust event-based programming language which provides protection from a range of attacks against web applications and allows for HTTP traffic monitoring, logging and real-time analysis - \nhttps://www.modsecurity.org\n\n\nThe \nModSecurity-nginx\n connector is the connection point between NGINX and libmodsecurity (ModSecurity v3).\n\n\nThe default ModSecurity configuration file is located in \n/etc/nginx/modsecurity/modsecurity.conf\n. This is the only file located in this directory and contains the default recommended configuration. 
Using a volume we can replace this file with the desired configuration.\nTo enable the ModSecurity feature we need to specify \nenable-modsecurity: \"true\"\n in the configuration configmap.\n\n\n\n\nNote:\n the default configuration use detection only, because that minimizes the chances of post-installation disruption.\nThe file \n/var/log/modsec_audit.log\n contains the log of ModSecurity.\n\n\n\n\nThe OWASP ModSecurity Core Rule Set (CRS) is a set of generic attack detection rules for use with ModSecurity or compatible web application firewalls. The CRS aims to protect web applications from a wide range of attacks, including the OWASP Top Ten, with a minimum of false alerts.\nThe directory \n/etc/nginx/owasp-modsecurity-crs\n contains the \nowasp-modsecurity-crs repository\n.\nUsing \nenable-owasp-modsecurity-crs: \"true\"\n we enable the use of the rules.", + "title": "ModSecurity Web Application Firewall" + }, + { + "location": "/user-guide/third-party-addons/modsecurity/#modsecurity-web-application-firewall", + "text": "ModSecurity is an open source, cross platform web application firewall (WAF) engine for Apache, IIS and Nginx that is developed by Trustwave's SpiderLabs. It has a robust event-based programming language which provides protection from a range of attacks against web applications and allows for HTTP traffic monitoring, logging and real-time analysis - https://www.modsecurity.org The ModSecurity-nginx connector is the connection point between NGINX and libmodsecurity (ModSecurity v3). The default ModSecurity configuration file is located in /etc/nginx/modsecurity/modsecurity.conf . This is the only file located in this directory and contains the default recommended configuration. Using a volume we can replace this file with the desired configuration.\nTo enable the ModSecurity feature we need to specify enable-modsecurity: \"true\" in the configuration configmap. 
Note: the default configuration uses detection only, because that minimizes the chances of post-installation disruption.\nThe file /var/log/modsec_audit.log contains the log of ModSecurity. The OWASP ModSecurity Core Rule Set (CRS) is a set of generic attack detection rules for use with ModSecurity or compatible web application firewalls. The CRS aims to protect web applications from a wide range of attacks, including the OWASP Top Ten, with a minimum of false alerts.\nThe directory /etc/nginx/owasp-modsecurity-crs contains the owasp-modsecurity-crs repository .\nUsing enable-owasp-modsecurity-crs: \"true\" we enable the use of the rules.", + "title": "ModSecurity Web Application Firewall" + }, + { + "location": "/user-guide/third-party-addons/opentracing/", + "text": "OpenTracing\n\u00b6\n\n\nEnables requests served by nginx for distributed tracing via The OpenTracing Project.\n\n\nUsing the third party module \nopentracing-contrib/nginx-opentracing\n the NGINX ingress controller can configure NGINX to enable \nOpenTracing\n instrumentation.\nBy default this feature is disabled.\n\n\nUsage\n\u00b6\n\n\nTo enable the instrumentation we must enable opentracing in the configuration configmap:\n\n\ndata\n:\n\n \nenable\n-\nopentracing\n:\n \n\"true\"\n\n\n\n\n\n\nWe must also set the host to use when uploading traces:\n\n\nzipkin-collector-host: zipkin.default.svc.cluster.local\njaeger-collector-host: jaeger-collector.default.svc.cluster.local\n\n\n\n\n\nNext you will need to deploy a distributed tracing system which uses OpenTracing. Both \nZipkin\n and\n\nJaeger\n have been tested.\n\n\nOther optional configuration options:\n\n\n# specifies the port to use when uploading traces\nzipkin-collector-port\n\n# specifies the service name to use for any traces created, Default: nginx\nzipkin-service-name\n\n# specifies sample rate for any traces created. 
Default: 1.0\nzipkin-sample-rate\n\n# specifies the port to use when uploading traces\njaeger-collector-port\n\n# specifies the service name to use for any traces created, Default: nginx\njaeger-service-name\n\n# specifies the sampler to be used when sampling traces.\n# The available samplers are: const, probabilistic, ratelimiting, remote, Default: const\njaeger-sampler-type\n\n# specifies the argument to be passed to the sampler constructor, Default: 1\njaeger-sampler-param\n\n\n\n\n\nExamples\n\u00b6\n\n\nThe following examples show how to deploy and test different distributed tracing systems. These example can be performed\nusing Minikube.\n\n\nZipkin\n\u00b6\n\n\nIn the \nrnburn/zipkin-date-server\n\ngithub repository is an example of a dockerized date service. To install the example and zipkin collector run:\n\n\nkubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/zipkin.yaml\nkubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/deployment.yaml\n\n\n\n\n\nAlso we need to configure the NGINX controller configmap with the required values:\n\n\n$ \necho\n \n'\n\n\napiVersion: v1\n\n\nkind: ConfigMap\n\n\ndata:\n\n\n enable-opentracing: \"true\"\n\n\n zipkin-collector-host: zipkin.default.svc.cluster.local\n\n\nmetadata:\n\n\n name: nginx-load-balancer-conf\n\n\n namespace: kube-system\n\n\n'\n \n|\n kubectl replace -f -\n\n\n\n\n\nIn the zipkin interface we can see the details:\n\n\n\nJaeger\n\u00b6\n\n\n\n\n\n\nEnable Ingress addon in minikube:\n \n$ minikube addons \nenable\n ingress\n\n\n\n\n\n\nAdd minikube IP to /etc/hosts:\n \n$ \necho\n \n\"\n$(\nminikube ip\n)\n example.com\"\n \n|\n sudo tee -a /etc/hosts\n\n\n\n\n\n\nApply a Basic Service and Ingress Resource:\n ```\n # Create Echoheaders Deployment\n $ kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080\n\n\nExpose as a Cluster-IP\n\u00b6\n\n\n$ kubectl expose deployment echoheaders 
--port=80 --target-port=8080 --name=echoheaders-x\n\n\nApply the Ingress Resource\n\u00b6\n\n\n$ echo '\n apiVersion: extensions/v1beta1\n kind: Ingress\n metadata:\n name: echo-ingress\n spec:\n rules:\n - host: example.com\n http:\n paths:\n - backend:\n serviceName: echoheaders-x\n servicePort: 80\n path: /echo\n ' | kubectl apply -f -\n```\n\n\n\n\n\n\nEnable OpenTracing and set the zipkin-collector-host:\n \n$ \necho\n \n'\n apiVersion: v1\n kind: ConfigMap\n data:\n enable-opentracing: \"true\"\n zipkin-collector-host: zipkin.default.svc.cluster.local\n jaeger-collector-host: jaeger-collector.default.svc.cluster.local\n metadata:\n name: nginx-load-balancer-conf\n namespace: kube-system\n '\n \n|\n kubectl replace -f -\n\n\n\n\n\n\nApply the Jaeger All-In-One Template:\n \n$ kubectl apply -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml\n\n\n\n\n\n\nMake a few requests to the Service:\n ```\n $ curl example.com/echo -d \"meow\"\n\n\nCLIENT VALUES:\nclient_address=172.17.0.5\ncommand=POST\nreal path=/echo\nquery=nil\nrequest_version=1.1\nrequest_uri=http://example.com:8080/echo\n\n\nSERVER VALUES:\nserver_version=nginx: 1.10.0 - lua: 10001\n\n\nHEADERS RECEIVED:\naccept=\n/\n\nconnection=close\ncontent-length=4\ncontent-type=application/x-www-form-urlencoded\nhost=example.com\nuser-agent=curl/7.54.0\nx-forwarded-for=192.168.99.1\nx-forwarded-host=example.com\nx-forwarded-port=80\nx-forwarded-proto=http\nx-original-uri=/echo\nx-real-ip=192.168.99.1\nx-scheme=http\nBODY:\nmeow\n```\n\n\n\n\n\n\nView the Jaeger UI:\n ```\n $ minikube service jaeger-query --url\n\n\nhttp://192.168.99.100:30183\n```\n\n\nIn the jaeger interface we can see the details:", + "title": "OpenTracing" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#opentracing", + "text": "Enables requests served by nginx for distributed tracing via The OpenTracing Project. 
Using the third party module opentracing-contrib/nginx-opentracing the NGINX ingress controller can configure NGINX to enable OpenTracing instrumentation.\nBy default this feature is disabled.", + "title": "OpenTracing" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#usage", + "text": "To enable the instrumentation we must enable opentracing in the configuration configmap: data : \n enable - opentracing : \"true\" We must also set the host to use when uploading traces: zipkin-collector-host: zipkin.default.svc.cluster.local\njaeger-collector-host: jaeger-collector.default.svc.cluster.local Next you will need to deploy a distributed tracing system which uses OpenTracing. Both Zipkin and Jaeger have been tested. Other optional configuration options: # specifies the port to use when uploading traces\nzipkin-collector-port\n\n# specifies the service name to use for any traces created, Default: nginx\nzipkin-service-name\n\n# specifies sample rate for any traces created. Default: 1.0\nzipkin-sample-rate\n\n# specifies the port to use when uploading traces\njaeger-collector-port\n\n# specifies the service name to use for any traces created, Default: nginx\njaeger-service-name\n\n# specifies the sampler to be used when sampling traces.\n# The available samplers are: const, probabilistic, ratelimiting, remote, Default: const\njaeger-sampler-type\n\n# specifies the argument to be passed to the sampler constructor, Default: 1\njaeger-sampler-param", + "title": "Usage" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#examples", + "text": "The following examples show how to deploy and test different distributed tracing systems. These examples can be performed\nusing Minikube.", + "title": "Examples" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#zipkin", + "text": "In the rnburn/zipkin-date-server \ngithub repository is an example of a dockerized date service. 
To install the example and zipkin collector run: kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/zipkin.yaml\nkubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/deployment.yaml Also we need to configure the NGINX controller configmap with the required values: $ echo ' apiVersion: v1 kind: ConfigMap data: enable-opentracing: \"true\" zipkin-collector-host: zipkin.default.svc.cluster.local metadata: name: nginx-load-balancer-conf namespace: kube-system ' | kubectl replace -f - In the zipkin interface we can see the details:", + "title": "Zipkin" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#jaeger", + "text": "Enable Ingress addon in minikube:\n $ minikube addons enable ingress Add minikube IP to /etc/hosts:\n $ echo \" $( minikube ip ) example.com\" | sudo tee -a /etc/hosts Apply a Basic Service and Ingress Resource:\n ```\n # Create Echoheaders Deployment\n $ kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080", + "title": "Jaeger" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#expose-as-a-cluster-ip", + "text": "$ kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x", + "title": "Expose as a Cluster-IP" + }, + { + "location": "/user-guide/third-party-addons/opentracing/#apply-the-ingress-resource", + "text": "$ echo '\n apiVersion: extensions/v1beta1\n kind: Ingress\n metadata:\n name: echo-ingress\n spec:\n rules:\n - host: example.com\n http:\n paths:\n - backend:\n serviceName: echoheaders-x\n servicePort: 80\n path: /echo\n ' | kubectl apply -f -\n``` Enable OpenTracing and set the zipkin-collector-host:\n $ echo ' apiVersion: v1 kind: ConfigMap data: enable-opentracing: \"true\" zipkin-collector-host: zipkin.default.svc.cluster.local jaeger-collector-host: jaeger-collector.default.svc.cluster.local metadata: name: nginx-load-balancer-conf namespace: kube-system 
' | kubectl replace -f - Apply the Jaeger All-In-One Template:\n $ kubectl apply -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml Make a few requests to the Service:\n ```\n $ curl example.com/echo -d \"meow\" CLIENT VALUES:\nclient_address=172.17.0.5\ncommand=POST\nreal path=/echo\nquery=nil\nrequest_version=1.1\nrequest_uri=http://example.com:8080/echo SERVER VALUES:\nserver_version=nginx: 1.10.0 - lua: 10001 HEADERS RECEIVED:\naccept= / \nconnection=close\ncontent-length=4\ncontent-type=application/x-www-form-urlencoded\nhost=example.com\nuser-agent=curl/7.54.0\nx-forwarded-for=192.168.99.1\nx-forwarded-host=example.com\nx-forwarded-port=80\nx-forwarded-proto=http\nx-original-uri=/echo\nx-real-ip=192.168.99.1\nx-scheme=http\nBODY:\nmeow\n``` View the Jaeger UI:\n ```\n $ minikube service jaeger-query --url http://192.168.99.100:30183\n``` In the jaeger interface we can see the details:", + "title": "Apply the Ingress Resource" + }, + { + "location": "/examples/", + "text": "Ingress examples\n\u00b6\n\n\nThis directory contains a catalog of examples on how to run, configure and scale Ingress.\n\nPlease review the \nprerequisites\n before trying them.\n\n\n\n\n\n\n\n\nCategory\n\n\nName\n\n\nDescription\n\n\nComplexity Level\n\n\n\n\n\n\n\n\n\n\nApps\n\n\nDocker Registry\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nAuth\n\n\nBasic authentication\n\n\npassword protect your website\n\n\nIntermediate\n\n\n\n\n\n\nAuth\n\n\nClient certificate authentication\n\n\nsecure your website with client certificate authentication\n\n\nIntermediate\n\n\n\n\n\n\nAuth\n\n\nExternal authentication plugin\n\n\ndefer to an external authentication service\n\n\nIntermediate\n\n\n\n\n\n\nAuth\n\n\nOAuth external auth\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nCustomization\n\n\nConfiguration snippets\n\n\ncustomize nginx location configuration using annotations\n\n\nAdvanced\n\n\n\n\n\n\nCustomization\n\n\nCustom 
configuration\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nCustomization\n\n\nCustom DH parameters for perfect forward secrecy\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nCustomization\n\n\nCustom errors\n\n\nserve custom error pages from the default backend\n\n\nIntermediate\n\n\n\n\n\n\nCustomization\n\n\nCustom headers\n\n\nset custom headers before sending traffic to backends\n\n\nAdvanced\n\n\n\n\n\n\nCustomization\n\n\nCustom upstream check\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nCustomization\n\n\nExternal authentication with response header propagation\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nCustomization\n\n\nSysctl tuning\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nFeatures\n\n\nRewrite\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nFeatures\n\n\nSession stickiness\n\n\nroute requests consistently to the same endpoint\n\n\nAdvanced\n\n\n\n\n\n\nScaling\n\n\nStatic IP\n\n\na single ingress gets a single static IP\n\n\nIntermediate\n\n\n\n\n\n\nTLS\n\n\nMulti TLS certificate termination\n\n\nTODO\n\n\nTODO\n\n\n\n\n\n\nTLS\n\n\nTLS termination\n\n\nTODO\n\n\nTODO", + "title": "Ingress examples" + }, + { + "location": "/examples/#ingress-examples", + "text": "This directory contains a catalog of examples on how to run, configure and scale Ingress. \nPlease review the prerequisites before trying them. 
Category Name Description Complexity Level Apps Docker Registry TODO TODO Auth Basic authentication password protect your website Intermediate Auth Client certificate authentication secure your website with client certificate authentication Intermediate Auth External authentication plugin defer to an external authentication service Intermediate Auth OAuth external auth TODO TODO Customization Configuration snippets customize nginx location configuration using annotations Advanced Customization Custom configuration TODO TODO Customization Custom DH parameters for perfect forward secrecy TODO TODO Customization Custom errors serve custom error pages from the default backend Intermediate Customization Custom headers set custom headers before sending traffic to backends Advanced Customization Custom upstream check TODO TODO Customization External authentication with response header propagation TODO TODO Customization Sysctl tuning TODO TODO Features Rewrite TODO TODO Features Session stickiness route requests consistently to the same endpoint Advanced Scaling Static IP a single ingress gets a single static IP Intermediate TLS Multi TLS certificate termination TODO TODO TLS TLS termination TODO TODO", + "title": "Ingress examples" + }, + { + "location": "/examples/PREREQUISITES/", + "text": "Prerequisites\n\u00b6\n\n\nMany of the examples in this directory have common prerequisites.\n\n\nTLS certificates\n\u00b6\n\n\nUnless otherwise mentioned, the TLS secret used in examples is a 2048 bit RSA\nkey/cert pair with an arbitrarily chosen hostname, created as follows\n\n\n$\n openssl req -x509 -nodes -days \n365\n -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \n\"/CN=nginxsvc/O=nginxsvc\"\n\n\nGenerating a 2048 bit RSA private key\n\n\n................+++\n\n\n................+++\n\n\nwriting new private key to 'tls.key'\n\n\n-----\n\n\n\n$\n kubectl create secret tls tls-secret --key tls.key --cert tls.crt\n\nsecret \"tls-secret\" created\n\n\n\n\n\n\nCA 
Authentication\n\u00b6\n\n\nYou can act as your very own CA, or use an existing one. As an exercise / learning, we're going to generate our\nown CA, and also generate a client certificate.\n\n\nThese instructions are based on CoreOS OpenSSL. \nSee live doc.\n\n\nGenerating a CA\n\u00b6\n\n\nFirst of all, you've to generate a CA. This is going to be the one who will sign your client certificates.\nIn real production world, you may face CAs with intermediate certificates, as the following:\n\n\n$\n openssl s_client -connect www.google.com:443\n\n[...]\n\n\n---\n\n\nCertificate chain\n\n\n 0 s:/C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com\n\n\n i:/C=US/O=Google Inc/CN=Google Internet Authority G2\n\n\n 1 s:/C=US/O=Google Inc/CN=Google Internet Authority G2\n\n\n i:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA\n\n\n 2 s:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA\n\n\n i:/C=US/O=Equifax/OU=Equifax Secure Certificate Authority\n\n\n\n\n\n\nTo generate our CA Certificate, we've to run the following commands:\n\n\n$\n openssl genrsa -out ca.key \n2048\n\n\n$\n openssl req -x509 -new -nodes -key ca.key -days \n10000\n -out ca.crt -subj \n\"/CN=example-ca\"\n\n\n\n\n\n\nThis will generate two files: A private key (ca.key) and a public key (ca.crt). This CA is valid for 10000 days.\nThe ca.crt can be used later in the step of creation of CA authentication secret.\n\n\nGenerating the client certificate\n\u00b6\n\n\nThe following steps generate a client certificate signed by the CA generated above. 
This client can be\nused to authenticate in a tls-auth configured ingress.\n\n\nFirst, we need to generate an 'openssl.cnf' file that will be used while signing the keys:\n\n\n[req]\n\n\nreq_extensions = v3_req\n\n\ndistinguished_name = req_distinguished_name\n\n\n[req_distinguished_name]\n\n\n[ v3_req ]\n\n\nbasicConstraints = CA:FALSE\n\n\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n\n\n\n\nThen, a user generates his very own private key (that he needs to keep secret)\nand a CSR (Certificate Signing Request) that will be sent to the CA to sign and generate a certificate.\n\n\n$\n openssl genrsa -out client1.key \n2048\n\n\n$\n openssl req -new -key client1.key -out client1.csr -subj \n\"/CN=client1\"\n -config openssl.cnf\n\n\n\n\n\nAs the CA receives the generated 'client1.csr' file, it signs it and generates a client.crt certificate:\n\n\n$\n openssl x509 -req -in client1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client1.crt -days \n365\n -extensions v3_req -extfile openssl.cnf\n\n\n\n\n\nThen, you'll have 3 files: the client.key (user's private key), client.crt (user's public key) and client.csr (disposable CSR).\n\n\nCreating the CA Authentication secret\n\u00b6\n\n\nIf you're using the CA Authentication feature, you need to generate a secret containing \nall the authorized CAs. You must download them from your CA site in PEM format (like the following):\n\n\n-----BEGIN CERTIFICATE-----\n[....]\n-----END CERTIFICATE-----\n\n\n\n\n\nYou can have as many certificates as you want. If they're in the binary DER format, \nyou can convert them as the following:\n\n\n$\n openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem\n\n\n\n\n\nThen, you've to concatenate them all in only one file, named 'ca.crt' as the following:\n\n\n$\n cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt\n\n\n\n\n\nThe final step is to create a secret with the content of this file. 
This secret is going to be used in \nthe TLS Auth directive:\n\n\n$\n kubectl create secret generic caingress --namespace\n=\ndefault --from-file\n=\nca.crt\n=\n\n\n\n\n\n\nNote:\n You can also generate the CA Authentication Secret along with the TLS Secret by using:\n\n\n$\n kubectl create secret generic caingress --namespace\n=\ndefault --from-file\n=\nca.crt\n=\n --from-file\n=\ntls.crt\n=\n --from-file\n=\ntls.key\n=\n\n\n\n\n\n\nTest HTTP Service\n\u00b6\n\n\nAll examples that require a test HTTP Service use the standard http-svc pod,\nwhich you can deploy as follows\n\n\n$\n kubectl create -f http-svc.yaml\n\nservice \"http-svc\" created\n\n\nreplicationcontroller \"http-svc\" created\n\n\n\n$\n kubectl get po\n\nNAME READY STATUS RESTARTS AGE\n\n\nhttp-svc-p1t3t 1/1 Running 0 1d\n\n\n\n$\n kubectl get svc\n\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\nhttp-svc 10.0.122.116 80:30301/TCP 1d\n\n\n\n\n\n\nYou can test that the HTTP Service works by exposing it temporarily\n\n\n$\n kubectl patch svc http-svc -p \n'{\"spec\":{\"type\": \"LoadBalancer\"}}'\n\n\n\"http-svc\" patched\n\n\n\n$\n kubectl get svc http-svc\n\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\nhttp-svc 10.0.122.116 80:30301/TCP 1d\n\n\n\n$\n kubectl describe svc http-svc\n\nName: http-svc\n\n\nNamespace: default\n\n\nLabels: app=http-svc\n\n\nSelector: app=http-svc\n\n\nType: LoadBalancer\n\n\nIP: 10.0.122.116\n\n\nLoadBalancer Ingress: 108.59.87.136\n\n\nPort: http 80/TCP\n\n\nNodePort: http 30301/TCP\n\n\nEndpoints: 10.180.1.6:8080\n\n\nSession Affinity: None\n\n\nEvents:\n\n\n FirstSeen LastSeen Count From SubObjectPath Type Reason Message\n\n\n --------- -------- ----- ---- ------------- -------- ------ -------\n\n\n 1m 1m 1 {service-controller } Normal Type ClusterIP -> LoadBalancer\n\n\n 1m 1m 1 {service-controller } Normal CreatingLoadBalancer Creating load balancer\n\n\n 16s 16s 1 {service-controller } Normal CreatedLoadBalancer Created load balancer\n\n\n\n$\n curl 
\n108\n.59.87.126\n\nCLIENT VALUES:\n\n\nclient_address=10.240.0.3\n\n\ncommand=GET\n\n\nreal path=/\n\n\nquery=nil\n\n\nrequest_version=1.1\n\n\nrequest_uri=http://108.59.87.136:8080/\n\n\n\nSERVER VALUES:\n\n\nserver_version=nginx: 1.9.11 - lua: 10001\n\n\n\nHEADERS RECEIVED:\n\n\naccept=*/*\n\n\nhost=108.59.87.136\n\n\nuser-agent=curl/7.46.0\n\n\nBODY:\n\n\n-no body in request-\n\n\n\n$\n kubectl patch svc http-svc -p \n'{\"spec\":{\"type\": \"NodePort\"}}'\n\n\n\"http-svc\" patched", + "title": "Prerequisites" + }, + { + "location": "/examples/PREREQUISITES/#prerequisites", + "text": "Many of the examples in this directory have common prerequisites.", + "title": "Prerequisites" + }, + { + "location": "/examples/PREREQUISITES/#tls-certificates", + "text": "Unless otherwise mentioned, the TLS secret used in examples is a 2048 bit RSA\nkey/cert pair with an arbitrarily chosen hostname, created as follows $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \"/CN=nginxsvc/O=nginxsvc\" Generating a 2048 bit RSA private key ................+++ ................+++ writing new private key to 'tls.key' ----- $ kubectl create secret tls tls-secret --key tls.key --cert tls.crt secret \"tls-secret\" created", + "title": "TLS certificates" + }, + { + "location": "/examples/PREREQUISITES/#ca-authentication", + "text": "You can act as your very own CA, or use an existing one. As an exercise / learning, we're going to generate our\nown CA, and also generate a client certificate. These instructions are based on CoreOS OpenSSL. See live doc.", + "title": "CA Authentication" + }, + { + "location": "/examples/PREREQUISITES/#generating-a-ca", + "text": "First of all, you've to generate a CA. This is going to be the one who will sign your client certificates.\nIn real production world, you may face CAs with intermediate certificates, as the following: $ openssl s_client -connect www.google.com:443 [...] 
--- Certificate chain 0 s:/C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com i:/C=US/O=Google Inc/CN=Google Internet Authority G2 1 s:/C=US/O=Google Inc/CN=Google Internet Authority G2 i:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA 2 s:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA i:/C=US/O=Equifax/OU=Equifax Secure Certificate Authority To generate our CA Certificate, we've to run the following commands: $ openssl genrsa -out ca.key 2048 $ openssl req -x509 -new -nodes -key ca.key -days 10000 -out ca.crt -subj \"/CN=example-ca\" This will generate two files: A private key (ca.key) and a public key (ca.crt). This CA is valid for 10000 days.\nThe ca.crt can be used later in the step of creation of CA authentication secret.", + "title": "Generating a CA" + }, + { + "location": "/examples/PREREQUISITES/#generating-the-client-certificate", + "text": "The following steps generate a client certificate signed by the CA generated above. This client can be\nused to authenticate in a tls-auth configured ingress. First, we need to generate an 'openssl.cnf' file that will be used while signing the keys: [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment Then, a user generates his very own private key (that he needs to keep secret)\nand a CSR (Certificate Signing Request) that will be sent to the CA to sign and generate a certificate. 
$ openssl genrsa -out client1.key 2048 $ openssl req -new -key client1.key -out client1.csr -subj \"/CN=client1\" -config openssl.cnf As the CA receives the generated 'client1.csr' file, it signs it and generates a client.crt certificate: $ openssl x509 -req -in client1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client1.crt -days 365 -extensions v3_req -extfile openssl.cnf Then, you'll have 3 files: the client.key (user's private key), client.crt (user's public key) and client.csr (disposable CSR).", + "title": "Generating the client certificate" + }, + { + "location": "/examples/PREREQUISITES/#creating-the-ca-authentication-secret", + "text": "If you're using the CA Authentication feature, you need to generate a secret containing \nall the authorized CAs. You must download them from your CA site in PEM format (like the following): -----BEGIN CERTIFICATE-----\n[....]\n-----END CERTIFICATE----- You can have as many certificates as you want. If they're in the binary DER format, \nyou can convert them as the following: $ openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem Then, you've to concatenate them all in only one file, named 'ca.crt' as the following: $ cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt The final step is to create a secret with the content of this file. 
This secret is going to be used in \nthe TLS Auth directive: $ kubectl create secret generic caingress --namespace = default --from-file = ca.crt = Note: You can also generate the CA Authentication Secret along with the TLS Secret by using: $ kubectl create secret generic caingress --namespace = default --from-file = ca.crt = --from-file = tls.crt = --from-file = tls.key = ", + "title": "Creating the CA Authentication secret" + }, + { + "location": "/examples/PREREQUISITES/#test-http-service", + "text": "All examples that require a test HTTP Service use the standard http-svc pod,\nwhich you can deploy as follows $ kubectl create -f http-svc.yaml service \"http-svc\" created replicationcontroller \"http-svc\" created $ kubectl get po NAME READY STATUS RESTARTS AGE http-svc-p1t3t 1/1 Running 0 1d $ kubectl get svc NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE http-svc 10.0.122.116 80:30301/TCP 1d You can test that the HTTP Service works by exposing it temporarily $ kubectl patch svc http-svc -p '{\"spec\":{\"type\": \"LoadBalancer\"}}' \"http-svc\" patched $ kubectl get svc http-svc NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE http-svc 10.0.122.116 80:30301/TCP 1d $ kubectl describe svc http-svc Name: http-svc Namespace: default Labels: app=http-svc Selector: app=http-svc Type: LoadBalancer IP: 10.0.122.116 LoadBalancer Ingress: 108.59.87.136 Port: http 80/TCP NodePort: http 30301/TCP Endpoints: 10.180.1.6:8080 Session Affinity: None Events: FirstSeen LastSeen Count From SubObjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 1m 1m 1 {service-controller } Normal Type ClusterIP -> LoadBalancer 1m 1m 1 {service-controller } Normal CreatingLoadBalancer Creating load balancer 16s 16s 1 {service-controller } Normal CreatedLoadBalancer Created load balancer $ curl 108 .59.87.126 CLIENT VALUES: client_address=10.240.0.3 command=GET real path=/ query=nil request_version=1.1 request_uri=http://108.59.87.136:8080/ SERVER VALUES: 
server_version=nginx: 1.9.11 - lua: 10001 HEADERS RECEIVED: accept=*/* host=108.59.87.136 user-agent=curl/7.46.0 BODY: -no body in request- $ kubectl patch svc http-svc -p '{\"spec\":{\"type\": \"NodePort\"}}' \"http-svc\" patched", + "title": "Test HTTP Service" + }, + { + "location": "/examples/affinity/cookie/README/", + "text": "Sticky Session\n\u00b6\n\n\nThis example demonstrates how to achieve session affinity using cookies\n\n\nDeployment\n\u00b6\n\n\nSession stickiness is achieved through 3 annotations on the Ingress, as shown in the \nexample\n.\n\n\n\n\n\n\n\n\nName\n\n\nDescription\n\n\nValues\n\n\n\n\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/affinity\n\n\nSets the affinity type\n\n\nstring (in NGINX only \ncookie\n is possible\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/session-cookie-name\n\n\nName of the cookie that will be used\n\n\nstring (default to INGRESSCOOKIE)\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/session-cookie-hash\n\n\nType of hash that will be used in cookie value\n\n\nsha1/md5/index\n\n\n\n\n\n\n\n\nYou can create the ingress to test this\n\n\nkubectl create -f ingress.yaml\n\n\n\n\n\n\nValidation\n\u00b6\n\n\nYou can confirm that the Ingress works.\n\n\n$\n kubectl describe ing nginx-test\n\nName: nginx-test\n\n\nNamespace: default\n\n\nAddress: \n\n\nDefault backend: default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080)\n\n\nRules:\n\n\n Host Path Backends\n\n\n ---- ---- --------\n\n\n stickyingress.example.com \n\n\n / nginx-service:80 ()\n\n\nAnnotations:\n\n\n affinity: cookie\n\n\n session-cookie-hash: sha1\n\n\n session-cookie-name: INGRESSCOOKIE\n\n\nEvents:\n\n\n FirstSeen LastSeen Count From SubObjectPath Type Reason Message\n\n\n --------- -------- ----- ---- ------------- -------- ------ -------\n\n\n 7s 7s 1 {nginx-ingress-controller } Normal CREATE default/nginx-test\n\n\n\n\n$\n curl -I http://stickyingress.example.com\n\nHTTP/1.1 200 OK\n\n\nServer: nginx/1.11.9\n\n\nDate: Fri, 10 Feb 2017 14:11:12 
GMT\n\n\nContent-Type: text/html\n\n\nContent-Length: 612\n\n\nConnection: keep-alive\n\n\nSet-Cookie: INGRESSCOOKIE=a9907b79b248140b56bb13723f72b67697baac3d; Path=/; HttpOnly\n\n\nLast-Modified: Tue, 24 Jan 2017 14:02:19 GMT\n\n\nETag: \"58875e6b-264\"\n\n\nAccept-Ranges: bytes\n\n\n\n\n\n\nIn the example above, you can see a line containing the 'Set-Cookie: INGRESSCOOKIE' setting the right defined stickiness cookie.\nThis cookie is created by NGINX containing the hash of the used upstream in that request. \nIf the user changes this cookie, NGINX creates a new one and redirect the user to another upstream.\n\n\nIf the backend pool grows up NGINX will keep sending the requests through the same server of the first request, even if it's overloaded.\n\n\nWhen the backend server is removed, the requests are then re-routed to another upstream server and NGINX creates a new cookie, as the previous hash became invalid.\n\n\nWhen you have more than one Ingress Object pointing to the same Service, but one containing affinity configuration and other don't, the first created Ingress will be used. \nThis means that you can face the situation that you've configured Session Affinity in one Ingress and it doesn't reflects in NGINX configuration, because there is another Ingress Object pointing to the same service that doesn't configure this.", + "title": "Sticky Session" + }, + { + "location": "/examples/affinity/cookie/README/#sticky-session", + "text": "This example demonstrates how to achieve session affinity using cookies", + "title": "Sticky Session" + }, + { + "location": "/examples/affinity/cookie/README/#deployment", + "text": "Session stickiness is achieved through 3 annotations on the Ingress, as shown in the example . 
Name Description Values nginx.ingress.kubernetes.io/affinity Sets the affinity type string (in NGINX only cookie is possible nginx.ingress.kubernetes.io/session-cookie-name Name of the cookie that will be used string (default to INGRESSCOOKIE) nginx.ingress.kubernetes.io/session-cookie-hash Type of hash that will be used in cookie value sha1/md5/index You can create the ingress to test this kubectl create -f ingress.yaml", + "title": "Deployment" + }, + { + "location": "/examples/affinity/cookie/README/#validation", + "text": "You can confirm that the Ingress works. $ kubectl describe ing nginx-test Name: nginx-test Namespace: default Address: Default backend: default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080) Rules: Host Path Backends ---- ---- -------- stickyingress.example.com / nginx-service:80 () Annotations: affinity: cookie session-cookie-hash: sha1 session-cookie-name: INGRESSCOOKIE Events: FirstSeen LastSeen Count From SubObjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 7s 7s 1 {nginx-ingress-controller } Normal CREATE default/nginx-test $ curl -I http://stickyingress.example.com HTTP/1.1 200 OK Server: nginx/1.11.9 Date: Fri, 10 Feb 2017 14:11:12 GMT Content-Type: text/html Content-Length: 612 Connection: keep-alive Set-Cookie: INGRESSCOOKIE=a9907b79b248140b56bb13723f72b67697baac3d; Path=/; HttpOnly Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT ETag: \"58875e6b-264\" Accept-Ranges: bytes In the example above, you can see a line containing the 'Set-Cookie: INGRESSCOOKIE' setting the right defined stickiness cookie.\nThis cookie is created by NGINX containing the hash of the used upstream in that request. \nIf the user changes this cookie, NGINX creates a new one and redirect the user to another upstream. If the backend pool grows up NGINX will keep sending the requests through the same server of the first request, even if it's overloaded. 
When the backend server is removed, the requests are then re-routed to another upstream server and NGINX creates a new cookie, as the previous hash became invalid. When you have more than one Ingress Object pointing to the same Service, but one containing affinity configuration and other don't, the first created Ingress will be used. \nThis means that you can face the situation that you've configured Session Affinity in one Ingress and it doesn't reflects in NGINX configuration, because there is another Ingress Object pointing to the same service that doesn't configure this.", + "title": "Validation" + }, + { + "location": "/examples/auth/basic/README/", + "text": "Basic Authentication\n\u00b6\n\n\nThis example shows how to add authentication in a Ingress rule using a secret that contains a file generated with \nhtpasswd\n.\nIt's important the file generated is named \nauth\n (actually - that the secret has a key \ndata.auth\n), otherwise the ingress-controller returns a 503.\n\n\n$\n htpasswd -c auth foo\n\nNew password: \n\n\nNew password:\n\n\nRe-type new password:\n\n\nAdding password for user foo\n\n\n\n\n\n\n$\n kubectl create secret generic basic-auth --from-file\n=\nauth\n\nsecret \"basic-auth\" created\n\n\n\n\n\n\n$\n kubectl get secret basic-auth -o yaml\n\napiVersion: v1\n\n\ndata:\n\n\n auth: Zm9vOiRhcHIxJE9GRzNYeWJwJGNrTDBGSERBa29YWUlsSDkuY3lzVDAK\n\n\nkind: Secret\n\n\nmetadata:\n\n\n name: basic-auth\n\n\n namespace: default\n\n\ntype: Opaque\n\n\n\n\n\n\necho \"\n\n\napiVersion: extensions/v1beta1\n\n\nkind: Ingress\n\n\nmetadata:\n\n\n name: ingress-with-auth\n\n\n annotations:\n\n\n #\n \ntype\n of authentication\n\n nginx.ingress.kubernetes.io/auth-type: basic\n\n\n #\n name of the secret that contains the user/password definitions\n\n nginx.ingress.kubernetes.io/auth-secret: basic-auth\n\n\n #\n message to display with an appropriate context why the authentication is required\n\n nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required 
- foo'\n\n\nspec:\n\n\n rules:\n\n\n - host: foo.bar.com\n\n\n http:\n\n\n paths:\n\n\n - path: /\n\n\n backend:\n\n\n serviceName: http-svc\n\n\n servicePort: 80\n\n\n\" | kubectl create -f -\n\n\n\n\n\n\n$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com'\n* Trying 10.2.29.4...\n* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)\n> GET / HTTP/1.1\n> Host: foo.bar.com\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n\n< HTTP\n/1.1\n \n401\n \nUnauthorized\n\n\n<\n \nServer:\n \nnginx/1.10.0\n\n\n<\n \nDate:\n \nWed,\n \n11\n \nMay\n \n2016\n \n05:27:23\n \nGMT\n\n\n<\n \nContent-Type:\n \ntext/html\n\n\n<\n \nContent-Length:\n \n195\n\n\n<\n \nConnection:\n \nkeep-alive\n\n\n<\n \nWWW-Authenticate:\n \nBasic\n \nrealm=\n\"Authentication Required - foo\"\n\n\n<\n\n\n\n\n\n\n401 Authorization Required\n\n\n\n\n\n\n

    \n401 Authorization Required\n

    \n\n\n
    \nnginx/1.10.0\n
    \n\n\n\n\n\n\n\n* Connection #0 to host 10.2.29.4 left intact\n\n\n\n\n\n$ curl -v http://10.2.29.4/ -H \n'Host: foo.bar.com'\n -u \n'foo:bar'\n\n* Trying \n10\n.2.29.4...\n* Connected to \n10\n.2.29.4 \n(\n10\n.2.29.4\n)\n port \n80\n \n(\n#0)\n\n* Server auth using Basic with user \n'foo'\n\n> GET / HTTP/1.1\n> Host: foo.bar.com\n> Authorization: Basic \nZm9vOmJhcg\n==\n\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 \n200\n OK\n< Server: nginx/1.10.0\n< Date: Wed, \n11\n May \n2016\n \n06\n:05:26 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Vary: Accept-Encoding\n<\nCLIENT VALUES:\n\nclient_address\n=\n10\n.2.29.4\n\ncommand\n=\nGET\nreal \npath\n=\n/\n\nquery\n=\nnil\n\nrequest_version\n=\n1\n.1\n\nrequest_uri\n=\nhttp://foo.bar.com:8080/\n\nSERVER VALUES:\n\nserver_version\n=\nnginx: \n1\n.9.11 - lua: \n10001\n\n\nHEADERS RECEIVED:\n\naccept\n=\n*/*\n\nauthorization\n=\nBasic \nZm9vOmJhcg\n==\n\n\nconnection\n=\nclose\n\nhost\n=\nfoo.bar.com\nuser-agent\n=\ncurl/7.43.0\nx-forwarded-for\n=\n10\n.2.29.1\nx-forwarded-host\n=\nfoo.bar.com\nx-forwarded-port\n=\n80\n\nx-forwarded-proto\n=\nhttp\nx-real-ip\n=\n10\n.2.29.1\nBODY:\n* Connection \n#0 to host 10.2.29.4 left intact\n\n-no body in request-", + "title": "Basic Authentication" + }, + { + "location": "/examples/auth/basic/README/#basic-authentication", + "text": "This example shows how to add authentication in a Ingress rule using a secret that contains a file generated with htpasswd .\nIt's important the file generated is named auth (actually - that the secret has a key data.auth ), otherwise the ingress-controller returns a 503. 
$ htpasswd -c auth foo New password: New password: Re-type new password: Adding password for user foo $ kubectl create secret generic basic-auth --from-file = auth secret \"basic-auth\" created $ kubectl get secret basic-auth -o yaml apiVersion: v1 data: auth: Zm9vOiRhcHIxJE9GRzNYeWJwJGNrTDBGSERBa29YWUlsSDkuY3lzVDAK kind: Secret metadata: name: basic-auth namespace: default type: Opaque echo \" apiVersion: extensions/v1beta1 kind: Ingress metadata: name: ingress-with-auth annotations: # type of authentication nginx.ingress.kubernetes.io/auth-type: basic # name of the secret that contains the user/password definitions nginx.ingress.kubernetes.io/auth-secret: basic-auth # message to display with an appropriate context why the authentication is required nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo' spec: rules: - host: foo.bar.com http: paths: - path: / backend: serviceName: http-svc servicePort: 80 \" | kubectl create -f - $ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com'\n* Trying 10.2.29.4...\n* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)\n> GET / HTTP/1.1\n> Host: foo.bar.com\n> User-Agent: curl/7.43.0\n> Accept: */*\n> < HTTP /1.1 401 Unauthorized < Server: nginx/1.10.0 < Date: Wed, 11 May 2016 05:27:23 GMT < Content-Type: text/html < Content-Length: 195 < Connection: keep-alive < WWW-Authenticate: Basic realm= \"Authentication Required - foo\" < 401 Authorization Required

    401 Authorization Required


    nginx/1.10.0
    \n* Connection #0 to host 10.2.29.4 left intact $ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' -u 'foo:bar' \n* Trying 10 .2.29.4...\n* Connected to 10 .2.29.4 ( 10 .2.29.4 ) port 80 ( #0) \n* Server auth using Basic with user 'foo' \n> GET / HTTP/1.1\n> Host: foo.bar.com\n> Authorization: Basic Zm9vOmJhcg == \n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Server: nginx/1.10.0\n< Date: Wed, 11 May 2016 06 :05:26 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Vary: Accept-Encoding\n<\nCLIENT VALUES: client_address = 10 .2.29.4 command = GET\nreal path = / query = nil request_version = 1 .1 request_uri = http://foo.bar.com:8080/\n\nSERVER VALUES: server_version = nginx: 1 .9.11 - lua: 10001 \n\nHEADERS RECEIVED: accept = */* authorization = Basic Zm9vOmJhcg == connection = close host = foo.bar.com\nuser-agent = curl/7.43.0\nx-forwarded-for = 10 .2.29.1\nx-forwarded-host = foo.bar.com\nx-forwarded-port = 80 \nx-forwarded-proto = http\nx-real-ip = 10 .2.29.1\nBODY:\n* Connection #0 to host 10.2.29.4 left intact \n-no body in request-", + "title": "Basic Authentication" + }, + { + "location": "/examples/auth/client-certs/README/", + "text": "Client Certificate Authentication\n\u00b6\n\n\nIt is possible to enable Client Certificate Authentication using additional annotations in the Ingress.\n\n\nSetup instructions\n\u00b6\n\n\n\n\n\n\nCreate a file named \nca.crt\n containing the trusted certificate authority chain (all ca certificates in PEM format) to verify client certificates. 
\n\n\n\n\n\n\nCreate a secret from this file:\n\nkubectl create secret generic auth-tls-chain --from-file=ca.crt --namespace=default\n\n\n\n\n\n\nAdd the annotations as provided in the \ningress.yaml\n example to your ingress object.", + "title": "Client Certificate Authentication" + }, + { + "location": "/examples/auth/client-certs/README/#client-certificate-authentication", + "text": "It is possible to enable Client Certificate Authentication using additional annotations in the Ingress.", + "title": "Client Certificate Authentication" + }, + { + "location": "/examples/auth/client-certs/README/#setup-instructions", + "text": "Create a file named ca.crt containing the trusted certificate authority chain (all ca certificates in PEM format) to verify client certificates. Create a secret from this file: kubectl create secret generic auth-tls-chain --from-file=ca.crt --namespace=default Add the annotations as provided in the ingress.yaml example to your ingress object.", + "title": "Setup instructions" + }, + { + "location": "/examples/auth/external-auth/README/", + "text": "External Basic Authentication\n\u00b6\n\n\nExample 1:\n\u00b6\n\n\nUse an external service (Basic Auth) located in \nhttps://httpbin.org\n \n\n\n$ kubectl create -f ingress.yaml\ningress \n\"external-auth\"\n created\n\n$ kubectl get ing external-auth\nNAME HOSTS ADDRESS PORTS AGE\nexternal-auth external-auth-01.sample.com \n172\n.17.4.99 \n80\n 13s\n\n$ kubectl get ing external-auth -o yaml\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n annotations:\n nginx.ingress.kubernetes.io/auth-url: https://httpbin.org/basic-auth/user/passwd\n creationTimestamp: \n2016\n-10-03T13:50:35Z\n generation: \n1\n\n name: external-auth\n namespace: default\n resourceVersion: \n\"2068378\"\n\n selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/external-auth\n uid: 5c388f1d-8970-11e6-9004-080027d2dc94\nspec:\n rules:\n - host: external-auth-01.sample.com\n http:\n paths:\n - backend:\n 
serviceName: http-svc\n servicePort: \n80\n\n path: /\nstatus:\n loadBalancer:\n ingress:\n - ip: \n172\n.17.4.99\n$\n\n\n\n\n\nTest 1: no username/password (expect code 401)\n\n\n$\n curl -k http://172.17.4.99 -v -H \n'Host: external-auth-01.sample.com'\n\n\n* Rebuilt URL to: http://172.17.4.99/\n\n\n* Trying 172.17.4.99...\n\n\n* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)\n\n\n>\n GET / HTTP/1.1\n\n>\n Host: external-auth-01.sample.com\n\n>\n User-Agent: curl/7.50.1\n\n>\n Accept: */*\n\n>\n\n\n< HTTP/1.1 401 Unauthorized\n\n\n< Server: nginx/1.11.3\n\n\n< Date: Mon, 03 Oct 2016 14:52:08 GMT\n\n\n< Content-Type: text/html\n\n\n< Content-Length: 195\n\n\n< Connection: keep-alive\n\n\n< WWW-Authenticate: Basic realm=\"Fake Realm\"\n\n\n<\n\n\n\n\n\n401 Authorization Required\n\n\n\n\n\n

    401 Authorization Required

    \n\n\n
    nginx/1.11.3
    \n\n\n\n\n\n\n\n\n* Connection #0 to host 172.17.4.99 left intact\n\n\n\n\n\n\nTest 2: valid username/password (expect code 200)\n\n\n$ curl -k http://172.17.4.99 -v -H \n'Host: external-auth-01.sample.com'\n -u \n'user:passwd'\n\n* Rebuilt URL to: http://172.17.4.99/\n* Trying \n172\n.17.4.99...\n* Connected to \n172\n.17.4.99 \n(\n172\n.17.4.99\n)\n port \n80\n \n(\n#0)\n\n* Server auth using Basic with user \n'user'\n\n> GET / HTTP/1.1\n> Host: external-auth-01.sample.com\n> Authorization: Basic \ndXNlcjpwYXNzd2Q\n=\n\n> User-Agent: curl/7.50.1\n> Accept: */*\n>\n< HTTP/1.1 \n200\n OK\n< Server: nginx/1.11.3\n< Date: Mon, \n03\n Oct \n2016\n \n14\n:52:50 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n<\nCLIENT VALUES:\n\nclient_address\n=\n10\n.2.60.2\n\ncommand\n=\nGET\nreal \npath\n=\n/\n\nquery\n=\nnil\n\nrequest_version\n=\n1\n.1\n\nrequest_uri\n=\nhttp://external-auth-01.sample.com:8080/\n\nSERVER VALUES:\n\nserver_version\n=\nnginx: \n1\n.9.11 - lua: \n10001\n\n\nHEADERS RECEIVED:\n\naccept\n=\n*/*\n\nauthorization\n=\nBasic \ndXNlcjpwYXNzd2Q\n=\n\n\nconnection\n=\nclose\n\nhost\n=\nexternal-auth-01.sample.com\nuser-agent\n=\ncurl/7.50.1\nx-forwarded-for\n=\n10\n.2.60.1\nx-forwarded-host\n=\nexternal-auth-01.sample.com\nx-forwarded-port\n=\n80\n\nx-forwarded-proto\n=\nhttp\nx-real-ip\n=\n10\n.2.60.1\nBODY:\n* Connection \n#0 to host 172.17.4.99 left intact\n\n-no body in request-\n\n\n\n\n\nTest 3: invalid username/password (expect code 401)\n\n\ncurl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user'\n* Rebuilt URL to: http://172.17.4.99/\n* Trying 172.17.4.99...\n* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)\n* Server auth using Basic with user 'user'\n> GET / HTTP/1.1\n> Host: external-auth-01.sample.com\n> Authorization: Basic dXNlcjp1c2Vy\n> User-Agent: curl/7.50.1\n> Accept: */*\n>\n\n< HTTP\n/1.1\n \n401\n \nUnauthorized\n\n\n<\n \nServer:\n \nnginx/1.11.3\n\n\n<\n 
\nDate:\n \nMon,\n \n03\n \nOct\n \n2016\n \n14:53:04\n \nGMT\n\n\n<\n \nContent-Type:\n \ntext/html\n\n\n<\n \nContent-Length:\n \n195\n\n\n<\n \nConnection:\n \nkeep-alive\n\n\n*\n \nAuthentication\n \nproblem.\n \nIgnoring\n \nthis.\n\n\n<\n \nWWW-Authenticate:\n \nBasic\n \nrealm=\n\"Fake Realm\"\n\n\n<\n\n\n\n\n\n\n401 Authorization Required\n\n\n\n\n\n\n

    \n401 Authorization Required\n

    \n\n\n
    \nnginx/1.11.3\n
    \n\n\n\n\n\n\n\n* Connection #0 to host 172.17.4.99 left intact", + "title": "External Basic Authentication" + }, + { + "location": "/examples/auth/external-auth/README/#external-basic-authentication", + "text": "", + "title": "External Basic Authentication" + }, + { + "location": "/examples/auth/external-auth/README/#example-1", + "text": "Use an external service (Basic Auth) located in https://httpbin.org $ kubectl create -f ingress.yaml\ningress \"external-auth\" created\n\n$ kubectl get ing external-auth\nNAME HOSTS ADDRESS PORTS AGE\nexternal-auth external-auth-01.sample.com 172 .17.4.99 80 13s\n\n$ kubectl get ing external-auth -o yaml\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n annotations:\n nginx.ingress.kubernetes.io/auth-url: https://httpbin.org/basic-auth/user/passwd\n creationTimestamp: 2016 -10-03T13:50:35Z\n generation: 1 \n name: external-auth\n namespace: default\n resourceVersion: \"2068378\" \n selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/external-auth\n uid: 5c388f1d-8970-11e6-9004-080027d2dc94\nspec:\n rules:\n - host: external-auth-01.sample.com\n http:\n paths:\n - backend:\n serviceName: http-svc\n servicePort: 80 \n path: /\nstatus:\n loadBalancer:\n ingress:\n - ip: 172 .17.4.99\n$ Test 1: no username/password (expect code 401) $ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' * Rebuilt URL to: http://172.17.4.99/ * Trying 172.17.4.99... * Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) > GET / HTTP/1.1 > Host: external-auth-01.sample.com > User-Agent: curl/7.50.1 > Accept: */* > < HTTP/1.1 401 Unauthorized < Server: nginx/1.11.3 < Date: Mon, 03 Oct 2016 14:52:08 GMT < Content-Type: text/html < Content-Length: 195 < Connection: keep-alive < WWW-Authenticate: Basic realm=\"Fake Realm\" < 401 Authorization Required

    401 Authorization Required


    nginx/1.11.3
    * Connection #0 to host 172.17.4.99 left intact Test 2: valid username/password (expect code 200) $ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:passwd' \n* Rebuilt URL to: http://172.17.4.99/\n* Trying 172 .17.4.99...\n* Connected to 172 .17.4.99 ( 172 .17.4.99 ) port 80 ( #0) \n* Server auth using Basic with user 'user' \n> GET / HTTP/1.1\n> Host: external-auth-01.sample.com\n> Authorization: Basic dXNlcjpwYXNzd2Q = \n> User-Agent: curl/7.50.1\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Server: nginx/1.11.3\n< Date: Mon, 03 Oct 2016 14 :52:50 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n<\nCLIENT VALUES: client_address = 10 .2.60.2 command = GET\nreal path = / query = nil request_version = 1 .1 request_uri = http://external-auth-01.sample.com:8080/\n\nSERVER VALUES: server_version = nginx: 1 .9.11 - lua: 10001 \n\nHEADERS RECEIVED: accept = */* authorization = Basic dXNlcjpwYXNzd2Q = connection = close host = external-auth-01.sample.com\nuser-agent = curl/7.50.1\nx-forwarded-for = 10 .2.60.1\nx-forwarded-host = external-auth-01.sample.com\nx-forwarded-port = 80 \nx-forwarded-proto = http\nx-real-ip = 10 .2.60.1\nBODY:\n* Connection #0 to host 172.17.4.99 left intact \n-no body in request- Test 3: invalid username/password (expect code 401) curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user'\n* Rebuilt URL to: http://172.17.4.99/\n* Trying 172.17.4.99...\n* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)\n* Server auth using Basic with user 'user'\n> GET / HTTP/1.1\n> Host: external-auth-01.sample.com\n> Authorization: Basic dXNlcjp1c2Vy\n> User-Agent: curl/7.50.1\n> Accept: */*\n> < HTTP /1.1 401 Unauthorized < Server: nginx/1.11.3 < Date: Mon, 03 Oct 2016 14:53:04 GMT < Content-Type: text/html < Content-Length: 195 < Connection: keep-alive * Authentication problem. Ignoring this. 
< WWW-Authenticate: Basic realm= \"Fake Realm\" < 401 Authorization Required

    401 Authorization Required


    nginx/1.11.3
    \n* Connection #0 to host 172.17.4.99 left intact", + "title": "Example 1:" + }, + { + "location": "/examples/auth/oauth-external-auth/README/", + "text": "External OAUTH Authentication\n\u00b6\n\n\nOverview\n\u00b6\n\n\nThe \nauth-url\n and \nauth-signin\n annotations allow you to use an external\nauthentication provider to protect your Ingress resources.\n\n\n\n\nImportant\n\n\nThis annotation requires \nnginx-ingress-controller v0.9.0\n or greater.)\n\n\n\n\nKey Detail\n\u00b6\n\n\nThis functionality is enabled by deploying multiple Ingress objects for a single host.\nOne Ingress object has no special annotations and handles authentication.\n\n\nOther Ingress objects can then be annotated in such a way that require the user to\nauthenticate against the first Ingress's endpoint, and can redirect \n401\ns to the\nsame endpoint.\n\n\nSample:\n\n\n...\n\n\nmetadata\n:\n\n \nname\n:\n \napplication\n\n \nannotations\n:\n\n \nnginx.ingress.kubernetes.io/auth-url\n:\n \n\"https://$host/oauth2/auth\"\n\n \nnginx.ingress.kubernetes.io/auth-signin\n:\n \n\"https://$host/oauth2/start?rd=$escaped_request_uri\"\n\n\n...\n\n\n\n\n\n\nExample: OAuth2 Proxy + Kubernetes-Dashboard\n\u00b6\n\n\nThis example will show you how to deploy \noauth2_proxy\n\ninto a Kubernetes cluster and use it to protect the Kubernetes Dashboard using github as oAuth2 provider\n\n\nPrepare\n\u00b6\n\n\n\n\nInstall the kubernetes dashboard\n\n\n\n\nkubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.5.0.yaml\n\n\n\n\n\n\n\n\nCreate a \ncustom Github OAuth application\n\n\n\n\n\n\n\n\nHomepage URL is the FQDN in the Ingress rule, like \nhttps://foo.bar.com\n\n\nAuthorization callback URL is the same as the base FQDN plus \n/oauth2\n, like \nhttps://foo.bar.com/oauth2\n\n\n\n\n\n\n\n\n\n\nConfigure oauth2_proxy values in the file oauth2-proxy.yaml with the values:\n\n\n\n\n\n\nOAUTH2_PROXY_CLIENT_ID with the github 
\n\n\n\n\n\nOAUTH2_PROXY_CLIENT_SECRET with the github \n\n\n\n\n\nOAUTH2_PROXY_COOKIE_SECRET with value of \npython\n \n-\nc\n \n'import os,base64; print base64.b64encode(os.urandom(16))'\n \n\n\n\n\n\n\nCustomize the contents of the file dashboard-ingress.yaml:\n\n\n\n\n\n\nReplace \n__INGRESS_HOST__\n with a valid FQDN and \n__INGRESS_SECRET__\n with a Secret with a valid SSL certificate.\n\n\n\n\nDeploy the oauth2 proxy and the ingress rules running:\n\n\n\n\n$\n kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml\n\n\n\n\n\nTest the oauth integration accessing the configured URL, like \nhttps://foo.bar.com", + "title": "External OAUTH Authentication" + }, + { + "location": "/examples/auth/oauth-external-auth/README/#external-oauth-authentication", + "text": "", + "title": "External OAUTH Authentication" + }, + { + "location": "/examples/auth/oauth-external-auth/README/#overview", + "text": "The auth-url and auth-signin annotations allow you to use an external\nauthentication provider to protect your Ingress resources. Important This annotation requires nginx-ingress-controller v0.9.0 or greater.)", + "title": "Overview" + }, + { + "location": "/examples/auth/oauth-external-auth/README/#key-detail", + "text": "This functionality is enabled by deploying multiple Ingress objects for a single host.\nOne Ingress object has no special annotations and handles authentication. Other Ingress objects can then be annotated in such a way that require the user to\nauthenticate against the first Ingress's endpoint, and can redirect 401 s to the\nsame endpoint. Sample: ... 
metadata : \n name : application \n annotations : \n nginx.ingress.kubernetes.io/auth-url : \"https://$host/oauth2/auth\" \n nginx.ingress.kubernetes.io/auth-signin : \"https://$host/oauth2/start?rd=$escaped_request_uri\" ...", + "title": "Key Detail" + }, + { + "location": "/examples/auth/oauth-external-auth/README/#example-oauth2-proxy-kubernetes-dashboard", + "text": "This example will show you how to deploy oauth2_proxy \ninto a Kubernetes cluster and use it to protect the Kubernetes Dashboard using github as oAuth2 provider", + "title": "Example: OAuth2 Proxy + Kubernetes-Dashboard" + }, + { + "location": "/examples/auth/oauth-external-auth/README/#prepare", + "text": "Install the kubernetes dashboard kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.5.0.yaml Create a custom Github OAuth application Homepage URL is the FQDN in the Ingress rule, like https://foo.bar.com Authorization callback URL is the same as the base FQDN plus /oauth2 , like https://foo.bar.com/oauth2 Configure oauth2_proxy values in the file oauth2-proxy.yaml with the values: OAUTH2_PROXY_CLIENT_ID with the github OAUTH2_PROXY_CLIENT_SECRET with the github OAUTH2_PROXY_COOKIE_SECRET with value of python - c 'import os,base64; print base64.b64encode(os.urandom(16))' Customize the contents of the file dashboard-ingress.yaml: Replace __INGRESS_HOST__ with a valid FQDN and __INGRESS_SECRET__ with a Secret with a valid SSL certificate. Deploy the oauth2 proxy and the ingress rules running: $ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml Test the oauth integration accessing the configured URL, like https://foo.bar.com", + "title": "Prepare" + }, + { + "location": "/examples/customization/configuration-snippets/README/", + "text": "Configuration Snippets\n\u00b6\n\n\nIngress\n\u00b6\n\n\nThe Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. 
If you want to add headers that apply globally to all Ingresses, please have a look at \nthis example\n.\n\n\n$\n kubectl apply -f ingress.yaml\n\n\n\n\n\nTest\n\u00b6\n\n\nCheck if the contents of the annotation are present in the nginx.conf file using:\n\nkubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Configuration Snippets" + }, + { + "location": "/examples/customization/configuration-snippets/README/#configuration-snippets", + "text": "", + "title": "Configuration Snippets" + }, + { + "location": "/examples/customization/configuration-snippets/README/#ingress", + "text": "The Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at this example . $ kubectl apply -f ingress.yaml", + "title": "Ingress" + }, + { + "location": "/examples/customization/configuration-snippets/README/#test", + "text": "Check if the contents of the annotation are present in the nginx.conf file using: kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Test" + }, + { + "location": "/examples/customization/custom-configuration/README/", + "text": "Custom Configuration\n\u00b6\n\n\nUsing a \nConfigMap\n is possible to customize the NGINX configuration\n\n\nFor example, if we want to change the timeouts we need to create a ConfigMap:\n\n\n$ cat configmap.yaml\napiVersion: v1\ndata:\n proxy-connect-timeout: \n\"10\"\n\n proxy-read-timeout: \n\"120\"\n\n proxy-send-timeout: \n\"120\"\n\nkind: ConfigMap\nmetadata:\n name: nginx-load-balancer-conf\n\n\n\n\n\ncurl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \\\n | kubectl apply -f -\n\n\n\n\n\nIf the Configmap it is updated, NGINX will be reloaded with the new configuration.", + "title": "Custom 
Configuration" + }, + { + "location": "/examples/customization/custom-configuration/README/#custom-configuration", + "text": "Using a ConfigMap is possible to customize the NGINX configuration For example, if we want to change the timeouts we need to create a ConfigMap: $ cat configmap.yaml\napiVersion: v1\ndata:\n proxy-connect-timeout: \"10\" \n proxy-read-timeout: \"120\" \n proxy-send-timeout: \"120\" \nkind: ConfigMap\nmetadata:\n name: nginx-load-balancer-conf curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \\\n | kubectl apply -f - If the Configmap it is updated, NGINX will be reloaded with the new configuration.", + "title": "Custom Configuration" + }, + { + "location": "/examples/customization/custom-errors/README/", + "text": "Custom Errors\n\u00b6\n\n\nThis example demonstrates how to use a custom backend to render custom error pages.\n\n\nCustomized default backend\n\u00b6\n\n\nFirst, create the custom \ndefault-backend\n. 
It will be used by the Ingress controller later on.\n\n\n$ kubectl create -f custom-default-backend.yaml\nservice \n\"nginx-errors\"\n created\ndeployment.apps \n\"nginx-errors\"\n created\n\n\n\n\n\nThis should have created a Deployment and a Service with the name \nnginx-errors\n.\n\n\n$ kubectl get deploy,svc\nNAME DESIRED CURRENT READY AGE\ndeployment.apps/nginx-errors \n1\n \n1\n \n1\n 10s\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT\n(\nS\n)\n AGE\nservice/nginx-errors ClusterIP \n10\n.0.0.12 \n80\n/TCP 10s\n\n\n\n\n\nIngress controller configuration\n\u00b6\n\n\nIf you do not already have an instance of the the NGINX Ingress controller running, deploy it according to the\n\ndeployment guide\n, then follow these steps:\n\n\n\n\n\n\nEdit the \nnginx-ingress-controller\n Deployment and set the value of the \n--default-backend\n flag to the name of the\n newly created error backend.\n\n\n\n\n\n\nEdit the \nnginx-configuration\n ConfigMap and create the key \ncustom-http-errors\n with a value of \n404,503\n.\n\n\n\n\n\n\nTake note of the IP address assigned to the NGINX Ingress controller Service.\n \n$ kubectl get svc ingress-nginxNAME TYPE CLUSTER-IP EXTERNAL-IP PORT\n(\nS\n)\n AGEingress-nginx ClusterIP \n10\n.0.0.13 \n80\n/TCP,443/TCP 10m\n\n\n\n\n\n\n\n\nNote\n\n\nThe \ningress-nginx\n Service is of type \nClusterIP\n in this example. 
This may vary depending on your environment.\nMake sure you can use the Service to reach NGINX before proceeding with the rest of this example.\n\n\n\n\nTesting error pages\n\u00b6\n\n\nLet us send a couple of HTTP requests using cURL and validate everything is working as expected.\n\n\nA request to the default backend returns a 404 error with a custom message:\n\n\n$ curl -D- http://10.0.0.13/\nHTTP/1.1 404 Not Found\nServer: nginx/1.13.12\nDate: Tue, 12 Jun 2018 19:11:24 GMT\nContent-Type: */*\nTransfer-Encoding: chunked\nConnection: keep-alive\n\n\n\nThe page you're looking for could not be found.\n\n\n\n\n\n\n\nA request with a custom \nAccept\n header returns the corresponding document type (JSON):\n\n\n$ curl -D- -H \n'Accept: application/json'\n http://10.0.0.13/\nHTTP/1.1 \n404\n Not Found\nServer: nginx/1.13.12\nDate: Tue, \n12\n Jun \n2018\n \n19\n:12:36 GMT\nContent-Type: application/json\nTransfer-Encoding: chunked\nConnection: keep-alive\nVary: Accept-Encoding\n\n\n{\n \n\"message\"\n: \n\"The page you're looking for could not be found\"\n \n}\n\n\n\n\n\n\nTo go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the\nresponses are still in the correct format when a backend returns 503 (eg. if you scale a Deployment down to 0 replica).", + "title": "Custom Errors" + }, + { + "location": "/examples/customization/custom-errors/README/#custom-errors", + "text": "This example demonstrates how to use a custom backend to render custom error pages.", + "title": "Custom Errors" + }, + { + "location": "/examples/customization/custom-errors/README/#customized-default-backend", + "text": "First, create the custom default-backend . It will be used by the Ingress controller later on. $ kubectl create -f custom-default-backend.yaml\nservice \"nginx-errors\" created\ndeployment.apps \"nginx-errors\" created This should have created a Deployment and a Service with the name nginx-errors . 
$ kubectl get deploy,svc\nNAME DESIRED CURRENT READY AGE\ndeployment.apps/nginx-errors 1 1 1 10s\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT ( S ) AGE\nservice/nginx-errors ClusterIP 10 .0.0.12 80 /TCP 10s", + "title": "Customized default backend" + }, + { + "location": "/examples/customization/custom-errors/README/#ingress-controller-configuration", + "text": "If you do not already have an instance of the the NGINX Ingress controller running, deploy it according to the deployment guide , then follow these steps: Edit the nginx-ingress-controller Deployment and set the value of the --default-backend flag to the name of the\n newly created error backend. Edit the nginx-configuration ConfigMap and create the key custom-http-errors with a value of 404,503 . Take note of the IP address assigned to the NGINX Ingress controller Service.\n $ kubectl get svc ingress-nginxNAME TYPE CLUSTER-IP EXTERNAL-IP PORT ( S ) AGEingress-nginx ClusterIP 10 .0.0.13 80 /TCP,443/TCP 10m Note The ingress-nginx Service is of type ClusterIP in this example. This may vary depending on your environment.\nMake sure you can use the Service to reach NGINX before proceeding with the rest of this example.", + "title": "Ingress controller configuration" + }, + { + "location": "/examples/customization/custom-errors/README/#testing-error-pages", + "text": "Let us send a couple of HTTP requests using cURL and validate everything is working as expected. A request to the default backend returns a 404 error with a custom message: $ curl -D- http://10.0.0.13/\nHTTP/1.1 404 Not Found\nServer: nginx/1.13.12\nDate: Tue, 12 Jun 2018 19:11:24 GMT\nContent-Type: */*\nTransfer-Encoding: chunked\nConnection: keep-alive The page you're looking for could not be found. 
A request with a custom Accept header returns the corresponding document type (JSON): $ curl -D- -H 'Accept: application/json' http://10.0.0.13/\nHTTP/1.1 404 Not Found\nServer: nginx/1.13.12\nDate: Tue, 12 Jun 2018 19 :12:36 GMT\nContent-Type: application/json\nTransfer-Encoding: chunked\nConnection: keep-alive\nVary: Accept-Encoding { \"message\" : \"The page you're looking for could not be found\" } To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the\nresponses are still in the correct format when a backend returns 503 (eg. if you scale a Deployment down to 0 replica).", + "title": "Testing error pages" + }, + { + "location": "/examples/customization/custom-headers/README/", + "text": "Custom Headers\n\u00b6\n\n\nThis example aims to demonstrate the deployment of an nginx ingress controller and\nuse a ConfigMap to configure a custom list of headers to be passed to the upstream\nserver\n\n\ncurl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml \\\n\n\n | kubectl apply -f -\n\n\n\ncurl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml \\\n\n\n | kubectl apply -f -\n\n\n\n\n\n\nTest\n\u00b6\n\n\nCheck the contents of the configmap is present in the nginx.conf file using:\n\nkubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Custom Headers" + }, + { + "location": "/examples/customization/custom-headers/README/#custom-headers", + "text": "This example aims to demonstrate the deployment of an nginx ingress controller and\nuse a ConfigMap to configure a custom list of headers to be passed to the upstream\nserver curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml \\ | kubectl apply -f - curl 
https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml \\ | kubectl apply -f -", + "title": "Custom Headers" + }, + { + "location": "/examples/customization/custom-headers/README/#test", + "text": "Check the contents of the configmap is present in the nginx.conf file using: kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Test" + }, + { + "location": "/examples/customization/custom-upstream-check/README/", + "text": "Custom Upstream server checks\n\u00b6\n\n\nThis example shows how is possible to create a custom configuration for a particular upstream associated with an Ingress rule.\n\n\necho \"\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: http-svc\n annotations:\n nginx.ingress.kubernetes.io/upstream-fail-timeout: \"30\"\nspec:\n rules:\n - host: foo.bar.com\n http:\n paths:\n - path: /\n backend:\n serviceName: http-svc\n servicePort: 80\n\" | kubectl create -f -\n\n\n\n\n\nCheck the annotation is present in the Ingress rule:\n\n\nkubectl get ingress http-svc -o yaml\n\n\n\n\n\nCheck the NGINX configuration is updated using kubectl or the status page:\n\n\n$ kubectl \nexec\n nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf\n\n\n\n\n\n....\n\n \nupstream\n \ndefault-http-svc-x-80\n \n{\n\n \nleast_conn\n;\n\n \nserver\n \n10.2.92.2:8080\n \nmax_fails=5\n \nfail_timeout=30\n;\n\n\n \n}\n\n\n....", + "title": "Custom Upstream server checks" + }, + { + "location": "/examples/customization/custom-upstream-check/README/#custom-upstream-server-checks", + "text": "This example shows how is possible to create a custom configuration for a particular upstream associated with an Ingress rule. 
echo \"\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: http-svc\n annotations:\n nginx.ingress.kubernetes.io/upstream-fail-timeout: \"30\"\nspec:\n rules:\n - host: foo.bar.com\n http:\n paths:\n - path: /\n backend:\n serviceName: http-svc\n servicePort: 80\n\" | kubectl create -f - Check the annotation is present in the Ingress rule: kubectl get ingress http-svc -o yaml Check the NGINX configuration is updated using kubectl or the status page: $ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf .... \n upstream default-http-svc-x-80 { \n least_conn ; \n server 10.2.92.2:8080 max_fails=5 fail_timeout=30 ; \n\n } ....", + "title": "Custom Upstream server checks" + }, + { + "location": "/examples/customization/external-auth-headers/README/", + "text": "External authentication, authentication service response headers propagation\n\u00b6\n\n\nThis example demonstrates propagation of selected authentication service response headers\nto backend service.\n\n\nSample configuration includes:\n\n\n\n\nSample authentication service producing several response headers\n\n\nAuthentication logic is based on HTTP header: requests with header \nUser\n containing string \ninternal\n are considered authenticated\n\n\nAfter successful authentication service generates response headers \nUserID\n and \nUserRole\n\n\nSample echo service displaying header information\n\n\nTwo ingress objects pointing to echo service\n\n\nPublic, which allows access from unauthenticated users\n\n\nPrivate, which allows access from authenticated users only\n\n\n\n\nYou can deploy the controller as\nfollows:\n\n\n$\n kubectl create -f deploy/\n\ndeployment \"demo-auth-service\" created\n\n\nservice \"demo-auth-service\" created\n\n\ningress \"demo-auth-service\" created\n\n\ndeployment \"demo-echo-service\" created\n\n\nservice \"demo-echo-service\" created\n\n\ningress \"public-demo-echo-service\" created\n\n\ningress \"secure-demo-echo-service\" created\n\n\n\n$\n 
kubectl get po\n\nNAME READY STATUS RESTARTS AGE\n\n\nNAME READY STATUS RESTARTS AGE\n\n\ndemo-auth-service-2769076528-7g9mh 1/1 Running 0 30s\n\n\ndemo-echo-service-3636052215-3vw8c 1/1 Running 0 29s\n\n\n\nkubectl get ing\n\n\nNAME HOSTS ADDRESS PORTS AGE\n\n\npublic-demo-echo-service public-demo-echo-service.kube.local 80 1m\n\n\nsecure-demo-echo-service secure-demo-echo-service.kube.local 80 1m\n\n\n\n\n\n\nTest 1: public service with no auth header\n\n\n$\n curl -H \n'Host: public-demo-echo-service.kube.local'\n -v \n192\n.168.99.100\n\n* Rebuilt URL to: 192.168.99.100/\n\n\n* Trying 192.168.99.100...\n\n\n* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)\n\n\n>\n GET / HTTP/1.1\n\n>\n Host: public-demo-echo-service.kube.local\n\n>\n User-Agent: curl/7.43.0\n\n>\n Accept: */*\n\n>\n\n\n< HTTP/1.1 200 OK\n\n\n< Server: nginx/1.11.10\n\n\n< Date: Mon, 13 Mar 2017 20:19:21 GMT\n\n\n< Content-Type: text/plain; charset=utf-8\n\n\n< Content-Length: 20\n\n\n< Connection: keep-alive\n\n\n<\n\n\n* Connection #0 to host 192.168.99.100 left intact\n\n\nUserID: , UserRole:\n\n\n\n\n\n\nTest 2: secure service with no auth header\n\n\n$\n curl -H \n'Host: secure-demo-echo-service.kube.local'\n -v \n192\n.168.99.100\n\n* Rebuilt URL to: 192.168.99.100/\n\n\n* Trying 192.168.99.100...\n\n\n* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)\n\n\n>\n GET / HTTP/1.1\n\n>\n Host: secure-demo-echo-service.kube.local\n\n>\n User-Agent: curl/7.43.0\n\n>\n Accept: */*\n\n>\n\n\n< HTTP/1.1 403 Forbidden\n\n\n< Server: nginx/1.11.10\n\n\n< Date: Mon, 13 Mar 2017 20:18:48 GMT\n\n\n< Content-Type: text/html\n\n\n< Content-Length: 170\n\n\n< Connection: keep-alive\n\n\n<\n\n\n\n\n\n403 Forbidden\n\n\n\n\n\n

    403 Forbidden

    \n\n\n
    nginx/1.11.10
    \n\n\n\n\n\n\n\n\n* Connection #0 to host 192.168.99.100 left intact\n\n\n\n\n\n\nTest 3: public service with valid auth header\n\n\n$\n curl -H \n'Host: public-demo-echo-service.kube.local'\n -H \n'User:internal'\n -v \n192\n.168.99.100\n\n* Rebuilt URL to: 192.168.99.100/\n\n\n* Trying 192.168.99.100...\n\n\n* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)\n\n\n>\n GET / HTTP/1.1\n\n>\n Host: public-demo-echo-service.kube.local\n\n>\n User-Agent: curl/7.43.0\n\n>\n Accept: */*\n\n>\n User:internal\n\n>\n\n\n< HTTP/1.1 200 OK\n\n\n< Server: nginx/1.11.10\n\n\n< Date: Mon, 13 Mar 2017 20:19:59 GMT\n\n\n< Content-Type: text/plain; charset=utf-8\n\n\n< Content-Length: 44\n\n\n< Connection: keep-alive\n\n\n<\n\n\n* Connection #0 to host 192.168.99.100 left intact\n\n\nUserID: 1443635317331776148, UserRole: admin\n\n\n\n\n\n\nTest 4: public service with valid auth header\n\n\n$\n curl -H \n'Host: secure-demo-echo-service.kube.local'\n -H \n'User:internal'\n -v \n192\n.168.99.100\n\n* Rebuilt URL to: 192.168.99.100/\n\n\n* Trying 192.168.99.100...\n\n\n* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)\n\n\n>\n GET / HTTP/1.1\n\n>\n Host: secure-demo-echo-service.kube.local\n\n>\n User-Agent: curl/7.43.0\n\n>\n Accept: */*\n\n>\n User:internal\n\n>\n\n\n< HTTP/1.1 200 OK\n\n\n< Server: nginx/1.11.10\n\n\n< Date: Mon, 13 Mar 2017 20:17:23 GMT\n\n\n< Content-Type: text/plain; charset=utf-8\n\n\n< Content-Length: 43\n\n\n< Connection: keep-alive\n\n\n<\n\n\n* Connection #0 to host 192.168.99.100 left intact\n\n\nUserID: 605394647632969758, UserRole: admin", + "title": "External authentication, authentication service response headers propagation" + }, + { + "location": "/examples/customization/external-auth-headers/README/#external-authentication-authentication-service-response-headers-propagation", + "text": "This example demonstrates propagation of selected authentication service response headers\nto backend service. 
Sample configuration includes: Sample authentication service producing several response headers Authentication logic is based on HTTP header: requests with header User containing string internal are considered authenticated After successful authentication service generates response headers UserID and UserRole Sample echo service displaying header information Two ingress objects pointing to echo service Public, which allows access from unauthenticated users Private, which allows access from authenticated users only You can deploy the controller as\nfollows: $ kubectl create -f deploy/ deployment \"demo-auth-service\" created service \"demo-auth-service\" created ingress \"demo-auth-service\" created deployment \"demo-echo-service\" created service \"demo-echo-service\" created ingress \"public-demo-echo-service\" created ingress \"secure-demo-echo-service\" created $ kubectl get po NAME READY STATUS RESTARTS AGE NAME READY STATUS RESTARTS AGE demo-auth-service-2769076528-7g9mh 1/1 Running 0 30s demo-echo-service-3636052215-3vw8c 1/1 Running 0 29s kubectl get ing NAME HOSTS ADDRESS PORTS AGE public-demo-echo-service public-demo-echo-service.kube.local 80 1m secure-demo-echo-service secure-demo-echo-service.kube.local 80 1m Test 1: public service with no auth header $ curl -H 'Host: public-demo-echo-service.kube.local' -v 192 .168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) > GET / HTTP/1.1 > Host: public-demo-echo-service.kube.local > User-Agent: curl/7.43.0 > Accept: */* > < HTTP/1.1 200 OK < Server: nginx/1.11.10 < Date: Mon, 13 Mar 2017 20:19:21 GMT < Content-Type: text/plain; charset=utf-8 < Content-Length: 20 < Connection: keep-alive < * Connection #0 to host 192.168.99.100 left intact UserID: , UserRole: Test 2: secure service with no auth header $ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192 .168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) > GET / HTTP/1.1 > Host: secure-demo-echo-service.kube.local > User-Agent: curl/7.43.0 > Accept: */* > < HTTP/1.1 403 Forbidden < Server: nginx/1.11.10 < Date: Mon, 13 Mar 2017 20:18:48 GMT < Content-Type: text/html < Content-Length: 170 < Connection: keep-alive < 403 Forbidden

    403 Forbidden


    nginx/1.11.10
    * Connection #0 to host 192.168.99.100 left intact Test 3: public service with valid auth header $ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192 .168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) > GET / HTTP/1.1 > Host: public-demo-echo-service.kube.local > User-Agent: curl/7.43.0 > Accept: */* > User:internal > < HTTP/1.1 200 OK < Server: nginx/1.11.10 < Date: Mon, 13 Mar 2017 20:19:59 GMT < Content-Type: text/plain; charset=utf-8 < Content-Length: 44 < Connection: keep-alive < * Connection #0 to host 192.168.99.100 left intact UserID: 1443635317331776148, UserRole: admin Test 4: public service with valid auth header $ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192 .168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) > GET / HTTP/1.1 > Host: secure-demo-echo-service.kube.local > User-Agent: curl/7.43.0 > Accept: */* > User:internal > < HTTP/1.1 200 OK < Server: nginx/1.11.10 < Date: Mon, 13 Mar 2017 20:17:23 GMT < Content-Type: text/plain; charset=utf-8 < Content-Length: 43 < Connection: keep-alive < * Connection #0 to host 192.168.99.100 left intact UserID: 605394647632969758, UserRole: admin", + "title": "External authentication, authentication service response headers propagation" + }, + { + "location": "/examples/customization/ssl-dh-param/README/", + "text": "Custom DH parameters for perfect forward secrecy\n\u00b6\n\n\nThis example aims to demonstrate the deployment of an nginx ingress controller and\nuse a ConfigMap to configure custom Diffie-Hellman parameters file to help with\n\"Perfect Forward Secrecy\".\n\n\nCustom configuration\n\u00b6\n\n\n$\n cat configmap.yaml\n\napiVersion: v1\n\n\ndata:\n\n\n ssl-dh-param: \"ingress-nginx/lb-dhparam\"\n\n\nkind: ConfigMap\n\n\nmetadata:\n\n\n name: nginx-configuration\n\n\n namespace: 
ingress-nginx\n\n\n labels:\n\n\n app: ingress-nginx\n\n\n\n\n\n\n$\n kubectl create -f configmap.yaml\n\n\n\n\n\nCustom DH parameters secret\n\u00b6\n\n\n$\n> openssl dhparam \n1024\n \n2\n> /dev/null \n|\n base64\n\nLS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...\n\n\n\n\n\n\n$\n cat ssl-dh-param.yaml\n\napiVersion: v1\n\n\ndata:\n\n\n dhparam.pem: \"LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...\"\n\n\nkind: ConfigMap\n\n\nmetadata:\n\n\n name: nginx-configuration\n\n\n namespace: ingress-nginx\n\n\n labels:\n\n\n app: ingress-nginx\n\n\n\n\n\n\n$\n kubectl create -f ssl-dh-param.yaml\n\n\n\n\n\nTest\n\u00b6\n\n\nCheck the contents of the configmap is present in the nginx.conf file using:\n\nkubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Custom DH parameters for perfect forward secrecy" + }, + { + "location": "/examples/customization/ssl-dh-param/README/#custom-dh-parameters-for-perfect-forward-secrecy", + "text": "This example aims to demonstrate the deployment of an nginx ingress controller and\nuse a ConfigMap to configure custom Diffie-Hellman parameters file to help with\n\"Perfect Forward Secrecy\".", + "title": "Custom DH parameters for perfect forward secrecy" + }, + { + "location": "/examples/customization/ssl-dh-param/README/#custom-configuration", + "text": "$ cat configmap.yaml apiVersion: v1 data: ssl-dh-param: \"ingress-nginx/lb-dhparam\" kind: ConfigMap metadata: name: nginx-configuration namespace: ingress-nginx labels: app: ingress-nginx $ kubectl create -f configmap.yaml", + "title": "Custom configuration" + }, + { + "location": "/examples/customization/ssl-dh-param/README/#custom-dh-parameters-secret", + "text": "$ > openssl dhparam 1024 2 > /dev/null | base64 LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ... 
$ cat ssl-dh-param.yaml apiVersion: v1 data: dhparam.pem: \"LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...\" kind: ConfigMap metadata: name: nginx-configuration namespace: ingress-nginx labels: app: ingress-nginx $ kubectl create -f ssl-dh-param.yaml", + "title": "Custom DH parameters secret" + }, + { + "location": "/examples/customization/ssl-dh-param/README/#test", + "text": "Check the contents of the configmap is present in the nginx.conf file using: kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf", + "title": "Test" + }, + { + "location": "/examples/customization/sysctl/README/", + "text": "Sysctl tuning\n\u00b6\n\n\nThis example aims to demonstrate the use of an Init Container to adjust sysctl default values\nusing \nkubectl patch\n\n\nkubectl patch deployment -n ingress-nginx nginx-ingress-controller --patch=\"$(cat patch.json)\"", + "title": "Sysctl tuning" + }, + { + "location": "/examples/customization/sysctl/README/#sysctl-tuning", + "text": "This example aims to demonstrate the use of an Init Container to adjust sysctl default values\nusing kubectl patch kubectl patch deployment -n ingress-nginx nginx-ingress-controller --patch=\"$(cat patch.json)\"", + "title": "Sysctl tuning" + }, + { + "location": "/examples/docker-registry/README/", + "text": "Docker registry\n\u00b6\n\n\nThis example demonstrates how to deploy a \ndocker registry\n in the cluster and configure Ingress enable access from Internet\n\n\nDeployment\n\u00b6\n\n\nFirst we deploy the docker registry in the cluster:\n\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml\n\n\n\n\n\n\n\n\nImportant\n\n\nDO NOT RUN THIS IN PRODUCTION\n\n\nThis deployment uses \nemptyDir\n in the \nvolumeMount\n which means the contents of the registry will be deleted when the pod dies.\n\n\n\n\nThe next required step is creation of the ingress rules. 
To do this we have two options: with and without TLS\n\n\nWithout TLS\n\u00b6\n\n\nDownload and edit the yaml deployment replacing \nregistry.\n with a valid DNS name pointing to the ingress controller:\n\n\nwget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml\n\n\n\n\n\n\n\n\nImportant\n\n\n\n\nRunning a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag.\n\n\nPlease check \ndeploy a plain http registry\n\n\nWith TLS\n\u00b6\n\n\nDownload and edit the yaml deployment replacing \nregistry.\n with a valid DNS name pointing to the ingress controller:\n\n\nwget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml\n\n\n\n\n\n\nDeploy \nkube lego\n use \nLet's Encrypt\n certificates or edit the ingress rule to use a secret with an existing SSL certificate.\n\n\nTesting\n\u00b6\n\n\nTo test the registry is working correctly we download a known image from \ndocker hub\n, create a tag pointing to the new registry and upload the image:\n\n\ndocker pull ubuntu:16.04\n\n\ndocker tag ubuntu:16.04 `registry./ubuntu:16.04`\n\n\ndocker push `registry./ubuntu:16.04`\n\n\n\n\n\n\nPlease replace \nregistry.\n with your domain.", + "title": "Docker registry" + }, + { + "location": "/examples/docker-registry/README/#docker-registry", + "text": "This example demonstrates how to deploy a docker registry in the cluster and configure Ingress enable access from Internet", + "title": "Docker registry" + }, + { + "location": "/examples/docker-registry/README/#deployment", + "text": "First we deploy the docker registry in the cluster: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml Important DO NOT RUN THIS IN PRODUCTION This deployment uses emptyDir in the volumeMount which means the contents of the registry will be 
deleted when the pod dies. The next required step is creation of the ingress rules. To do this we have two options: with and without TLS", + "title": "Deployment" + }, + { + "location": "/examples/docker-registry/README/#without-tls", + "text": "Download and edit the yaml deployment replacing registry. with a valid DNS name pointing to the ingress controller: wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml Important Running a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag. Please check deploy a plain http registry", + "title": "Without TLS" + }, + { + "location": "/examples/docker-registry/README/#with-tls", + "text": "Download and edit the yaml deployment replacing registry. with a valid DNS name pointing to the ingress controller: wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml Deploy kube lego use Let's Encrypt certificates or edit the ingress rule to use a secret with an existing SSL certificate.", + "title": "With TLS" + }, + { + "location": "/examples/docker-registry/README/#testing", + "text": "To test the registry is working correctly we download a known image from docker hub , create a tag pointing to the new registry and upload the image: docker pull ubuntu:16.04 docker tag ubuntu:16.04 `registry./ubuntu:16.04` docker push `registry./ubuntu:16.04` Please replace registry. with your domain.", + "title": "Testing" + }, + { + "location": "/examples/grpc/README/", + "text": "gRPC\n\u00b6\n\n\nThis example demonstrates how to route traffic to a gRPC service through the\nnginx controller.\n\n\nPrerequisites\n\u00b6\n\n\n\n\nYou have a kubernetes cluster running.\n\n\nYou have a domain name such as \nexample.com\n that is configured to route\n traffic to the ingress controller. 
Replace references to\n \nfortune-teller.stack.build\n (the domain name used in this example) to your\n own domain name (you're also responsible for provisioning an SSL certificate\n for the ingress).\n\n\nYou have the nginx-ingress controller installed in typical fashion (must be\n at least\n \nquay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.13.0\n\n for grpc support.\n\n\nYou have a backend application running a gRPC server and listening for TCP\n traffic. If you prefer, you can use the\n \nfortune-teller\n\n application provided here as an example. \n\n\n\n\nStep 1: kubernetes \nDeployment\n\u00b6\n\n\n$ kubectl create -f app.yaml\n\n\n\n\n\nThis is a standard kubernetes deployment object. It is running a grpc service\nlistening on port \n50051\n.\n\n\nThe sample application\n\nfortune-teller-app\n\nis a grpc server implemented in go. Here's the stripped-down implementation:\n\n\nfunc\n \nmain\n()\n \n{\n\n \ngrpcServer\n \n:=\n \ngrpc\n.\nNewServer\n()\n\n \nfortune\n.\nRegisterFortuneTellerServer\n(\ngrpcServer\n,\n \n&\nFortuneTeller\n{})\n\n \nlis\n,\n \n_\n \n:=\n \nnet\n.\nListen\n(\n\"tcp\"\n,\n \n\":50051\"\n)\n\n \ngrpcServer\n.\nServe\n(\nlis\n)\n\n\n}\n\n\n\n\n\n\nThe takeaway is that we are not doing any TLS configuration on the server (as we\nare terminating TLS at the ingress level, grpc traffic will travel unencrypted\ninside the cluster and arrive \"insecure\").\n\n\nFor your own application you may or may not want to do this. If you prefer to\nforward encrypted traffic to your POD and terminate TLS at the gRPC server\nitself, add the ingress annotation \nnginx.ingress.kubernetes.io/secure-backends:\"true\"\n.\n\n\nStep 2: the kubernetes \nService\n\u00b6\n\n\n$ kubectl create -f svc.yaml\n\n\n\n\n\nHere we have a typical service. 
Nothing special, just routing traffic to the\nbackend application on port \n50051\n.\n\n\nStep 3: the kubernetes \nIngress\n\u00b6\n\n\n$ kubectl create -f ingress.yaml\n\n\n\n\n\nA few things to note:\n\n\n\n\nWe've tagged the ingress with the annotation\n \nnginx.ingress.kubernetes.io/grpc-backend: \"true\"\n. This is the magic\n ingredient that sets up the appropriate nginx configuration to route http/2\n traffic to our service.\n\n\nWe're terminating TLS at the ingress and have configured an SSL certificate\n \nfortune-teller.stack.build\n. The ingress matches traffic arriving as\n \nhttps://fortune-teller.stack.build:443\n and routes unencrypted messages to\n our kubernetes service.\n\n\n\n\nStep 4: test the connection\n\u00b6\n\n\nOnce we've applied our configuration to kubernetes, it's time to test that we\ncan actually talk to the backend. To do this, we'll use the\n\ngrpcurl\n utility:\n\n\n$ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict\n\n{\n\n \n\"message\"\n: \n\"Let us endeavor so to live that when we come to die even the undertaker will be sorry.\\n\\t\\t-- Mark Twain, \\\"Pudd'nhead Wilson's Calendar\\\"\"\n\n\n}\n\n\n\n\n\n\nDebugging Hints\n\u00b6\n\n\n\n\nObviously, watch the logs on your app.\n\n\nWatch the logs for the nginx-ingress-controller (increasing verbosity as\n needed).\n\n\nDouble-check your address and ports.\n\n\nSet the \nGODEBUG=http2debug=2\n environment variable to get detailed http/2\n logging on the client and/or server.\n\n\nStudy RFC 7540 (http/2) \nhttps://tools.ietf.org/html/rfc7540\n.\n\n\n\n\n\n\nIf you are developing public gRPC endpoints, check out\nhttps://proto.stack.build, a protocol buffer / gRPC build service that can use\nto help make it easier for your users to consume your API.", + "title": "gRPC" + }, + { + "location": "/examples/grpc/README/#grpc", + "text": "This example demonstrates how to route traffic to a gRPC service through the\nnginx controller.", + "title": "gRPC" + 
}, + { + "location": "/examples/grpc/README/#prerequisites", + "text": "You have a kubernetes cluster running. You have a domain name such as example.com that is configured to route\n traffic to the ingress controller. Replace references to\n fortune-teller.stack.build (the domain name used in this example) to your\n own domain name (you're also responsible for provisioning an SSL certificate\n for the ingress). You have the nginx-ingress controller installed in typical fashion (must be\n at least\n quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.13.0 \n for grpc support. You have a backend application running a gRPC server and listening for TCP\n traffic. If you prefer, you can use the\n fortune-teller \n application provided here as an example.", + "title": "Prerequisites" + }, + { + "location": "/examples/grpc/README/#step-1-kubernetes-deployment", + "text": "$ kubectl create -f app.yaml This is a standard kubernetes deployment object. It is running a grpc service\nlistening on port 50051 . The sample application fortune-teller-app \nis a grpc server implemented in go. Here's the stripped-down implementation: func main () { \n grpcServer := grpc . NewServer () \n fortune . RegisterFortuneTellerServer ( grpcServer , & FortuneTeller {}) \n lis , _ := net . Listen ( \"tcp\" , \":50051\" ) \n grpcServer . Serve ( lis ) } The takeaway is that we are not doing any TLS configuration on the server (as we\nare terminating TLS at the ingress level, grpc traffic will travel unencrypted\ninside the cluster and arrive \"insecure\"). For your own application you may or may not want to do this. 
If you prefer to\nforward encrypted traffic to your POD and terminate TLS at the gRPC server\nitself, add the ingress annotation nginx.ingress.kubernetes.io/secure-backends:\"true\" .", + "title": "Step 1: kubernetes Deployment" + }, + { + "location": "/examples/grpc/README/#step-2-the-kubernetes-service", + "text": "$ kubectl create -f svc.yaml Here we have a typical service. Nothing special, just routing traffic to the\nbackend application on port 50051 .", + "title": "Step 2: the kubernetes Service" + }, + { + "location": "/examples/grpc/README/#step-3-the-kubernetes-ingress", + "text": "$ kubectl create -f ingress.yaml A few things to note: We've tagged the ingress with the annotation\n nginx.ingress.kubernetes.io/grpc-backend: \"true\" . This is the magic\n ingredient that sets up the appropriate nginx configuration to route http/2\n traffic to our service. We're terminating TLS at the ingress and have configured an SSL certificate\n fortune-teller.stack.build . The ingress matches traffic arriving as\n https://fortune-teller.stack.build:443 and routes unencrypted messages to\n our kubernetes service.", + "title": "Step 3: the kubernetes Ingress" + }, + { + "location": "/examples/grpc/README/#step-4-test-the-connection", + "text": "Once we've applied our configuration to kubernetes, it's time to test that we\ncan actually talk to the backend. To do this, we'll use the grpcurl utility: $ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict { \n \"message\" : \"Let us endeavor so to live that when we come to die even the undertaker will be sorry.\\n\\t\\t-- Mark Twain, \\\"Pudd'nhead Wilson's Calendar\\\"\" }", + "title": "Step 4: test the connection" + }, + { + "location": "/examples/grpc/README/#debugging-hints", + "text": "Obviously, watch the logs on your app. Watch the logs for the nginx-ingress-controller (increasing verbosity as\n needed). Double-check your address and ports. 
Set the GODEBUG=http2debug=2 environment variable to get detailed http/2\n logging on the client and/or server. Study RFC 7540 (http/2) https://tools.ietf.org/html/rfc7540 . If you are developing public gRPC endpoints, check out\nhttps://proto.stack.build, a protocol buffer / gRPC build service that can use\nto help make it easier for your users to consume your API.", + "title": "Debugging Hints" + }, + { + "location": "/examples/multi-tls/README/", + "text": "Multi TLS certificate termination\n\u00b6\n\n\nThis example uses 2 different certificates to terminate SSL for 2 hostnames.\n\n\n\n\nDeploy the controller by creating the rc in the parent dir\n\n\nCreate tls secrets for foo.bar.com and bar.baz.com as indicated in the yaml\n\n\nCreate multi-tls.yaml\n\n\n\n\nThis should generate a segment like:\n\n\n$\n kubectl \nexec\n -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf \n|\n grep \n\"foo.bar.com\"\n -B \n7\n -A \n35\n\n\n server {\n\n\n listen 80;\n\n\n listen 443 ssl http2;\n\n\n ssl_certificate /etc/nginx-ssl/default-foobar.pem;\n\n\n ssl_certificate_key /etc/nginx-ssl/default-foobar.pem;\n\n\n\n\n server_name foo.bar.com;\n\n\n\n\n if ($scheme = http) {\n\n\n return 301 https://$host$request_uri;\n\n\n }\n\n\n\n\n\n location / {\n\n\n proxy_set_header Host $host;\n\n\n\n #\n Pass Real IP\n\n proxy_set_header X-Real-IP $remote_addr;\n\n\n\n #\n Allow websocket connections\n\n proxy_set_header Upgrade $http_upgrade;\n\n\n proxy_set_header Connection $connection_upgrade;\n\n\n\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n\n proxy_set_header X-Forwarded-Host $host;\n\n\n proxy_set_header X-Forwarded-Proto $pass_access_scheme;\n\n\n\n proxy_connect_timeout 5s;\n\n\n proxy_send_timeout 60s;\n\n\n proxy_read_timeout 60s;\n\n\n\n proxy_redirect off;\n\n\n proxy_buffering off;\n\n\n\n proxy_http_version 1.1;\n\n\n\n proxy_pass http://default-http-svc-80;\n\n\n }\n\n\n\n\n\n\nAnd you should be able to reach your nginx service or 
http-svc service using a hostname switch:\n\n\n$\n kubectl get ing\n\nNAME RULE BACKEND ADDRESS AGE\n\n\nfoo-tls - 104.154.30.67 13m\n\n\n foo.bar.com\n\n\n / http-svc:80\n\n\n bar.baz.com\n\n\n / nginx:80\n\n\n\n$\n curl https://104.154.30.67 -H \n'Host:foo.bar.com'\n -k\n\nCLIENT VALUES:\n\n\nclient_address=10.245.0.6\n\n\ncommand=GET\n\n\nreal path=/\n\n\nquery=nil\n\n\nrequest_version=1.1\n\n\nrequest_uri=http://foo.bar.com:8080/\n\n\n\nSERVER VALUES:\n\n\nserver_version=nginx: 1.9.11 - lua: 10001\n\n\n\nHEADERS RECEIVED:\n\n\naccept=*/*\n\n\nconnection=close\n\n\nhost=foo.bar.com\n\n\nuser-agent=curl/7.35.0\n\n\nx-forwarded-for=10.245.0.1\n\n\nx-forwarded-host=foo.bar.com\n\n\nx-forwarded-proto=https\n\n\n\n$\n curl https://104.154.30.67 -H \n'Host:bar.baz.com'\n -k\n\n\n\n\n\n\n\n\n\n\nWelcome to nginx on Debian!\n\n\n\n$\n curl \n104\n.154.30.67\n\ndefault backend - 404", + "title": "Multi TLS certificate termination" + }, + { + "location": "/examples/multi-tls/README/#multi-tls-certificate-termination", + "text": "This example uses 2 different certificates to terminate SSL for 2 hostnames. 
Deploy the controller by creating the rc in the parent dir Create tls secrets for foo.bar.com and bar.baz.com as indicated in the yaml Create multi-tls.yaml This should generate a segment like: $ kubectl exec -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf | grep \"foo.bar.com\" -B 7 -A 35 server { listen 80; listen 443 ssl http2; ssl_certificate /etc/nginx-ssl/default-foobar.pem; ssl_certificate_key /etc/nginx-ssl/default-foobar.pem; server_name foo.bar.com; if ($scheme = http) { return 301 https://$host$request_uri; } location / { proxy_set_header Host $host; # Pass Real IP proxy_set_header X-Real-IP $remote_addr; # Allow websocket connections proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Proto $pass_access_scheme; proxy_connect_timeout 5s; proxy_send_timeout 60s; proxy_read_timeout 60s; proxy_redirect off; proxy_buffering off; proxy_http_version 1.1; proxy_pass http://default-http-svc-80; } And you should be able to reach your nginx service or http-svc service using a hostname switch: $ kubectl get ing NAME RULE BACKEND ADDRESS AGE foo-tls - 104.154.30.67 13m foo.bar.com / http-svc:80 bar.baz.com / nginx:80 $ curl https://104.154.30.67 -H 'Host:foo.bar.com' -k CLIENT VALUES: client_address=10.245.0.6 command=GET real path=/ query=nil request_version=1.1 request_uri=http://foo.bar.com:8080/ SERVER VALUES: server_version=nginx: 1.9.11 - lua: 10001 HEADERS RECEIVED: accept=*/* connection=close host=foo.bar.com user-agent=curl/7.35.0 x-forwarded-for=10.245.0.1 x-forwarded-host=foo.bar.com x-forwarded-proto=https $ curl https://104.154.30.67 -H 'Host:bar.baz.com' -k Welcome to nginx on Debian! 
$ curl 104 .154.30.67 default backend - 404", + "title": "Multi TLS certificate termination" + }, + { + "location": "/examples/rewrite/README/", + "text": "Rewrite\n\u00b6\n\n\nThis example demonstrates how to use the Rewrite annotations\n\n\nPrerequisites\n\u00b6\n\n\nYou will need to make sure your Ingress targets exactly one Ingress\ncontroller by specifying the \ningress.class annotation\n,\nand that you have an ingress controller \nrunning\n in your cluster.\n\n\nDeployment\n\u00b6\n\n\nRewriting can be controlled using the following annotations:\n\n\n\n\n\n\n\n\nName\n\n\nDescription\n\n\nValues\n\n\n\n\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/rewrite-target\n\n\nTarget URI where the traffic must be redirected\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/add-base-url\n\n\nindicates if is required to add a base tag in the head of the responses from the upstream servers\n\n\nbool\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/base-url-scheme\n\n\nOverride for the scheme passed to the base tag\n\n\nstring\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/ssl-redirect\n\n\nIndicates if the location section is accessible SSL only (defaults to True when Ingress contains a Certificate)\n\n\nbool\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/force-ssl-redirect\n\n\nForces the redirection to HTTPS even if the Ingress is not TLS Enabled\n\n\nbool\n\n\n\n\n\n\nnginx.ingress.kubernetes.io/app-root\n\n\nDefines the Application Root that the Controller must redirect if it's in '/' context\n\n\nstring\n\n\n\n\n\n\n\n\nValidation\n\u00b6\n\n\nRewrite Target\n\u00b6\n\n\nCreate an Ingress rule with a rewrite annotation:\n\n\n$\n \necho\n \n\"\n\n\napiVersion: extensions/v1beta1\n\n\nkind: Ingress\n\n\nmetadata:\n\n\n annotations:\n\n\n nginx.ingress.kubernetes.io/rewrite-target: /\n\n\n name: rewrite\n\n\n namespace: default\n\n\nspec:\n\n\n rules:\n\n\n - host: rewrite.bar.com\n\n\n http:\n\n\n paths:\n\n\n - backend:\n\n\n serviceName: http-svc\n\n\n servicePort: 80\n\n\n path: 
/something\n\n\n\" | kubectl create -f -\n\n\n\n\n\n\nCheck the rewrite is working\n\n\n$ curl -v http://172.17.4.99/something -H \n'Host: rewrite.bar.com'\n\n* Trying \n172\n.17.4.99...\n* Connected to \n172\n.17.4.99 \n(\n172\n.17.4.99\n)\n port \n80\n \n(\n#0)\n\n> GET /something HTTP/1.1\n> Host: rewrite.bar.com\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 \n200\n OK\n< Server: nginx/1.11.0\n< Date: Tue, \n31\n May \n2016\n \n16\n:07:31 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n<\nCLIENT VALUES:\n\nclient_address\n=\n10\n.2.56.9\n\ncommand\n=\nGET\nreal \npath\n=\n/\n\nquery\n=\nnil\n\nrequest_version\n=\n1\n.1\n\nrequest_uri\n=\nhttp://rewrite.bar.com:8080/\n\nSERVER VALUES:\n\nserver_version\n=\nnginx: \n1\n.9.11 - lua: \n10001\n\n\nHEADERS RECEIVED:\n\naccept\n=\n*/*\n\nconnection\n=\nclose\n\nhost\n=\nrewrite.bar.com\nuser-agent\n=\ncurl/7.43.0\nx-forwarded-for\n=\n10\n.2.56.1\nx-forwarded-host\n=\nrewrite.bar.com\nx-forwarded-port\n=\n80\n\nx-forwarded-proto\n=\nhttp\nx-real-ip\n=\n10\n.2.56.1\nBODY:\n* Connection \n#0 to host 172.17.4.99 left intact\n\n-no body in request-\n\n\n\n\n\nApp Root\n\u00b6\n\n\nCreate an Ingress rule with a app-root annotation:\n\n\n$ \necho\n \n\"\n\n\napiVersion: extensions/v1beta1\n\n\nkind: Ingress\n\n\nmetadata:\n\n\n annotations:\n\n\n nginx.ingress.kubernetes.io/app-root: /app1\n\n\n name: approot\n\n\n namespace: default\n\n\nspec:\n\n\n rules:\n\n\n - host: approot.bar.com\n\n\n http:\n\n\n paths:\n\n\n - backend:\n\n\n serviceName: http-svc\n\n\n servicePort: 80\n\n\n path: /\n\n\n\"\n \n|\n kubectl create -f -\n\n\n\n\n\nCheck the rewrite is working\n\n\n$ curl -I -k http://approot.bar.com/\nHTTP/1.1 \n302\n Moved Temporarily\nServer: nginx/1.11.10\nDate: Mon, \n13\n Mar \n2017\n \n14\n:57:15 GMT\nContent-Type: text/html\nContent-Length: \n162\n\nLocation: http://stickyingress.example.com/app1\nConnection: keep-alive", + "title": "Rewrite" + }, + { + 
"location": "/examples/rewrite/README/#rewrite", + "text": "This example demonstrates how to use the Rewrite annotations", + "title": "Rewrite" + }, + { + "location": "/examples/rewrite/README/#prerequisites", + "text": "You will need to make sure your Ingress targets exactly one Ingress\ncontroller by specifying the ingress.class annotation ,\nand that you have an ingress controller running in your cluster.", + "title": "Prerequisites" + }, + { + "location": "/examples/rewrite/README/#deployment", + "text": "Rewriting can be controlled using the following annotations: Name Description Values nginx.ingress.kubernetes.io/rewrite-target Target URI where the traffic must be redirected string nginx.ingress.kubernetes.io/add-base-url indicates if is required to add a base tag in the head of the responses from the upstream servers bool nginx.ingress.kubernetes.io/base-url-scheme Override for the scheme passed to the base tag string nginx.ingress.kubernetes.io/ssl-redirect Indicates if the location section is accessible SSL only (defaults to True when Ingress contains a Certificate) bool nginx.ingress.kubernetes.io/force-ssl-redirect Forces the redirection to HTTPS even if the Ingress is not TLS Enabled bool nginx.ingress.kubernetes.io/app-root Defines the Application Root that the Controller must redirect if it's in '/' context string", + "title": "Deployment" + }, + { + "location": "/examples/rewrite/README/#validation", + "text": "", + "title": "Validation" + }, + { + "location": "/examples/rewrite/README/#rewrite-target", + "text": "Create an Ingress rule with a rewrite annotation: $ echo \" apiVersion: extensions/v1beta1 kind: Ingress metadata: annotations: nginx.ingress.kubernetes.io/rewrite-target: / name: rewrite namespace: default spec: rules: - host: rewrite.bar.com http: paths: - backend: serviceName: http-svc servicePort: 80 path: /something \" | kubectl create -f - Check the rewrite is working $ curl -v http://172.17.4.99/something -H 'Host: rewrite.bar.com' 
\n* Trying 172 .17.4.99...\n* Connected to 172 .17.4.99 ( 172 .17.4.99 ) port 80 ( #0) \n> GET /something HTTP/1.1\n> Host: rewrite.bar.com\n> User-Agent: curl/7.43.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Server: nginx/1.11.0\n< Date: Tue, 31 May 2016 16 :07:31 GMT\n< Content-Type: text/plain\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n<\nCLIENT VALUES: client_address = 10 .2.56.9 command = GET\nreal path = / query = nil request_version = 1 .1 request_uri = http://rewrite.bar.com:8080/\n\nSERVER VALUES: server_version = nginx: 1 .9.11 - lua: 10001 \n\nHEADERS RECEIVED: accept = */* connection = close host = rewrite.bar.com\nuser-agent = curl/7.43.0\nx-forwarded-for = 10 .2.56.1\nx-forwarded-host = rewrite.bar.com\nx-forwarded-port = 80 \nx-forwarded-proto = http\nx-real-ip = 10 .2.56.1\nBODY:\n* Connection #0 to host 172.17.4.99 left intact \n-no body in request-", + "title": "Rewrite Target" + }, + { + "location": "/examples/rewrite/README/#app-root", + "text": "Create an Ingress rule with a app-root annotation: $ echo \" apiVersion: extensions/v1beta1 kind: Ingress metadata: annotations: nginx.ingress.kubernetes.io/app-root: /app1 name: approot namespace: default spec: rules: - host: approot.bar.com http: paths: - backend: serviceName: http-svc servicePort: 80 path: / \" | kubectl create -f - Check the rewrite is working $ curl -I -k http://approot.bar.com/\nHTTP/1.1 302 Moved Temporarily\nServer: nginx/1.11.10\nDate: Mon, 13 Mar 2017 14 :57:15 GMT\nContent-Type: text/html\nContent-Length: 162 \nLocation: http://stickyingress.example.com/app1\nConnection: keep-alive", + "title": "App Root" + }, + { + "location": "/examples/static-ip/README/", + "text": "Static IPs\n\u00b6\n\n\nThis example demonstrates how to assign a static-ip to an Ingress on through the Nginx controller.\n\n\nPrerequisites\n\u00b6\n\n\nYou need a \nTLS cert\n and a \ntest HTTP service\n for this example.\nYou will also need to make sure your Ingress targets exactly one 
Ingress\ncontroller by specifying the \ningress.class annotation\n,\nand that you have an ingress controller \nrunning\n in your cluster.\n\n\nAcquiring an IP\n\u00b6\n\n\nSince instances of the nginx controller actually run on nodes in your cluster,\nby default nginx Ingresses will only get static IPs if your cloudprovider\nsupports static IP assignments to nodes. On GKE/GCE for example, even though\nnodes get static IPs, the IPs are not retained across upgrade.\n\n\nTo acquire a static IP for the nginx ingress controller, simply put it\nbehind a Service of \nType=LoadBalancer\n.\n\n\nFirst, create a loadbalancer Service and wait for it to acquire an IP\n\n\n$\n kubectl create -f static-ip-svc.yaml\n\nservice \"nginx-ingress-lb\" created\n\n\n\n$\n kubectl get svc nginx-ingress-lb\n\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\nnginx-ingress-lb 10.0.138.113 104.154.109.191 80:31457/TCP,443:32240/TCP 15m\n\n\n\n\n\n\nthen, update the ingress controller so it adopts the static IP of the Service\nby passing the \n--publish-service\n flag (the example yaml used in the next step\nalready has it set to \"nginx-ingress-lb\").\n\n\n$\n kubectl create -f nginx-ingress-controller.yaml\n\ndeployment \"nginx-ingress-controller\" created\n\n\n\n\n\n\nAssigning the IP to an Ingress\n\u00b6\n\n\nFrom here on every Ingress created with the \ningress.class\n annotation set to\n\nnginx\n will get the IP allocated in the previous step\n\n\n$\n kubectl create -f nginx-ingress.yaml\n\ningress \"nginx-ingress\" created\n\n\n\n$\n kubectl get ing nginx-ingress\n\nNAME HOSTS ADDRESS PORTS AGE\n\n\nnginx-ingress * 104.154.109.191 80, 443 13m\n\n\n\n$\n curl \n104\n.154.109.191 -kL\n\nCLIENT VALUES:\n\n\nclient_address=10.180.1.25\n\n\ncommand=GET\n\n\nreal path=/\n\n\nquery=nil\n\n\nrequest_version=1.1\n\n\nrequest_uri=http://104.154.109.191:8080/\n\n\n...\n\n\n\n\n\n\nRetaining the IP\n\u00b6\n\n\nYou can test retention by deleting the Ingress\n\n\n$\n kubectl delete ing 
nginx-ingress\n\ningress \"nginx-ingress\" deleted\n\n\n\n$\n kubectl create -f nginx-ingress.yaml\n\ningress \"nginx-ingress\" created\n\n\n\n$\n kubectl get ing nginx-ingress\n\nNAME HOSTS ADDRESS PORTS AGE\n\n\nnginx-ingress * 104.154.109.191 80, 443 13m\n\n\n\n\n\n\n\n\nNote that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all\nIngresses, because all requests are proxied through the same set of nginx\ncontrollers.\n\n\n\n\nPromote ephemeral to static IP\n\u00b6\n\n\nTo promote the allocated IP to static, you can update the Service manifest\n\n\n$\n kubectl patch svc nginx-ingress-lb -p \n'{\"spec\": {\"loadBalancerIP\": \"104.154.109.191\"}}'\n\n\n\"nginx-ingress-lb\" patched\n\n\n\n\n\n\nand promote the IP to static (promotion works differently for cloudproviders,\nprovided example is for GKE/GCE)\n`\n\n\n$\n gcloud compute addresses create nginx-ingress-lb --addresses \n104\n.154.109.191 --region us-central1\n\nCreated [https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb].\n\n\n---\n\n\naddress: 104.154.109.191\n\n\ncreationTimestamp: '2017-01-31T16:34:50.089-08:00'\n\n\ndescription: ''\n\n\nid: '5208037144487826373'\n\n\nkind: compute#address\n\n\nname: nginx-ingress-lb\n\n\nregion: us-central1\n\n\nselfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb\n\n\nstatus: IN_USE\n\n\nusers:\n\n\n- us-central1/forwardingRules/a09f6913ae80e11e6a8c542010af0000\n\n\n\n\n\n\nNow even if the Service is deleted, the IP will persist, so you can recreate the\nService with \nspec.loadBalancerIP\n set to \n104.154.109.191\n.", + "title": "Static IPs" + }, + { + "location": "/examples/static-ip/README/#static-ips", + "text": "This example demonstrates how to assign a static-ip to an Ingress on through the Nginx controller.", + "title": "Static IPs" + }, + { + "location": "/examples/static-ip/README/#prerequisites", + "text": "You need a 
TLS cert and a test HTTP service for this example.\nYou will also need to make sure your Ingress targets exactly one Ingress\ncontroller by specifying the ingress.class annotation ,\nand that you have an ingress controller running in your cluster.", + "title": "Prerequisites" + }, + { + "location": "/examples/static-ip/README/#acquiring-an-ip", + "text": "Since instances of the nginx controller actually run on nodes in your cluster,\nby default nginx Ingresses will only get static IPs if your cloudprovider\nsupports static IP assignments to nodes. On GKE/GCE for example, even though\nnodes get static IPs, the IPs are not retained across upgrade. To acquire a static IP for the nginx ingress controller, simply put it\nbehind a Service of Type=LoadBalancer . First, create a loadbalancer Service and wait for it to acquire an IP $ kubectl create -f static-ip-svc.yaml service \"nginx-ingress-lb\" created $ kubectl get svc nginx-ingress-lb NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx-ingress-lb 10.0.138.113 104.154.109.191 80:31457/TCP,443:32240/TCP 15m then, update the ingress controller so it adopts the static IP of the Service\nby passing the --publish-service flag (the example yaml used in the next step\nalready has it set to \"nginx-ingress-lb\"). 
$ kubectl create -f nginx-ingress-controller.yaml deployment \"nginx-ingress-controller\" created", + "title": "Acquiring an IP" + }, + { + "location": "/examples/static-ip/README/#assigning-the-ip-to-an-ingress", + "text": "From here on every Ingress created with the ingress.class annotation set to nginx will get the IP allocated in the previous step $ kubectl create -f nginx-ingress.yaml ingress \"nginx-ingress\" created $ kubectl get ing nginx-ingress NAME HOSTS ADDRESS PORTS AGE nginx-ingress * 104.154.109.191 80, 443 13m $ curl 104 .154.109.191 -kL CLIENT VALUES: client_address=10.180.1.25 command=GET real path=/ query=nil request_version=1.1 request_uri=http://104.154.109.191:8080/ ...", + "title": "Assigning the IP to an Ingress" + }, + { + "location": "/examples/static-ip/README/#retaining-the-ip", + "text": "You can test retention by deleting the Ingress $ kubectl delete ing nginx-ingress ingress \"nginx-ingress\" deleted $ kubectl create -f nginx-ingress.yaml ingress \"nginx-ingress\" created $ kubectl get ing nginx-ingress NAME HOSTS ADDRESS PORTS AGE nginx-ingress * 104.154.109.191 80, 443 13m Note that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all\nIngresses, because all requests are proxied through the same set of nginx\ncontrollers.", + "title": "Retaining the IP" + }, + { + "location": "/examples/static-ip/README/#promote-ephemeral-to-static-ip", + "text": "To promote the allocated IP to static, you can update the Service manifest $ kubectl patch svc nginx-ingress-lb -p '{\"spec\": {\"loadBalancerIP\": \"104.154.109.191\"}}' \"nginx-ingress-lb\" patched and promote the IP to static (promotion works differently for cloudproviders,\nprovided example is for GKE/GCE)\n` $ gcloud compute addresses create nginx-ingress-lb --addresses 104 .154.109.191 --region us-central1 Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb]. 
--- address: 104.154.109.191 creationTimestamp: '2017-01-31T16:34:50.089-08:00' description: '' id: '5208037144487826373' kind: compute#address name: nginx-ingress-lb region: us-central1 selfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb status: IN_USE users: - us-central1/forwardingRules/a09f6913ae80e11e6a8c542010af0000 Now even if the Service is deleted, the IP will persist, so you can recreate the\nService with spec.loadBalancerIP set to 104.154.109.191 .", + "title": "Promote ephemeral to static IP" + }, + { + "location": "/examples/tls-termination/README/", + "text": "TLS termination\n\u00b6\n\n\nThis example demonstrates how to terminate TLS through the nginx Ingress controller.\n\n\nPrerequisites\n\u00b6\n\n\nYou need a \nTLS cert\n and a \ntest HTTP service\n for this example.\n\n\nDeployment\n\u00b6\n\n\nThe following command instructs the controller to terminate traffic using the provided \nTLS cert, and forward un-encrypted HTTP traffic to the test HTTP service.\n\n\nkubectl apply -f ingress.yaml\n\n\n\n\n\n\nValidation\n\u00b6\n\n\nYou can confirm that the Ingress works.\n\n\n$\n kubectl describe ing nginx-test\n\nName: nginx-test\n\n\nNamespace: default\n\n\nAddress: 104.198.183.6\n\n\nDefault backend: default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080)\n\n\nTLS:\n\n\n tls-secret terminates\n\n\nRules:\n\n\n Host Path Backends\n\n\n ---- ---- --------\n\n\n *\n\n\n http-svc:80 ()\n\n\nAnnotations:\n\n\nEvents:\n\n\n FirstSeen LastSeen Count From SubObjectPath Type Reason Message\n\n\n --------- -------- ----- ---- ------------- -------- ------ -------\n\n\n 7s 7s 1 {nginx-ingress-controller } Normal CREATE default/nginx-test\n\n\n 7s 7s 1 {nginx-ingress-controller } Normal UPDATE default/nginx-test\n\n\n 7s 7s 1 {nginx-ingress-controller } Normal CREATE ip: 104.198.183.6\n\n\n 7s 7s 1 {nginx-ingress-controller } Warning MAPPING Ingress rule 'default/nginx-test' contains no path 
definition. Assuming /\n\n\n\n$\n curl \n104\n.198.183.6 -L\n\ncurl: (60) SSL certificate problem: self signed certificate\n\n\nMore details here: http://curl.haxx.se/docs/sslcerts.html\n\n\n\n$\n curl \n104\n.198.183.6 -Lk\n\nCLIENT VALUES:\n\n\nclient_address=10.240.0.4\n\n\ncommand=GET\n\n\nreal path=/\n\n\nquery=nil\n\n\nrequest_version=1.1\n\n\nrequest_uri=http://35.186.221.137:8080/\n\n\n\nSERVER VALUES:\n\n\nserver_version=nginx: 1.9.11 - lua: 10001\n\n\n\nHEADERS RECEIVED:\n\n\naccept=*/*\n\n\nconnection=Keep-Alive\n\n\nhost=35.186.221.137\n\n\nuser-agent=curl/7.46.0\n\n\nvia=1.1 google\n\n\nx-cloud-trace-context=f708ea7e369d4514fc90d51d7e27e91d/13322322294276298106\n\n\nx-forwarded-for=104.132.0.80, 35.186.221.137\n\n\nx-forwarded-proto=https\n\n\nBODY:", + "title": "TLS termination" + }, + { + "location": "/examples/tls-termination/README/#tls-termination", + "text": "This example demonstrates how to terminate TLS through the nginx Ingress controller.", + "title": "TLS termination" + }, + { + "location": "/examples/tls-termination/README/#prerequisites", + "text": "You need a TLS cert and a test HTTP service for this example.", + "title": "Prerequisites" + }, + { + "location": "/examples/tls-termination/README/#deployment", + "text": "The following command instructs the controller to terminate traffic using the provided \nTLS cert, and forward un-encrypted HTTP traffic to the test HTTP service. kubectl apply -f ingress.yaml", + "title": "Deployment" + }, + { + "location": "/examples/tls-termination/README/#validation", + "text": "You can confirm that the Ingress works. 
$ kubectl describe ing nginx-test Name: nginx-test Namespace: default Address: 104.198.183.6 Default backend: default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080) TLS: tls-secret terminates Rules: Host Path Backends ---- ---- -------- * http-svc:80 () Annotations: Events: FirstSeen LastSeen Count From SubObjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 7s 7s 1 {nginx-ingress-controller } Normal CREATE default/nginx-test 7s 7s 1 {nginx-ingress-controller } Normal UPDATE default/nginx-test 7s 7s 1 {nginx-ingress-controller } Normal CREATE ip: 104.198.183.6 7s 7s 1 {nginx-ingress-controller } Warning MAPPING Ingress rule 'default/nginx-test' contains no path definition. Assuming / $ curl 104 .198.183.6 -L curl: (60) SSL certificate problem: self signed certificate More details here: http://curl.haxx.se/docs/sslcerts.html $ curl 104 .198.183.6 -Lk CLIENT VALUES: client_address=10.240.0.4 command=GET real path=/ query=nil request_version=1.1 request_uri=http://35.186.221.137:8080/ SERVER VALUES: server_version=nginx: 1.9.11 - lua: 10001 HEADERS RECEIVED: accept=*/* connection=Keep-Alive host=35.186.221.137 user-agent=curl/7.46.0 via=1.1 google x-cloud-trace-context=f708ea7e369d4514fc90d51d7e27e91d/13322322294276298106 x-forwarded-for=104.132.0.80, 35.186.221.137 x-forwarded-proto=https BODY:", + "title": "Validation" + }, + { + "location": "/development/", + "text": "Developing for NGINX Ingress Controller\n\u00b6\n\n\nThis document explains how to get started with developing for NGINX Ingress controller.\nIt includes how to build, test, and release ingress controllers.\n\n\nQuick Start\n\u00b6\n\n\nGetting the code\n\u00b6\n\n\nThe code must be checked out as a subdirectory of k8s.io, and not github.com.\n\n\nmkdir -p $GOPATH/src/k8s.io\ncd $GOPATH/src/k8s.io\n# Replace \"$YOUR_GITHUB_USERNAME\" below with your github username\ngit clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git\ncd 
ingress-nginx\n\n\n\n\n\nInitial developer environment build\n\u00b6\n\n\n\n\nPrequisites\n: Minikube must be installed.\nSee \nreleases\n for installation instructions. \n\n\n\n\nIf you are using \nMacOS\n and deploying to \nminikube\n, the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace \ningress-nginx\n:\n\n\n$ make dev-env\n\n\n\n\n\nUpdating the deployment\n\u00b6\n\n\nThe nginx controller container image can be rebuilt using:\n\n\n$ \nARCH\n=\namd64 \nTAG\n=\ndev \nREGISTRY\n=\n$USER\n/ingress-controller make build container\n\n\n\n\n\nThe image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up:\n\n\n$ kubectl get pods -n ingress-nginx\n$ kubectl delete pod -n ingress-nginx nginx-ingress-controller-\n\n\n\n\n\nDependencies\n\u00b6\n\n\nThe build uses dependencies in the \nvendor\n directory, which\nmust be installed before building a binary/image. Occasionally, you\nmight need to update the dependencies.\n\n\nThis guide requires you to install the \ndep\n dependency tool.\n\n\nCheck the version of \ndep\n you are using and make sure it is up to date.\n\n\n$\n dep version\n\ndep:\n\n\n version : devel\n\n\n build date : \n\n\n git hash : \n\n\n go version : go1.9\n\n\n go compiler : gc\n\n\n platform : linux/amd64\n\n\n\n\n\n\nIf you have an older version of \ndep\n, you can update it as follows:\n\n\n$\n go get -u github.com/golang/dep\n\n\n\n\n\nThis will automatically save the dependencies to the \nvendor/\n directory.\n\n\n$\n \ncd\n \n$GOPATH\n/src/k8s.io/ingress-nginx\n\n$\n dep ensure\n\n$\n dep ensure -update\n\n$\n dep prune\n\n\n\n\n\nBuilding\n\u00b6\n\n\nAll ingress controllers are built through a Makefile. 
Depending on your\nrequirements you can build a raw server binary, a local container image,\nor push an image to a remote repository.\n\n\nIn order to use your local Docker, you may need to set the following environment variables:\n\n\n#\n \n\"gcloud docker\"\n \n(\ndefault\n)\n or \n\"docker\"\n\n\n$\n \nexport\n \nDOCKER\n=\n\n\n\n#\n \n\"quay.io/kubernetes-ingress-controller\"\n \n(\ndefault\n)\n, \n\"index.docker.io\"\n, or your own registry\n\n$\n \nexport\n \nREGISTRY\n=\n\n\n\n\n\n\nTo find the registry simply run: \ndocker system info | grep Registry\n\n\nNginx Controller\n\u00b6\n\n\nBuild a raw server binary\n\n\n$\n make build\n\n\n\n\n\nTODO\n: add more specific instructions needed for raw server binary.\n\n\nBuild a local container image\n\n\n$\n \nTAG\n=\n \nREGISTRY\n=\n$USER\n/ingress-controller make docker-build\n\n\n\n\n\nPush the container image to a remote repository\n\n\n$\n \nTAG\n=\n \nREGISTRY\n=\n$USER\n/ingress-controller make docker-push\n\n\n\n\n\nDeploying\n\u00b6\n\n\nThere are several ways to deploy the ingress controller onto a cluster.\nPlease check the \ndeployment guide\n\n\nTesting\n\u00b6\n\n\nTo run unit-tests, just run\n\n\n$\n \ncd\n \n$GOPATH\n/src/k8s.io/ingress-nginx\n\n$\n make \ntest\n\n\n\n\n\n\nIf you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo.\n\n\n$\n \ncd\n \n$GOPATH\n/src/k8s.io/ingress-nginx\n\n$\n make e2e-test\n\n\n\n\n\nTo run unit-tests for lua code locally, run:\n\n\n$\n \ncd\n \n$GOPATH\n/src/k8s.io/ingress-nginx\n\n$\n ./rootfs/etc/nginx/lua/test/up.sh\n\n$\n make lua-test\n\n\n\n\n\nLua tests are located in \n$GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test\n. When creating a new test file it must follow the naming convention \n_test.lua\n or it will be ignored. \n\n\nReleasing\n\u00b6\n\n\nAll Makefiles will produce a release binary, as shown above. To publish this\nto a wider Kubernetes user base, push the image to a container registry, like\n\ngcr.io\n. 
All release images are hosted under \ngcr.io/google_containers\n and\ntagged according to a \nsemver\n scheme.\n\n\nAn example release might look like:\n\n\n$ make release\n\n\n\n\n\nPlease follow these guidelines to cut a release:\n\n\n\n\nUpdate the \nrelease\n\npage with a short description of the major changes that correspond to a given\nimage tag.\n\n\nCut a release branch, if appropriate. Release branches follow the format of\n\ncontroller-release-version\n. Typically, pre-releases are cut from HEAD.\nAll major feature work is done in HEAD. Specific bug fixes are\ncherry-picked into a release branch.\n\n\nIf you're not confident about the stability of the code,\n\ntag\n it as alpha or beta.\nTypically, a release branch should have stable code.", + "title": "Developing for NGINX Ingress Controller" + }, + { + "location": "/development/#developing-for-nginx-ingress-controller", + "text": "This document explains how to get started with developing for NGINX Ingress controller.\nIt includes how to build, test, and release ingress controllers.", + "title": "Developing for NGINX Ingress Controller" + }, + { + "location": "/development/#quick-start", + "text": "", + "title": "Quick Start" + }, + { + "location": "/development/#getting-the-code", + "text": "The code must be checked out as a subdirectory of k8s.io, and not github.com. mkdir -p $GOPATH/src/k8s.io\ncd $GOPATH/src/k8s.io\n# Replace \"$YOUR_GITHUB_USERNAME\" below with your github username\ngit clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git\ncd ingress-nginx", + "title": "Getting the code" + }, + { + "location": "/development/#initial-developer-environment-build", + "text": "Prerequisites : Minikube must be installed.\nSee releases for installation instructions. 
If you are using MacOS and deploying to minikube , the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx : $ make dev-env", + "title": "Initial developer environment build" + }, + { + "location": "/development/#updating-the-deployment", + "text": "The nginx controller container image can be rebuilt using: $ ARCH = amd64 TAG = dev REGISTRY = $USER /ingress-controller make build container The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: $ kubectl get pods -n ingress-nginx\n$ kubectl delete pod -n ingress-nginx nginx-ingress-controller-", + "title": "Updating the deployment" + }, + { + "location": "/development/#dependencies", + "text": "The build uses dependencies in the vendor directory, which\nmust be installed before building a binary/image. Occasionally, you\nmight need to update the dependencies. This guide requires you to install the dep dependency tool. Check the version of dep you are using and make sure it is up to date. $ dep version dep: version : devel build date : git hash : go version : go1.9 go compiler : gc platform : linux/amd64 If you have an older version of dep , you can update it as follows: $ go get -u github.com/golang/dep This will automatically save the dependencies to the vendor/ directory. $ cd $GOPATH /src/k8s.io/ingress-nginx $ dep ensure $ dep ensure -update $ dep prune", + "title": "Dependencies" + }, + { + "location": "/development/#building", + "text": "All ingress controllers are built through a Makefile. Depending on your\nrequirements you can build a raw server binary, a local container image,\nor push an image to a remote repository. 
In order to use your local Docker, you may need to set the following environment variables: # \"gcloud docker\" ( default ) or \"docker\" $ export DOCKER = # \"quay.io/kubernetes-ingress-controller\" ( default ) , \"index.docker.io\" , or your own registry $ export REGISTRY = To find the registry simply run: docker system info | grep Registry", + "title": "Building" + }, + { + "location": "/development/#nginx-controller", + "text": "Build a raw server binary $ make build TODO : add more specific instructions needed for raw server binary. Build a local container image $ TAG = REGISTRY = $USER /ingress-controller make docker-build Push the container image to a remote repository $ TAG = REGISTRY = $USER /ingress-controller make docker-push", + "title": "Nginx Controller" + }, + { + "location": "/development/#deploying", + "text": "There are several ways to deploy the ingress controller onto a cluster.\nPlease check the deployment guide", + "title": "Deploying" + }, + { + "location": "/development/#testing", + "text": "To run unit-tests, just run $ cd $GOPATH /src/k8s.io/ingress-nginx $ make test If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. $ cd $GOPATH /src/k8s.io/ingress-nginx $ make e2e-test To run unit-tests for lua code locally, run: $ cd $GOPATH /src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test . When creating a new test file it must follow the naming convention _test.lua or it will be ignored.", + "title": "Testing" + }, + { + "location": "/development/#releasing", + "text": "All Makefiles will produce a release binary, as shown above. To publish this\nto a wider Kubernetes user base, push the image to a container registry, like gcr.io . All release images are hosted under gcr.io/google_containers and\ntagged according to a semver scheme. 
An example release might look like: $ make release Please follow these guidelines to cut a release: Update the release \npage with a short description of the major changes that correspond to a given\nimage tag. Cut a release branch, if appropriate. Release branches follow the format of controller-release-version . Typically, pre-releases are cut from HEAD.\nAll major feature work is done in HEAD. Specific bug fixes are\ncherry-picked into a release branch. If you're not confident about the stability of the code, tag it as alpha or beta.\nTypically, a release branch should have stable code.", + "title": "Releasing" + }, + { + "location": "/how-it-works/", + "text": "How it works\n\u00b6\n\n\nThe objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.\n\n\nNGINX configuration\n\u00b6\n\n\nThe goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. \nThough it is important to note that we don't reload Nginx on changes that impact only an \nupstream\n configuration (i.e Endpoints change when you deploy your app)\n. We use https://github.com/openresty/lua-nginx-module to achieve this. Check \nbelow\n to learn more about how it's done.\n\n\nNGINX model\n\u00b6\n\n\nUsually, a Kubernetes Controller utilizes the \nsynchronization loop pattern\n to check if the desired state in the controller is updated or a change is required. To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster.\n\n\nTo get this object from the cluster, we use \nKubernetes Informers\n, in particular, \nFilteredSharedInformer\n. 
These informers allow reacting to changes by using \ncallbacks\n to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload.\n\n\nOne of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions.\n\n\nThe final representation of the NGINX configuration is generated from a \nGo template\n using the new model as input for the variables required by the template.\n\n\nBuilding the NGINX model\n\u00b6\n\n\nBuilding a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a \nwork queue\n it is possible to not lose changes and remove the use of \nsync.Mutex\n to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. 
It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the \nwork queue\n.\n\n\nOperations to build the model:\n\n\n\n\nOrder Ingress rules by \nResourceVersion\n field, i.e., old rules first.\n\n\nIf the same path for the same host is defined in more than one Ingress, the oldest rule wins.\n\n\nIf more than one Ingress contains a TLS section for the same host, the oldest rule wins.\n\n\n\n\nIf multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins.\n\n\n\n\n\n\nCreate a list of NGINX Servers (per hostname)\n\n\n\n\nCreate a list of NGINX Upstreams\n\n\nIf multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions.\n\n\nAnnotations are applied to all the paths in the Ingress.\n\n\nMultiple Ingresses can define different annotations. These definitions are not shared between Ingresses.\n\n\n\n\nWhen a reload is required\n\u00b6\n\n\nThe next list describes the scenarios when a reload is required:\n\n\n\n\nNew Ingress Resource Created.\n\n\nTLS section is added to existing Ingress.\n\n\nChange in Ingress annotations that impacts more than just upstream configuration. For instance \nload-balance\n annotation does not require a reload.\n\n\nA path is added/removed from an Ingress.\n\n\nAn Ingress, Service, Secret is removed.\n\n\nSome missing referenced object from the Ingress is available, like a Service or Secret.\n\n\nA Secret is updated.\n\n\n\n\nAvoiding reloads\n\u00b6\n\n\nIn some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. 
This can change only if NGINX changes the way new configurations are read, basically, new changes do not replace worker processes.\n\n\nAvoiding reloads on Endpoints changes\n\u00b6\n\n\nOn every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request Lua code running in \nbalancer_by_lua\n context detects what endpoints it should choose upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. \nNote\n that this includes annotation changes that affect only \nupstream\n configuration in Nginx as well.\n\n\nIn relatively big clusters with frequently deploying apps this feature saves a significant number of Nginx reloads which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on.", + "title": "How it works" + }, + { + "location": "/how-it-works/#how-it-works", + "text": "The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.", + "title": "How it works" + }, + { + "location": "/how-it-works/#nginx-configuration", + "text": "The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e. Endpoints change when you deploy your app) . We use https://github.com/openresty/lua-nginx-module to achieve this. 
Check below to learn more about how it's done.", + "title": "NGINX configuration" + }, + { + "location": "/how-it-works/#nginx-model", + "text": "Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster. To get this object from the cluster, we use Kubernetes Informers , in particular, FilteredSharedInformer . These informers allow reacting to changes by using callbacks to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. 
The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template.", + "title": "NGINX model" + }, + { + "location": "/how-it-works/#building-the-nginx-model", + "text": "Building a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the work queue . Operations to build the model: Order Ingress rules by ResourceVersion field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. If more than one Ingress contains a TLS section for the same host, the oldest rule wins. If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins. Create a list of NGINX Servers (per hostname) Create a list of NGINX Upstreams If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Annotations are applied to all the paths in the Ingress. Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses.", + "title": "Building the NGINX model" + }, + { + "location": "/how-it-works/#when-a-reload-is-required", + "text": "The next list describes the scenarios when a reload is required: New Ingress Resource Created. TLS section is added to existing Ingress. Change in Ingress annotations that impacts more than just upstream configuration. For instance load-balance annotation does not require a reload. 
A path is added/removed from an Ingress. An Ingress, Service, Secret is removed. Some missing referenced object from the Ingress is available, like a Service or Secret. A Secret is updated.", + "title": "When a reload is required" + }, + { + "location": "/how-it-works/#avoiding-reloads", + "text": "In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. This can change only if NGINX changes the way new configurations are read, basically, new changes do not replace worker processes.", + "title": "Avoiding reloads" + }, + { + "location": "/how-it-works/#avoiding-reloads-on-endpoints-changes", + "text": "On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request Lua code running in balancer_by_lua context detects what endpoints it should choose upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this includes annotation changes that affects only upstream configuration in Nginx as well. 
In a relatively big clusters with frequently deploying apps this feature saves significant number of Nginx reloads which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on.", + "title": "Avoiding reloads on Endpoints changes" + }, + { + "location": "/troubleshooting/", + "text": "Troubleshooting\n\u00b6\n\n\nIngress-Controller Logs and Events\n\u00b6\n\n\nThere are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting\nmethods to obtain more information.\n\n\nCheck the Ingress Resource Events\n\n\n$\n kubectl get ing -n \n\nNAME HOSTS ADDRESS PORTS AGE\n\n\ncafe-ingress cafe.com 10.0.2.15 80 25s\n\n\n\n$\n kubectl describe ing -n \n\nName: cafe-ingress\n\n\nNamespace: default\n\n\nAddress: 10.0.2.15\n\n\nDefault backend: default-http-backend:80 (172.17.0.5:8080)\n\n\nRules:\n\n\n Host Path Backends\n\n\n ---- ---- --------\n\n\n cafe.com\n\n\n /tea tea-svc:80 ()\n\n\n /coffee coffee-svc:80 ()\n\n\nAnnotations:\n\n\n kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/extensions/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}}\n\n\n\nEvents:\n\n\n Type Reason Age From Message\n\n\n ---- ------ ---- ---- -------\n\n\n Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress\n\n\n Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress\n\n\n\n\n\n\nCheck the Ingress Controller Logs\n\n\n$\n kubectl get pods -n \n\nNAME READY STATUS RESTARTS 
AGE\n\n\nnginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m\n\n\n\n$\n kubectl logs -n nginx-ingress-controller-67956bf89d-fv58j\n\n-------------------------------------------------------------------------------\n\n\nNGINX Ingress controller\n\n\n Release: 0.14.0\n\n\n Build: git-734361d\n\n\n Repository: https://github.com/kubernetes/ingress-nginx\n\n\n-------------------------------------------------------------------------------\n\n\n....\n\n\n\n\n\n\nCheck the Nginx Configuration\n\n\n$\n kubectl get pods -n \n\nNAME READY STATUS RESTARTS AGE\n\n\nnginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m\n\n\n\n$\n kubectl \nexec\n -it -n nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf\n\ndaemon off;\n\n\nworker_processes 2;\n\n\npid /run/nginx.pid;\n\n\nworker_rlimit_nofile 523264;\n\n\nworker_shutdown_timeout 10s;\n\n\nevents {\n\n\n multi_accept on;\n\n\n worker_connections 16384;\n\n\n use epoll;\n\n\n}\n\n\nhttp {\n\n\n....\n\n\n\n\n\n\nCheck if used Services Exist\n\n\n$\n kubectl get svc --all-namespaces\n\nNAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\ndefault coffee-svc ClusterIP 10.106.154.35 80/TCP 18m\n\n\ndefault kubernetes ClusterIP 10.96.0.1 443/TCP 30m\n\n\ndefault tea-svc ClusterIP 10.104.172.12 80/TCP 18m\n\n\nkube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m\n\n\nkube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m\n\n\nkube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m\n\n\n\n\n\n\nDebug Logging\n\u00b6\n\n\nUsing the flag \n--v=XX\n it is possible to increase the level of logging. 
This is performed by editing\nthe deployment.\n\n\n$\n kubectl get deploy -n \n\nNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE\n\n\ndefault-http-backend 1 1 1 1 35m\n\n\nnginx-ingress-controller 1 1 1 1 35m\n\n\n\n$\n kubectl edit deploy -n nginx-ingress-controller\n\n#\n Add --v\n=\nX to \n\"- args\"\n, where X is an integer\n\n\n\n\n\n\n\n--v=2\n shows details using \ndiff\n about the changes in the configuration in nginx\n\n\n--v=3\n shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format\n\n\n--v=5\n configures NGINX in \ndebug mode\n\n\n\n\nAuthentication to the Kubernetes API Server\n\u00b6\n\n\nA number of components are involved in the authentication process and the first step is to narrow\ndown the source of the problem, namely whether it is a problem with service authentication or\nwith the kubeconfig file.\n\n\nBoth authentications must work:\n\n\n+-------------+ service +------------+\n| | authentication | |\n+ apiserver +<-------------------+ ingress |\n| | | controller |\n+-------------+ +------------+\n\n\n\n\n\nService authentication\n\n\nThe Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways:\n\n\n\n\n\n\nService Account:\n This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details.\n\n\n\n\n\n\nKubeconfig file:\n In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the \n--kubeconfig\n flag. The value of the flag is a path to a file specifying how to connect to the API server. 
Using the \n--kubeconfig\n does not requires the flag \n--apiserver-host\n.\n The format of the file is identical to \n~/.kube/config\n which is used by kubectl to connect to the API server. See 'kubeconfig' section for details.\n\n\n\n\n\n\nUsing the flag \n--apiserver-host\n:\n Using this flag \n--apiserver-host=http://localhost:8080\n it is possible to specify an unsecured API server or reach a remote kubernetes cluster using \nkubectl proxy\n.\n Please do not use this approach in production.\n\n\n\n\n\n\nIn the diagram below you can see the full authentication flow with all options, starting with the browser\non the lower left hand side.\n\n\nKubernetes Workstation\n+---------------------------------------------------+ +------------------+\n| | | |\n| +-----------+ apiserver +------------+ | | +------------+ |\n| | | proxy | | | | | | |\n| | apiserver | | ingress | | | | ingress | |\n| | | | controller | | | | controller | |\n| | | | | | | | | |\n| | | | | | | | | |\n| | | service account/ | | | | | | |\n| | | kubeconfig | | | | | | |\n| | +<-------------------+ | | | | | |\n| | | | | | | | | |\n| +------+----+ kubeconfig +------+-----+ | | +------+-----+ |\n| |<--------------------------------------------------------| |\n| | | |\n+---------------------------------------------------+ +------------------+\n\n\n\n\n\nService Account\n\u00b6\n\n\nIf using a service account to connect to the API server, Dashboard expects the file\n\n/var/run/secrets/kubernetes.io/serviceaccount/token\n to be present. 
It provides a secret\ntoken that is required to authenticate with the API server.\n\n\nVerify with the following commands:\n\n\n#\n start a container that contains curl\n\n$\n kubectl run \ntest\n --image\n=\ntutum/curl -- sleep \n10000\n\n\n\n#\n check that container is running\n\n$\n kubectl get pods\n\nNAME READY STATUS RESTARTS AGE\n\n\ntest-701078429-s5kca 1/1 Running 0 16s\n\n\n\n#\n check \nif\n secret exists\n\n$\n kubectl \nexec\n test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/\n\nca.crt\n\n\nnamespace\n\n\ntoken\n\n\n\n#\n get service IP of master\n\n$\n kubectl get services\n\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n\n\nkubernetes 10.0.0.1 443/TCP 1d\n\n\n\n#\n check base connectivity from cluster inside\n\n$\n kubectl \nexec\n test-701078429-s5kca -- curl -k https://10.0.0.1\n\nUnauthorized\n\n\n\n#\n connect using tokens\n\n$\n \nTOKEN_VALUE\n=\n$(\nkubectl \nexec\n test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token\n)\n\n\n$\n \necho\n \n$TOKEN_VALUE\n\n\neyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A\n\n\n$\n kubectl \nexec\n test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \n\"Authorization: Bearer \n$TOKEN_VALUE\n\"\n https://10.0.0.1\n\n{\n\n\n \"paths\": [\n\n\n \"/api\",\n\n\n \"/api/v1\",\n\n\n \"/apis\",\n\n\n \"/apis/apps\",\n\n\n \"/apis/apps/v1alpha1\",\n\n\n \"/apis/authentication.k8s.io\",\n\n\n \"/apis/authentication.k8s.io/v1beta1\",\n\n\n \"/apis/authorization.k8s.io\",\n\n\n \"/apis/authorization.k8s.io/v1beta1\",\n\n\n \"/apis/autoscaling\",\n\n\n \"/apis/autoscaling/v1\",\n\n\n \"/apis/batch\",\n\n\n \"/apis/batch/v1\",\n\n\n \"/apis/batch/v2alpha1\",\n\n\n \"/apis/certificates.k8s.io\",\n\n\n \"/apis/certificates.k8s.io/v1alpha1\",\n\n\n \"/apis/extensions\",\n\n\n \"/apis/extensions/v1beta1\",\n\n\n \"/apis/policy\",\n\n\n \"/apis/policy/v1alpha1\",\n\n\n \"/apis/rbac.authorization.k8s.io\",\n\n\n 
\"/apis/rbac.authorization.k8s.io/v1alpha1\",\n\n\n \"/apis/storage.k8s.io\",\n\n\n \"/apis/storage.k8s.io/v1beta1\",\n\n\n \"/healthz\",\n\n\n \"/healthz/ping\",\n\n\n \"/logs\",\n\n\n \"/metrics\",\n\n\n \"/swaggerapi/\",\n\n\n \"/ui/\",\n\n\n \"/version\"\n\n\n ]\n\n\n}\n\n\n\n\n\n\nIf it is not working, there are two possible reasons:\n\n\n\n\n\n\nThe contents of the tokens are invalid. Find the secret name with \nkubectl get secrets | grep service-account\n and\n delete it with \nkubectl delete secret \n. It will automatically be recreated.\n\n\n\n\n\n\nYou have a non-standard Kubernetes installation and the file containing the token may not be present.\n The API server will mount a volume containing this file, but only if the API server is configured to use\n the ServiceAccount admission controller.\n If you experience this error, verify that your API server is using the ServiceAccount admission controller.\n If you are configuring the API server by hand, you can set this with the \n--admission-control\n parameter.\n\n\n\n\nNote that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers.\n\n\n\n\n\n\n\n\nMore information:\n\n\n\n\nUser Guide: Service Accounts\n\n\nCluster Administrator Guide: Managing Service Accounts\n\n\n\n\nKube-Config\n\u00b6\n\n\nIf you want to use a kubeconfig file for authentication, follow the \ndeploy procedure\n and\nadd the flag \n--kubeconfig=/etc/kubernetes/kubeconfig.yaml\n to the args section of the deployment.\n\n\nUsing GDB with Nginx\n\u00b6\n\n\nGdb\n can be used to with nginx to perform a configuration\ndump. 
This allows us to see which configuration is being used, as well as older configurations.\n\n\nNote: The below is based on the nginx \ndocumentation\n.\n\n\n\n\nSSH into the worker\n\n\n\n\n$\n ssh user@workerIP\n\n\n\n\n\n\n\nObtain the Docker Container Running nginx\n\n\n\n\n$\n docker ps \n|\n grep nginx-ingress-controller\n\nCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n\n\nd9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0\n\n\n\n\n\n\n\n\nExec into the container\n\n\n\n\n$\n docker \nexec\n -it --user\n=\n0\n --privileged d9e1d243156a bash\n\n\n\n\n\n\n\nMake sure nginx is running in \n--with-debug\n\n\n\n\n$\n nginx -V \n2\n>\n&\n1\n \n|\n grep -- \n'--with-debug'\n\n\n\n\n\n\n\n\nGet list of processes running on container\n\n\n\n\n$\n ps -ef\n\nUID PID PPID C STIME TTY TIME CMD\n\n\nroot 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres\n\n\nroot 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa\n\n\nroot 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/\n\n\nnobody 106 21 0 20:23 ? 00:00:00 nginx: worker process\n\n\nnobody 107 21 0 20:23 ? 
00:00:00 nginx: worker process\n\n\nroot 172 0 0 20:43 pts/0 00:00:00 bash\n\n\n\n\n\n\n\n\nAttach gdb to the nginx master process\n\n\n\n\n$\n gdb -p \n21\n\n\n....\n\n\nAttaching to process 21\n\n\nReading symbols from /usr/sbin/nginx...done.\n\n\n....\n\n\n(gdb)\n\n\n\n\n\n\n\n\nCopy and paste the following:\n\n\n\n\nset $cd = ngx_cycle->config_dump\n\n\nset $nelts = $cd.nelts\n\n\nset $elts = (ngx_conf_dump_t*)($cd.elts)\n\n\nwhile ($nelts-- > 0)\n\n\nset $name = $elts[$nelts]->name.data\n\n\nprintf \"Dumping %s to nginx_conf.txt\\n\", $name\n\n\nappend memory nginx_conf.txt \\\n\n\n $\nelts\n[\n$nelts\n]\n->buffer.start \n$elts\n[\n$nelts\n]\n->buffer.end\n\nend\n\n\n\n\n\n\n\n\n\n\nQuit GDB by pressing CTRL+D\n\n\n\n\n\n\nOpen nginx_conf.txt\n\n\n\n\n\n\ncat nginx_conf.txt", + "title": "Troubleshooting" + }, + { + "location": "/troubleshooting/#troubleshooting", + "text": "", + "title": "Troubleshooting" + }, + { + "location": "/troubleshooting/#ingress-controller-logs-and-events", + "text": "There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting\nmethods to obtain more information. 
Check the Ingress Resource Events $ kubectl get ing -n NAME HOSTS ADDRESS PORTS AGE cafe-ingress cafe.com 10.0.2.15 80 25s $ kubectl describe ing -n Name: cafe-ingress Namespace: default Address: 10.0.2.15 Default backend: default-http-backend:80 (172.17.0.5:8080) Rules: Host Path Backends ---- ---- -------- cafe.com /tea tea-svc:80 () /coffee coffee-svc:80 () Annotations: kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/extensions/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}} Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress Check the Ingress Controller Logs $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl logs -n nginx-ingress-controller-67956bf89d-fv58j ------------------------------------------------------------------------------- NGINX Ingress controller Release: 0.14.0 Build: git-734361d Repository: https://github.com/kubernetes/ingress-nginx ------------------------------------------------------------------------------- .... 
Check the Nginx Configuration $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl exec -it -n nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf daemon off; worker_processes 2; pid /run/nginx.pid; worker_rlimit_nofile 523264; worker_shutdown_timeout 10s; events { multi_accept on; worker_connections 16384; use epoll; } http { .... Check if used Services Exist $ kubectl get svc --all-namespaces NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m default kubernetes ClusterIP 10.96.0.1 443/TCP 30m default tea-svc ClusterIP 10.104.172.12 80/TCP 18m kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m", + "title": "Ingress-Controller Logs and Events" + }, + { + "location": "/troubleshooting/#debug-logging", + "text": "Using the flag --v=XX it is possible to increase the level of logging. This is performed by editing\nthe deployment. $ kubectl get deploy -n NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE default-http-backend 1 1 1 1 35m nginx-ingress-controller 1 1 1 1 35m $ kubectl edit deploy -n nginx-ingress-controller # Add --v = X to \"- args\" , where X is an integer --v=2 shows details using diff about the changes in the configuration in nginx --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format --v=5 configures NGINX in debug mode", + "title": "Debug Logging" + }, + { + "location": "/troubleshooting/#authentication-to-the-kubernetes-api-server", + "text": "A number of components are involved in the authentication process and the first step is to narrow\ndown the source of the problem, namely whether it is a problem with service authentication or\nwith the kubeconfig file. 
Both authentications must work: +-------------+ service +------------+\n| | authentication | |\n+ apiserver +<-------------------+ ingress |\n| | | controller |\n+-------------+ +------------+ Service authentication The Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways: Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using the --kubeconfig does not requires the flag --apiserver-host .\n The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. Using the flag --apiserver-host : Using this flag --apiserver-host=http://localhost:8080 it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy .\n Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser\non the lower left hand side. 
Kubernetes Workstation\n+---------------------------------------------------+ +------------------+\n| | | |\n| +-----------+ apiserver +------------+ | | +------------+ |\n| | | proxy | | | | | | |\n| | apiserver | | ingress | | | | ingress | |\n| | | | controller | | | | controller | |\n| | | | | | | | | |\n| | | | | | | | | |\n| | | service account/ | | | | | | |\n| | | kubeconfig | | | | | | |\n| | +<-------------------+ | | | | | |\n| | | | | | | | | |\n| +------+----+ kubeconfig +------+-----+ | | +------+-----+ |\n| |<--------------------------------------------------------| |\n| | | |\n+---------------------------------------------------+ +------------------+", + "title": "Authentication to the Kubernetes API Server" + }, + { + "location": "/troubleshooting/#service-account", + "text": "If using a service account to connect to the API server, Dashboard expects the file /var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret\ntoken that is required to authenticate with the API server. 
Verify with the following commands: # start a container that contains curl $ kubectl run test --image = tutum/curl -- sleep 10000 # check that container is running $ kubectl get pods NAME READY STATUS RESTARTS AGE test-701078429-s5kca 1/1 Running 0 16s # check if secret exists $ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token # get service IP of master $ kubectl get services NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes 10.0.0.1 443/TCP 1d # check base connectivity from cluster inside $ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 Unauthorized # connect using tokens $ TOKEN_VALUE = $( kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token ) $ echo $TOKEN_VALUE eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $TOKEN_VALUE \" https://10.0.0.1 { \"paths\": [ \"/api\", \"/api/v1\", \"/apis\", \"/apis/apps\", \"/apis/apps/v1alpha1\", \"/apis/authentication.k8s.io\", \"/apis/authentication.k8s.io/v1beta1\", \"/apis/authorization.k8s.io\", \"/apis/authorization.k8s.io/v1beta1\", \"/apis/autoscaling\", \"/apis/autoscaling/v1\", \"/apis/batch\", \"/apis/batch/v1\", \"/apis/batch/v2alpha1\", \"/apis/certificates.k8s.io\", \"/apis/certificates.k8s.io/v1alpha1\", \"/apis/extensions\", \"/apis/extensions/v1beta1\", \"/apis/policy\", \"/apis/policy/v1alpha1\", \"/apis/rbac.authorization.k8s.io\", \"/apis/rbac.authorization.k8s.io/v1alpha1\", \"/apis/storage.k8s.io\", \"/apis/storage.k8s.io/v1beta1\", \"/healthz\", \"/healthz/ping\", \"/logs\", \"/metrics\", \"/swaggerapi/\", \"/ui/\", \"/version\" ] } If it is not working, there are two possible reasons: The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and\n delete it with kubectl delete secret . 
It will automatically be recreated. You have a non-standard Kubernetes installation and the file containing the token may not be present.\n The API server will mount a volume containing this file, but only if the API server is configured to use\n the ServiceAccount admission controller.\n If you experience this error, verify that your API server is using the ServiceAccount admission controller.\n If you are configuring the API server by hand, you can set this with the --admission-control parameter. Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: User Guide: Service Accounts Cluster Administrator Guide: Managing Service Accounts", + "title": "Service Account" + }, + { + "location": "/troubleshooting/#kube-config", + "text": "If you want to use a kubeconfig file for authentication, follow the deploy procedure and\nadd the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment.", + "title": "Kube-Config" + }, + { + "location": "/troubleshooting/#using-gdb-with-nginx", + "text": "Gdb can be used to with nginx to perform a configuration\ndump. This allows us to see which configuration is being used, as well as older configurations. Note: The below is based on the nginx documentation . 
SSH into the worker $ ssh user@workerIP Obtain the Docker Container Running nginx $ docker ps | grep nginx-ingress-controller CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 Exec into the container $ docker exec -it --user = 0 --privileged d9e1d243156a bash Make sure nginx is running in --with-debug $ nginx -V 2 > & 1 | grep -- '--with-debug' Get list of processes running on container $ ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process root 172 0 0 20:43 pts/0 00:00:00 bash Attach gdb to the nginx master process $ gdb -p 21 .... Attaching to process 21 Reading symbols from /usr/sbin/nginx...done. .... 
(gdb) Copy and paste the following: set $cd = ngx_cycle->config_dump set $nelts = $cd.nelts set $elts = (ngx_conf_dump_t*)($cd.elts) while ($nelts-- > 0) set $name = $elts[$nelts]->name.data printf \"Dumping %s to nginx_conf.txt\\n\", $name append memory nginx_conf.txt \\ $ elts [ $nelts ] ->buffer.start $elts [ $nelts ] ->buffer.end end Quit GDB by pressing CTRL+D Open nginx_conf.txt cat nginx_conf.txt", + "title": "Using GDB with Nginx" + } + ] +} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 1d7d17151..96b8bf16c 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,222 +2,222 @@ / - 2018-08-29 + 2018-08-30 daily /deploy/ - 2018-08-29 + 2018-08-30 daily /deploy/rbac/ - 2018-08-29 + 2018-08-30 daily /deploy/upgrade/ - 2018-08-29 + 2018-08-30 daily /user-guide/nginx-configuration/ - 2018-08-29 + 2018-08-30 daily /user-guide/nginx-configuration/annotations/ - 2018-08-29 + 2018-08-30 daily /user-guide/nginx-configuration/configmap/ - 2018-08-29 + 2018-08-30 daily /user-guide/nginx-configuration/custom-template/ - 2018-08-29 + 2018-08-30 daily /user-guide/nginx-configuration/log-format/ - 2018-08-29 + 2018-08-30 daily /user-guide/cli-arguments/ - 2018-08-29 + 2018-08-30 daily /user-guide/custom-errors/ - 2018-08-29 + 2018-08-30 daily /user-guide/default-backend/ - 2018-08-29 + 2018-08-30 daily /user-guide/exposing-tcp-udp-services/ - 2018-08-29 + 2018-08-30 daily /user-guide/external-articles/ - 2018-08-29 + 2018-08-30 daily /user-guide/miscellaneous/ - 2018-08-29 + 2018-08-30 daily /user-guide/monitoring/ - 2018-08-29 + 2018-08-30 daily /user-guide/multiple-ingress/ - 2018-08-29 + 2018-08-30 daily /user-guide/tls/ - 2018-08-29 + 2018-08-30 daily /user-guide/third-party-addons/modsecurity/ - 2018-08-29 + 2018-08-30 daily /user-guide/third-party-addons/opentracing/ - 2018-08-29 + 2018-08-30 daily /examples/ - 2018-08-29 + 2018-08-30 daily /examples/PREREQUISITES/ - 2018-08-29 + 2018-08-30 daily /examples/affinity/cookie/README/ - 2018-08-29 + 
2018-08-30 daily /examples/auth/basic/README/ - 2018-08-29 + 2018-08-30 daily /examples/auth/client-certs/README/ - 2018-08-29 + 2018-08-30 daily /examples/auth/external-auth/README/ - 2018-08-29 + 2018-08-30 daily /examples/auth/oauth-external-auth/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/configuration-snippets/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/custom-configuration/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/custom-errors/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/custom-headers/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/custom-upstream-check/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/external-auth-headers/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/ssl-dh-param/README/ - 2018-08-29 + 2018-08-30 daily /examples/customization/sysctl/README/ - 2018-08-29 + 2018-08-30 daily /examples/docker-registry/README/ - 2018-08-29 + 2018-08-30 daily /examples/grpc/README/ - 2018-08-29 + 2018-08-30 daily /examples/multi-tls/README/ - 2018-08-29 + 2018-08-30 daily /examples/rewrite/README/ - 2018-08-29 + 2018-08-30 daily /examples/static-ip/README/ - 2018-08-29 + 2018-08-30 daily /examples/tls-termination/README/ - 2018-08-29 + 2018-08-30 daily /development/ - 2018-08-29 + 2018-08-30 daily /how-it-works/ - 2018-08-29 + 2018-08-30 daily /troubleshooting/ - 2018-08-29 + 2018-08-30 daily \ No newline at end of file diff --git a/troubleshooting/index.html b/troubleshooting/index.html new file mode 100644 index 000000000..61d65c12e --- /dev/null +++ b/troubleshooting/index.html @@ -0,0 +1,1578 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Troubleshooting - NGINX Ingress Controller + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Skip to content + + + +
    + +
    + +
    + + + + + + + + +
    +
    + + +
    +
    +
    + +
    +
    +
    + + +
    +
    +
    + + +
    +
    +
    + + +
    +
    + + + + + + + +

    Troubleshooting

    +

    Ingress-Controller Logs and Events

    +

    There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting +methods to obtain more information.

    +

    Check the Ingress Resource Events

    +
    $ kubectl get ing -n <namespace-of-ingress-resource>
    +NAME           HOSTS      ADDRESS     PORTS     AGE
    +cafe-ingress   cafe.com   10.0.2.15   80        25s
    +
    +$ kubectl describe ing <ingress-resource-name> -n <namespace-of-ingress-resource>
    +Name:             cafe-ingress
    +Namespace:        default
    +Address:          10.0.2.15
    +Default backend:  default-http-backend:80 (172.17.0.5:8080)
    +Rules:
    +  Host      Path  Backends
    +  ----      ----  --------
    +  cafe.com
    +            /tea      tea-svc:80 (<none>)
    +            /coffee   coffee-svc:80 (<none>)
    +Annotations:
    +  kubectl.kubernetes.io/last-applied-configuration:  {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"cafe-ingress","namespace":"default","selfLink":"/apis/extensions/v1beta1/namespaces/default/ingresses/cafe-ingress"},"spec":{"rules":[{"host":"cafe.com","http":{"paths":[{"backend":{"serviceName":"tea-svc","servicePort":80},"path":"/tea"},{"backend":{"serviceName":"coffee-svc","servicePort":80},"path":"/coffee"}]}}]},"status":{"loadBalancer":{"ingress":[{"ip":"169.48.142.110"}]}}}
    +
    +Events:
    +  Type    Reason  Age   From                      Message
    +  ----    ------  ----  ----                      -------
    +  Normal  CREATE  1m    nginx-ingress-controller  Ingress default/cafe-ingress
    +  Normal  UPDATE  58s   nginx-ingress-controller  Ingress default/cafe-ingress
    +
    + + +

    Check the Ingress Controller Logs

    +
    $ kubectl get pods -n <namespace-of-ingress-controller>
    +NAME                                        READY     STATUS    RESTARTS   AGE
    +nginx-ingress-controller-67956bf89d-fv58j   1/1       Running   0          1m
    +
    +$ kubectl logs -n <namespace> nginx-ingress-controller-67956bf89d-fv58j
    +-------------------------------------------------------------------------------
    +NGINX Ingress controller
    +  Release:    0.14.0
    +  Build:      git-734361d
    +  Repository: https://github.com/kubernetes/ingress-nginx
    +-------------------------------------------------------------------------------
    +....
    +
    + + +

    Check the Nginx Configuration

    +
    $ kubectl get pods -n <namespace-of-ingress-controller>
    +NAME                                        READY     STATUS    RESTARTS   AGE
    +nginx-ingress-controller-67956bf89d-fv58j   1/1       Running   0          1m
    +
    +$ kubectl exec -it -n <namespace-of-ingress-controller> nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf
    +daemon off;
    +worker_processes 2;
    +pid /run/nginx.pid;
    +worker_rlimit_nofile 523264;
    +worker_shutdown_timeout 10s;
    +events {
    +    multi_accept        on;
    +    worker_connections  16384;
    +    use                 epoll;
    +}
    +http {
    +....
    +
    + + +

    Check if used Services Exist

    +
    $ kubectl get svc --all-namespaces
    +NAMESPACE     NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
    +default       coffee-svc             ClusterIP   10.106.154.35    <none>        80/TCP          18m
    +default       kubernetes             ClusterIP   10.96.0.1        <none>        443/TCP         30m
    +default       tea-svc                ClusterIP   10.104.172.12    <none>        80/TCP          18m
    +kube-system   default-http-backend   NodePort    10.108.189.236   <none>        80:30001/TCP    30m
    +kube-system   kube-dns               ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP   30m
    +kube-system   kubernetes-dashboard   NodePort    10.103.128.17    <none>        80:30000/TCP    30m
    +
    + + +

    Debug Logging

    +

    Using the flag --v=XX it is possible to increase the level of logging. This is performed by editing +the deployment.

    +
    $ kubectl get deploy -n <namespace-of-ingress-controller>
    +NAME                       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
    +default-http-backend       1         1         1            1           35m
    +nginx-ingress-controller   1         1         1            1           35m
    +
    +$ kubectl edit deploy -n <namespace-of-ingress-controller> nginx-ingress-controller
    +# Add --v=X to "- args", where X is an integer
    +
    + + +
      +
    • --v=2 shows details using diff about the changes in the configuration in nginx
    • +
    • --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format
    • +
    • --v=5 configures NGINX in debug mode
    • +
    +

    Authentication to the Kubernetes API Server

    +

    A number of components are involved in the authentication process and the first step is to narrow +down the source of the problem, namely whether it is a problem with service authentication or +with the kubeconfig file.

    +

    Both authentications must work:

    +
    +-------------+   service          +------------+
    +|             |   authentication   |            |
    ++  apiserver  +<-------------------+  ingress   |
    +|             |                    | controller |
    ++-------------+                    +------------+
    +
    + + +

    Service authentication

    +

    The Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways:

    +
      +
    1. +

      Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details.

      +
    2. +
    3. +

      Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using the --kubeconfig does not requires the flag --apiserver-host. + The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details.

      +
    4. +
    5. +

      Using the flag --apiserver-host: Using this flag --apiserver-host=http://localhost:8080 it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy. + Please do not use this approach in production.

      +
    6. +
    +

    In the diagram below you can see the full authentication flow with all options, starting with the browser +on the lower left hand side.

    +
    Kubernetes                                                  Workstation
    ++---------------------------------------------------+     +------------------+
    +|                                                   |     |                  |
    +|  +-----------+   apiserver        +------------+  |     |  +------------+  |
    +|  |           |   proxy            |            |  |     |  |            |  |
    +|  | apiserver |                    |  ingress   |  |     |  |  ingress   |  |
    +|  |           |                    | controller |  |     |  | controller |  |
    +|  |           |                    |            |  |     |  |            |  |
    +|  |           |                    |            |  |     |  |            |  |
    +|  |           |  service account/  |            |  |     |  |            |  |
    +|  |           |  kubeconfig        |            |  |     |  |            |  |
    +|  |           +<-------------------+            |  |     |  |            |  |
    +|  |           |                    |            |  |     |  |            |  |
    +|  +------+----+      kubeconfig    +------+-----+  |     |  +------+-----+  |
    +|         |<--------------------------------------------------------|        |
    +|                                                   |     |                  |
    ++---------------------------------------------------+     +------------------+
    +
    + + +

    Service Account

    +

    If using a service account to connect to the API server, Dashboard expects the file +/var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret +token that is required to authenticate with the API server.

    +

    Verify with the following commands:

    +
    # start a container that contains curl
    +$ kubectl run test --image=tutum/curl -- sleep 10000
    +
    +# check that container is running
    +$ kubectl get pods
    +NAME                   READY     STATUS    RESTARTS   AGE
    +test-701078429-s5kca   1/1       Running   0          16s
    +
    +# check if secret exists
    +$ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/
    +ca.crt
    +namespace
    +token
    +
    +# get service IP of master
    +$ kubectl get services
    +NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    +kubernetes   10.0.0.1     <none>        443/TCP   1d
    +
    +# check base connectivity from cluster inside
    +$ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1
    +Unauthorized
    +
    +# connect using tokens
    +$ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
    +$ echo $TOKEN_VALUE
    +eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A
    +$ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H  "Authorization: Bearer $TOKEN_VALUE" https://10.0.0.1
    +{
    +  "paths": [
    +    "/api",
    +    "/api/v1",
    +    "/apis",
    +    "/apis/apps",
    +    "/apis/apps/v1alpha1",
    +    "/apis/authentication.k8s.io",
    +    "/apis/authentication.k8s.io/v1beta1",
    +    "/apis/authorization.k8s.io",
    +    "/apis/authorization.k8s.io/v1beta1",
    +    "/apis/autoscaling",
    +    "/apis/autoscaling/v1",
    +    "/apis/batch",
    +    "/apis/batch/v1",
    +    "/apis/batch/v2alpha1",
    +    "/apis/certificates.k8s.io",
    +    "/apis/certificates.k8s.io/v1alpha1",
    +    "/apis/extensions",
    +    "/apis/extensions/v1beta1",
    +    "/apis/policy",
    +    "/apis/policy/v1alpha1",
    +    "/apis/rbac.authorization.k8s.io",
    +    "/apis/rbac.authorization.k8s.io/v1alpha1",
    +    "/apis/storage.k8s.io",
    +    "/apis/storage.k8s.io/v1beta1",
    +    "/healthz",
    +    "/healthz/ping",
    +    "/logs",
    +    "/metrics",
    +    "/swaggerapi/",
    +    "/ui/",
    +    "/version"
    +  ]
    +}
    +
    + + +

    If it is not working, there are two possible reasons:

    +
      +
    1. +

      The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and + delete it with kubectl delete secret <name>. It will automatically be recreated.

      +
    2. +
    3. +

      You have a non-standard Kubernetes installation and the file containing the token may not be present. + The API server will mount a volume containing this file, but only if the API server is configured to use + the ServiceAccount admission controller. + If you experience this error, verify that your API server is using the ServiceAccount admission controller. + If you are configuring the API server by hand, you can set this with the --admission-control parameter.

      +
      +

      Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers.

      +
      +
    4. +
    +

    More information:

    + +

    Kube-Config

    +

    If you want to use a kubeconfig file for authentication, follow the deploy procedure and +add the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment.

    +

    Using GDB with Nginx

    +

    Gdb can be used to with nginx to perform a configuration +dump. This allows us to see which configuration is being used, as well as older configurations.

    +

    Note: The below is based on the nginx documentation.

    +
      +
    1. SSH into the worker
    2. +
    +
    $ ssh user@workerIP
    +
    + + +
      +
    1. Obtain the Docker Container Running nginx
    2. +
    +
    $ docker ps | grep nginx-ingress-controller
    +CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
    +d9e1d243156a        quay.io/kubernetes-ingress-controller/nginx-ingress-controller   "/usr/bin/dumb-init …"   19 minutes ago      Up 19 minutes                                                                            k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0
    +
    + + +
      +
    1. Exec into the container
    2. +
    +
    $ docker exec -it --user=0 --privileged d9e1d243156a bash
    +
    + + +
      +
    1. Make sure nginx is running in --with-debug
    2. +
    +
    $ nginx -V 2>&1 | grep -- '--with-debug'
    +
    + + +
      +
    1. Get list of processes running on container
    2. +
    +
    $ ps -ef
    +UID        PID  PPID  C STIME TTY          TIME CMD
    +root         1     0  0 20:23 ?        00:00:00 /usr/bin/dumb-init /nginx-ingres
    +root         5     1  0 20:23 ?        00:00:05 /nginx-ingress-controller --defa
    +root        21     5  0 20:23 ?        00:00:00 nginx: master process /usr/sbin/
    +nobody     106    21  0 20:23 ?        00:00:00 nginx: worker process
    +nobody     107    21  0 20:23 ?        00:00:00 nginx: worker process
    +root       172     0  0 20:43 pts/0    00:00:00 bash
    +
    + + +
      +
    1. Attach gdb to the nginx master process
    2. +
    +
    $ gdb -p 21
    +....
    +Attaching to process 21
    +Reading symbols from /usr/sbin/nginx...done.
    +....
    +(gdb)
    +
    + + +
      +
    1. Copy and paste the following:
    2. +
    +
    set $cd = ngx_cycle->config_dump
    +set $nelts = $cd.nelts
    +set $elts = (ngx_conf_dump_t*)($cd.elts)
    +while ($nelts-- > 0)
    +set $name = $elts[$nelts]->name.data
    +printf "Dumping %s to nginx_conf.txt\n", $name
    +append memory nginx_conf.txt \
    +        $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end
    +end
    +
    + + +
      +
    1. +

      Quit GDB by pressing CTRL+D

      +
    2. +
    3. +

      Open nginx_conf.txt

      +
    4. +
    +
    cat nginx_conf.txt
    +
    + + + + + + + + + +
    +
    +
    +
    + + + + +
    + + + + + + + + + + + + + \ No newline at end of file diff --git a/user-guide/cli-arguments/index.html b/user-guide/cli-arguments/index.html index 04fa3b192..778d1e1d6 100644 --- a/user-guide/cli-arguments/index.html +++ b/user-guide/cli-arguments/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/custom-errors/index.html b/user-guide/custom-errors/index.html index 5676d068b..3f560148b 100644 --- a/user-guide/custom-errors/index.html +++ b/user-guide/custom-errors/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1093,8 +1093,8 @@ example, if the value of the Accept header send could decide to return the error payload as a JSON document instead of HTML.

    Important

    -

    The custom backend is expected to return the correct HTTP status code instead of 200. NGINX does not change -the response from the custom default backend.

    +

    The custom backend is expected to return the correct HTTP status code instead of 200. +NGINX does not change the response from the custom default backend.

    An example of such custom backend is available inside the source repository at images/custom-error-pages.

    See also the Custom errors example.

    diff --git a/user-guide/default-backend/index.html b/user-guide/default-backend/index.html index ad945c171..07b9c50b9 100644 --- a/user-guide/default-backend/index.html +++ b/user-guide/default-backend/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/exposing-tcp-udp-services/index.html b/user-guide/exposing-tcp-udp-services/index.html index e4ee7fb54..86f04d7e3 100644 --- a/user-guide/exposing-tcp-udp-services/index.html +++ b/user-guide/exposing-tcp-udp-services/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/external-articles/index.html b/user-guide/external-articles/index.html index cbc536786..aea36f7cb 100644 --- a/user-guide/external-articles/index.html +++ b/user-guide/external-articles/index.html @@ -761,8 +761,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -773,8 +773,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/miscellaneous/index.html b/user-guide/miscellaneous/index.html index d3a8fb893..c1c88877e 100644 --- a/user-guide/miscellaneous/index.html +++ b/user-guide/miscellaneous/index.html @@ -832,8 +832,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -844,8 +844,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/monitoring/index.html b/user-guide/monitoring/index.html index 0734657e4..b4681a87f 100644 --- a/user-guide/monitoring/index.html +++ b/user-guide/monitoring/index.html @@ -817,8 +817,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -829,8 +829,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1148,7 +1148,10 @@

    Prometheus and Grafana installation

    This tutorial will show you how to install Prometheus and Grafana for scraping the metrics of the NGINX Ingress controller.

    -

    !!! Important: this example uses emptyDir volumes for Prometheus and Grafana. This means once the pod gets terminated you will lose all the data.

    +
    +

    Important

    +

    This example uses emptyDir volumes for Prometheus and Grafana. This means once the pod gets terminated you will lose all the data.

    +

    Before You Begin

    The NGINX Ingress controller should already be deployed according to the deployment instructions here.

    Note that the yaml files used in this tutorial are stored in the deploy/monitoring folder of the GitHub repository kubernetes/ingress-nginx.

    diff --git a/user-guide/multiple-ingress/index.html b/user-guide/multiple-ingress/index.html index 9b4c47955..ed83478b1 100644 --- a/user-guide/multiple-ingress/index.html +++ b/user-guide/multiple-ingress/index.html @@ -610,13 +610,6 @@ -
  • - - !!! important - - -
  • - @@ -797,8 +790,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -809,8 +802,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • @@ -1080,13 +1073,6 @@ -
  • - - !!! important - - -
  • - @@ -1147,11 +1133,13 @@ Here is a partial example:

    -

    !!! important

    +
    +

    Important

    Deploying multiple Ingress controllers, of different types (e.g., ingress-nginx & gce), and not specifying a class annotation will result in both or all controllers fighting to satisfy the Ingress, and all of them racing to update Ingress status field in confusing ways.

    When running multiple ingress-nginx controllers, it will only process an unset class annotation if one of the controllers uses the default - --ingress-class value (see IsValid method in internal/ingress/annotations/class/main.go), otherwise the class annotation become required.

    +--ingress-class value (see IsValid method in internal/ingress/annotations/class/main.go), otherwise the class annotation become required.

    +
    diff --git a/user-guide/nginx-configuration/annotations/index.html b/user-guide/nginx-configuration/annotations/index.html index 14c2b899e..89a1ec7e2 100644 --- a/user-guide/nginx-configuration/annotations/index.html +++ b/user-guide/nginx-configuration/annotations/index.html @@ -1077,8 +1077,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -1089,8 +1089,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/nginx-configuration/configmap/index.html b/user-guide/nginx-configuration/configmap/index.html index dd16cd6ee..662c1268b 100644 --- a/user-guide/nginx-configuration/configmap/index.html +++ b/user-guide/nginx-configuration/configmap/index.html @@ -1590,8 +1590,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -1602,8 +1602,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/nginx-configuration/custom-template/index.html b/user-guide/nginx-configuration/custom-template/index.html index 15aee8984..d7737af6e 100644 --- a/user-guide/nginx-configuration/custom-template/index.html +++ b/user-guide/nginx-configuration/custom-template/index.html @@ -763,8 +763,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,8 +775,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/nginx-configuration/index.html b/user-guide/nginx-configuration/index.html index 8a26c37c3..b23f8d2cc 100644 --- a/user-guide/nginx-configuration/index.html +++ b/user-guide/nginx-configuration/index.html @@ -763,8 +763,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,8 +775,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/nginx-configuration/log-format/index.html b/user-guide/nginx-configuration/log-format/index.html index b59306b25..0aa65a3ff 100644 --- a/user-guide/nginx-configuration/log-format/index.html +++ b/user-guide/nginx-configuration/log-format/index.html @@ -763,8 +763,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,8 +775,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/third-party-addons/modsecurity/index.html b/user-guide/third-party-addons/modsecurity/index.html index 5e0e6508b..b9fa446ea 100644 --- a/user-guide/third-party-addons/modsecurity/index.html +++ b/user-guide/third-party-addons/modsecurity/index.html @@ -763,8 +763,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -775,8 +775,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/third-party-addons/opentracing/index.html b/user-guide/third-party-addons/opentracing/index.html index 29e954a63..19bdce6aa 100644 --- a/user-guide/third-party-addons/opentracing/index.html +++ b/user-guide/third-party-addons/opentracing/index.html @@ -819,8 +819,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -831,8 +831,8 @@
  • - - External Authentication + + External OAUTH Authentication
  • diff --git a/user-guide/tls/index.html b/user-guide/tls/index.html index e2fec822c..90cfb6f38 100644 --- a/user-guide/tls/index.html +++ b/user-guide/tls/index.html @@ -845,8 +845,8 @@
  • - - External authentication + + External Basic Authentication
  • @@ -857,8 +857,8 @@
  • - - External Authentication + + External OAUTH Authentication