Merge pull request #1536 from aledbf/update-docs

Update documentation and examples [ci skip]
Manuel Alejandro de Brito Fontes 2017-10-16 08:56:46 -04:00 committed by GitHub
commit 9fc28366c2
24 changed files with 194 additions and 663 deletions

View file

@ -12,6 +12,9 @@ spec:
metadata:
labels:
app: ingress-nginx
+annotations:
+prometheus.io/port: '10254'
+prometheus.io/scrape: 'true'
spec:
serviceAccountName: nginx-ingress-serviceaccount
containers:
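These two annotations are what Prometheus' usual Kubernetes service-discovery relabelling keys on. A quick way to confirm they landed on the running pods (assuming the `ingress-nginx` namespace and the `app: ingress-nginx` label used in this manifest) is:

```console
kubectl -n ingress-nginx describe pods -l app=ingress-nginx | grep prometheus.io
```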

View file

@ -12,6 +12,9 @@ spec:
metadata:
labels:
app: ingress-nginx
+annotations:
+prometheus.io/port: '10254'
+prometheus.io/scrape: 'true'
spec:
containers:
- name: nginx-ingress-controller

View file

@ -4,7 +4,7 @@ Using a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) is possibl
For example, if we want to change the timeouts we need to create a ConfigMap:
```
-$ cat nginx-load-balancer-conf.yaml
+$ cat configmap.yaml
apiVersion: v1
data:
proxy-connect-timeout: "10"
@ -16,9 +16,8 @@ metadata:
```
```
-$ kubectl create -f nginx-load-balancer-conf.yaml
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/cutom-configuration/configmap.yaml \
+| kubectl apply -f -
```
-Please check the example `nginx-custom-configuration.yaml`
If the ConfigMap is updated, NGINX will be reloaded with the new configuration.
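To confirm the reload actually picked up the new values, one option is to grep the rendered nginx.conf inside the controller pod (namespace and pod name are placeholders matching the rest of this commit):

```console
kubectl -n ingress-nginx exec <nginx-ingress-controller-pod> -- \
  cat /etc/nginx/nginx.conf | grep proxy_connect_timeout
```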

View file

@ -1,10 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
-name: nginx-custom-configuration
+name: nginx-configuration
+namespace: ingress-nginx
labels:
-k8s-app: nginx-ingress-controller
-namespace: kube-system
+app: ingress-nginx
data:
proxy-connect-timeout: "10"
proxy-read-timeout: "120"
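Note that the controller only reads the ConfigMap it is pointed at, so the `--configmap` flag in the controller Deployment has to match the new name and namespace; a sketch of the relevant args (the rest of the Deployment is omitted):

```
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-configuration
```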

View file

@ -1,56 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-custom-configuration

View file

@ -1,73 +1,16 @@
-# Deploying the Nginx Ingress controller
+# Custom configuration
This example aims to demonstrate the deployment of an nginx ingress controller and This example aims to demonstrate the deployment of an nginx ingress controller and
use a ConfigMap to configure a custom list of headers to be passed to the upstream use a ConfigMap to configure a custom list of headers to be passed to the upstream
server server
## Default Backend
The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:
```console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/custom-headers/configmap.yaml \
+| kubectl apply -f -
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/custom-headers/custom-headers.yaml \
+| kubectl apply -f -
-$ kubectl apply -f default-backend.yaml
-deployment "default-http-backend" created
-service "default-http-backend" created
-$ kubectl -n kube-system get po
-NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 28s
```
## Custom configuration
```console
$ cat nginx-load-balancer-conf.yaml
apiVersion: v1
data:
proxy-set-headers: "default/custom-headers"
kind: ConfigMap
metadata:
name: nginx-load-balancer-conf
```
```console
$ kubectl create -f nginx-load-balancer-conf.yaml
```
## Custom headers
```console
$ cat custom-headers.yaml
apiVersion: v1
data:
X-Different-Name: "true"
X-Request-Start: t=${msec}
X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
name: proxy-headers
namespace: default
```
```console
$ kubectl create -f custom-headers.yaml
```
## Controller
You can deploy the controller as follows:
```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 2m
nginx-ingress-controller-873061567-4n3k2 1/1 Running 0 42s
```
## Test
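With both ConfigMaps applied, the headers listed in `custom-headers` should show up as `proxy_set_header` entries in the generated configuration; a rough check (pod name is a placeholder):

```console
kubectl -n ingress-nginx exec <nginx-ingress-controller-pod> -- \
  grep X-Using-Nginx-Controller /etc/nginx/nginx.conf
```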

View file

@ -0,0 +1,9 @@
apiVersion: v1
data:
proxy-set-headers: "ingress-nginx/custom-headers"
kind: ConfigMap
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx

View file

@ -6,4 +6,4 @@ data:
kind: ConfigMap
metadata:
name: custom-headers
-namespace: kube-system
+namespace: ingress-nginx

View file

@ -1,51 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissable as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

View file

@ -1,53 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf

View file

@ -1,7 +0,0 @@
apiVersion: v1
data:
proxy-set-headers: "kube-system/custom-headers"
kind: ConfigMap
metadata:
name: nginx-load-balancer-conf
namespace: kube-system

View file

@ -3,68 +3,43 @@
This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to enable [nginx vts module](https://github.com/vozlt/nginx-module-vts
) to export metrics in prometheus format.
-# vts-metrics
+## vts-metrics
Vts-metrics export NGINX metrics. To deploy all the files simply run `kubectl apply -f nginx`. A deployment and service will be
created which already has a `prometheus.io/scrape: 'true'` annotation and if you added
the recommended Prometheus service-endpoint scraping [configuration](https://raw.githubusercontent.com/prometheus/prometheus/master/documentation/examples/prometheus-kubernetes.yml),
Prometheus will scrape it automatically and you start using the generated metrics right away.
## Default Backend
The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:
```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 28s
```
## Custom configuration
```console
-$ cat nginx-vts-metrics-conf.yaml
apiVersion: v1
data:
enable-vts-status: "true"
kind: ConfigMap
metadata:
-name: nginx-vts-metrics-conf
-namespace: kube-system
+name: nginx-configuration
+namespace: ingress-nginx
+labels:
+app: ingress-nginx
```
```console
-$ kubectl create -f nginx-vts-metrics-conf.yaml
+$ kubectl apply -f nginx-vts-metrics-conf.yaml
```
## Controller
You can deploy the controller as follows:
```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 2m
nginx-ingress-controller-873061567-4n3k2 1/1 Running 0 42s
```
## Result
Check whether the ingress controller successfully generated the NGINX vts status:
```console
-$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display
+$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
```
### NGINX vts dashboard
The vts dashboard provides real time metrics.
![vts dashboard](imgs/vts-dashboard.png)
@ -72,17 +47,17 @@ The vts dashboard provides real time metrics.
Because the vts port is not yet exposed, you should forward the controller port to see it.
```console
-$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) -n kube-system 18080
+$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n ingress-nginx --output=jsonpath={.items..metadata.name}) -n ingress-nginx 18080
```
Now open the url [http://localhost:18080/nginx_status](http://localhost:18080/nginx_status) in your browser.
### Prometheus metrics output
NGINX Ingress controller already has a parser to convert vts metrics to Prometheus format. It exports prometheus metrics to the address `:10254/metrics`.
```console
-$ kubectl exec -ti -n kube-system $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics
+$ kubectl exec -ti -n ingress-nginx $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics
ingress_controller_ssl_expire_time_seconds{host="foo.bar.com"} -6.21355968e+10
# HELP ingress_controller_success Cumulative number of Ingress controller reload operations
# TYPE ingress_controller_success counter
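Beyond the raw dump above, the same endpoint can be filtered for a single series, for example the reload counter shown in this output (pod name is a placeholder):

```console
kubectl -n ingress-nginx exec -ti <nginx-ingress-controller-pod> -- \
  curl -s localhost:10254/metrics | grep ingress_controller_success
```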

View file

@ -1,51 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissable as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

View file

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-controller-service
namespace: kube-system
labels:
k8s-app: nginx-ingress-controller-service
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
ports:
- name: http-metrics
port: 8080
targetPort: 10254
protocol: TCP
selector:
k8s-app: nginx-ingress-controller

View file

@ -1,56 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-vts-metrics-conf

View file

@ -3,5 +3,7 @@ data:
enable-vts-status: "true"
kind: ConfigMap
metadata:
-name: nginx-vts-metrics-conf
-namespace: kube-system
+name: nginx-configuration
+namespace: ingress-nginx
+labels:
+app: ingress-nginx

View file

@ -4,36 +4,23 @@ This example aims to demonstrate the deployment of an nginx ingress controller a
use a ConfigMap to configure custom Diffie-Hellman parameters file to help with
"Perfect Forward Secrecy".
## Default Backend
The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:
```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 28s
```
## Custom configuration
```console
-$ cat nginx-load-balancer-conf.yaml
+$ cat configmap.yaml
apiVersion: v1
data:
-ssl-dh-param: "kube-system/lb-dhparam"
+ssl-dh-param: "ingress-nginx/lb-dhparam"
kind: ConfigMap
metadata:
-name: nginx-load-balancer-conf
+name: nginx-configuration
+namespace: ingress-nginx
+labels:
+app: ingress-nginx
```
```console
-$ kubectl create -f nginx-load-balancer-conf.yaml
+$ kubectl create -f configmap.yaml
```
## Custom DH parameters secret
@ -48,31 +35,18 @@ $ cat ssl-dh-param.yaml
apiVersion: v1
data:
dhparam.pem: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ..."
-kind: Secret
-type: Opaque
+kind: ConfigMap
metadata:
-name: lb-dhparam
-namespace: kube-system
+name: nginx-configuration
+namespace: ingress-nginx
+labels:
+app: ingress-nginx
```
```console
$ kubectl create -f ssl-dh-param.yaml
```
## Controller
You can deploy the controller as follows:
```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 2m
nginx-ingress-controller-873061567-4n3k2 1/1 Running 0 42s
```
## Test
Check that the contents of the configmap are present in the nginx.conf file using:
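For instance, the DH parameters end up as an `ssl_dhparam` directive in the generated configuration, so a check along these lines should work (pod name is a placeholder):

```console
kubectl -n ingress-nginx exec <nginx-ingress-controller-pod> -- \
  cat /etc/nginx/nginx.conf | grep ssl_dhparam
```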

View file

@ -0,0 +1,9 @@
apiVersion: v1
data:
ssl-dh-param: "ingress-nginx/lb-dhparam"
kind: ConfigMap
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx

View file

@ -1,51 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissable as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

View file

@ -1,53 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf

View file

@ -1,7 +0,0 @@
apiVersion: v1
data:
ssl-dh-param: "kube-system/lb-dhparam"
kind: ConfigMap
metadata:
name: nginx-load-balancer-conf
namespace: kube-system

View file

@ -5,4 +5,4 @@ kind: Secret
type: Opaque
metadata:
name: lb-dhparam
-namespace: kube-system
+namespace: ingress-nginx
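The base64 value stored under `dhparam.pem` in this Secret can be produced with openssl; a minimal sketch (the key size is a choice, not something the controller requires):

```console
openssl dhparam -out dhparam.pem 2048
cat dhparam.pem | base64

# or let kubectl do the encoding:
kubectl -n ingress-nginx create secret generic lb-dhparam --from-file=dhparam.pem
```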

View file

@ -24,8 +24,10 @@ Rewriting can be controlled using the following annotations:
## Validation
### Rewrite Target
Create an Ingress rule with a rewrite annotation:
-```
+```console
$ echo "
apiVersion: extensions/v1beta1
kind: Ingress
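For context, a full rule of that shape looks roughly like the following; host, service name and port are placeholders, and the exact annotation prefix depends on the controller version in use:

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: rewrite
  annotations:
    ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: rewrite.bar.com
    http:
      paths:
      - path: /something
        backend:
          serviceName: http-svc
          servicePort: 80
```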

View file

@ -2,17 +2,7 @@
---
-#### proxy-body-size:
-Sets the maximum allowed size of the client request body.
-See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
-#### custom-http-errors:
-Enables which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page).
-Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors) which are required to process error_page.
-Example usage: `custom-http-errors: 404,415`
+### Logs
#### disable-access-log
@ -20,11 +10,109 @@ Disables the Access Log from the entire Ingress Controller. This is 'false' by d
#### access-log-path
-Access log path. Goes to '/var/log/nginx/access.log' by default. http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log
+Access log path. Goes to '/var/log/nginx/access.log' by default.
_References:_
- http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log
#### error-log-level
Configures the logging level of errors. Log levels above are listed in the order of increasing severity.
_References:_
- http://nginx.org/en/docs/ngx_core_module.html#error_log
#### error-log-path
-Error log path. Goes to '/var/log/nginx/error.log' by default. http://nginx.org/en/docs/ngx_core_module.html#error_log
+Error log path. Goes to '/var/log/nginx/error.log' by default.
_References:_
- http://nginx.org/en/docs/ngx_core_module.html#error_log
#### log-format-stream
Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format).
#### log-format-upstream
Sets the nginx [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format).
Example for json output:
```console
log-format-upstream: '{ "time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr",
"x-forward-for": "$proxy_add_x_forwarded_for", "request_id": "$request_id", "remote_user":
"$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status":
$status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri",
"request_query": "$args", "request_length": $request_length, "duration": $request_time,
"method": "$request_method", "http_referrer": "$http_referer", "http_user_agent":
"$http_user_agent" }'
```
Please check [log-format](log-format.md) for definition of each field.
### Proxy configuration
#### load-balance
Sets the algorithm to use for load balancing.
The value can either be:
- round_robin: to use the default round robin loadbalancer
- least_conn: to use the least connected method
- ip_hash: to use a hash of the server for routing.
The default is least_conn.
_References:_
- http://nginx.org/en/docs/http/load_balancing.html.
#### proxy-body-size
Sets the maximum allowed size of the client request body.
See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
#### proxy-buffer-size
Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header.
#### proxy-connect-timeout
Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds.
#### proxy-cookie-domain
Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response.
#### proxy-cookie-path
Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response.
#### proxy-next-upstream
Specifies in [which cases](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) a request should be passed to the next server.
#### proxy-read-timeout
Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response.
#### proxy-send-timeout
Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request.
#### proxy-request-buffering
Enables or disables [buffering of a client request body](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering).
#### custom-http-errors
Enables which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page).
Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors) which are required to process error_page.
Example usage: `custom-http-errors: 404,415`
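Putting a few of the options above together, the corresponding ConfigMap entries could look like this (values are only illustrative, not recommendations):

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
data:
  load-balance: "ip_hash"
  proxy-body-size: "8m"
  proxy-connect-timeout: "15"
  proxy-read-timeout: "120"
  custom-http-errors: "404,415"
```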
#### enable-modsecurity
@ -36,7 +124,7 @@ By default this is disabled.
Enables the OWASP ModSecurity Core Rule Set (CRS)
By default this is disabled.
-#### disable-ipv6:
+#### disable-ipv6
Disable listening on IPV6.
By default this is disabled.
@ -57,14 +145,6 @@ By default this is disabled.
Allows the replacement of the default status page with a third party module named [nginx-module-vts](https://github.com/vozlt/nginx-module-vts).
By default this is disabled.
#### error-log-level
Configures the logging level of errors. Log levels above are listed in the order of increasing severity.
_References:_
- http://nginx.org/en/docs/ngx_core_module.html#error_log
#### gzip-types
Sets the MIME types in addition to "text/html" to compress. The special value "\*" matches any MIME type.
@ -97,7 +177,7 @@ Enables or disables the preload attribute in the HSTS feature (when it is enable
Set if header fields with invalid names should be ignored.
By default this is enabled.
-#### keep-alive:
+#### keep-alive
Sets the time during which a keep-alive client connection will stay open on the server side.
The zero value disables keep-alive client connections.
@ -106,78 +186,10 @@ _References:_
- http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout
#### load-balance
Sets the algorithm to use for load balancing.
The value can either be:
- round_robin: to use the default round robin loadbalancer
- least_conn: to use the least connected method
- ip_hash: to use a hash of the server for routing.
The default is least_conn.
_References:_
- http://nginx.org/en/docs/http/load_balancing.html.
#### log-format-upstream
Sets the nginx [log format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format).
Example for json output:
```console
log-format-upstream: '{ "time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr",
"x-forward-for": "$proxy_add_x_forwarded_for", "request_id": "$request_id", "remote_user":
"$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status":
$status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri",
"request_query": "$args", "request_length": $request_length, "duration": $request_time,
"method": "$request_method", "http_referrer": "$http_referer", "http_user_agent":
"$http_user_agent" }'
```
Please check [log-format](log-format.md) for definition of each field.
#### log-format-stream
Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format).
#### max-worker-connections
Sets the maximum number of simultaneous connections that can be opened by each [worker process](http://nginx.org/en/docs/ngx_core_module.html#worker_connections)
#### proxy-buffer-size
Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header.
#### proxy-connect-timeout
Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds.
#### proxy-cookie-domain
Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response.
#### proxy-cookie-path
Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response.
#### proxy-read-timeout
Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response.
#### proxy-send-timeout
Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request.
#### proxy-next-upstream
Specifies in [which cases](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) a request should be passed to the next server.
#### proxy-request-buffering
Enables or disables [buffering of a client request body](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering).
#### retry-non-idempotent
Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server.
@ -338,7 +350,7 @@ Sets parameters for a shared memory zone that will keep states for various keys
#### proxy-set-headers
-Sets custom headers from a configmap before sending traffic to backends. See [example](https://github.com/kubernetes/ingress-nginx/tree/master/deploy/examples/customization/custom-headers)
+Sets custom headers from a configmap before sending traffic to backends. See [example](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers)
#### add-headers
@ -349,6 +361,23 @@ Sets custom headers from a configmap before sending traffic to the client. See `
Sets the addresses on which the server will accept requests instead of *.
It should be noted that these addresses must exist in the runtime environment or the controller will crash loop.
#### http-snippet
Adds custom configuration to the http section of the nginx configuration
Default: ""
#### server-snippet
Adds custom configuration to all the servers in the nginx configuration
Default: ""
#### location-snippet
Adds custom configuration to all the locations in the nginx configuration
Default: ""
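The three `*-snippet` keys take raw nginx configuration verbatim; a small sketch (the directives shown are ordinary nginx ones, picked only for illustration):

```
data:
  http-snippet: |
    underscores_in_headers on;
  server-snippet: |
    add_header X-Served-By "ingress-nginx";
  location-snippet: |
    proxy_set_header X-Snippet "true";
```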
### Opentracing
#### enable-opentracing
Enables the nginx Opentracing extension https://github.com/rnburn/nginx-opentracing
@ -368,22 +397,6 @@ Default: 9411
Specifies the service name to use for any traces created
Default: nginx
#### http-snippet
Adds custom configuration to the http section of the nginx configuration
Default: ""
#### server-snippet
Adds custom configuration to all the servers in the nginx configuration
Default: ""
#### location-snippet
Adds custom configuration to all the locations in the nginx configuration
Default: ""
### Default configuration options
The following table shows the options, the default value and a description.
@ -397,6 +410,7 @@ The following table shows the options, the default value and a description.
|enable-underscores-in-headers|"false"|
|enable-vts-status|"false"|
|error-log-level|notice|
+|forwarded-for-header|X-Forwarded-For|
|gzip-types|see use-gzip description above|
|hsts|"true"|
|hsts-include-subdomains|"true"|
@ -417,12 +431,13 @@ The following table shows the options, the default value and a description.
|proxy-read-timeout|"60"|
|proxy-real-ip-cidr|0.0.0.0/0|
|proxy-send-timeout|"60"|
+|proxy-stream-timeout|"600s"|
|retry-non-idempotent|"false"|
|server-name-hash-bucket-size|"64"|
|server-name-hash-max-size|"512"|
|server-tokens|"true"|
|ssl-buffer-size|4k|
-|ssl-ciphers||
+|ssl-ciphers|ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256|
|ssl-dh-param|value from openssl|
|ssl-protocols|TLSv1.2|
|ssl-session-cache|"true"|
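Changing any of these keys in the ConfigMap triggers a configuration reload; the controller logs normally make that visible (pod name is a placeholder, exact log wording varies by version):

```console
kubectl -n ingress-nginx logs <nginx-ingress-controller-pod> | grep -i reload
```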