diff --git a/controllers/nginx/.dockerignore b/.dockerignore similarity index 100% rename from controllers/nginx/.dockerignore rename to .dockerignore diff --git a/controllers/nginx/Changelog.md b/Changelog.md similarity index 100% rename from controllers/nginx/Changelog.md rename to Changelog.md diff --git a/Makefile b/Makefile index ff2d71a2b..d467e0c20 100644 --- a/Makefile +++ b/Makefile @@ -1,58 +1,136 @@ -all: fmt lint vet +all: push BUILDTAGS= -# base package. It contains the common and backends code -PKG := "k8s.io/ingress" +# Use the 0.0 tag for testing, it shouldn't clobber any release builds +TAG?=0.9.0-beta.15 +REGISTRY?=gcr.io/google_containers +GOOS?=linux +DOCKER?=gcloud docker -- +SED_I?=sed -i +GOHOSTOS ?= $(shell go env GOHOSTOS) -GO_LIST_FILES=$(shell go list ${PKG}/... | grep -v vendor | grep -v -e "test/e2e") +ifeq ($(GOHOSTOS),darwin) + SED_I=sed -i '' +endif -.PHONY: fmt -fmt: - @go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c +REPO_INFO=$(shell git config --get remote.origin.url) -.PHONY: lint -lint: - @go list -f '{{if len .TestGoFiles}}"golint -min_confidence=0.85 {{.Dir}}/..."{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c +ifndef COMMIT + COMMIT := git-$(shell git rev-parse --short HEAD) +endif -.PHONY: test -test: - @go test -v -race -tags "$(BUILDTAGS) cgo" ${GO_LIST_FILES} +PKG=k8s.io/ingress/controllers/nginx -.PHONY: test-e2e -test-e2e: ginkgo - @go run hack/e2e.go -v --up --test --down +ARCH ?= $(shell go env GOARCH) +GOARCH = ${ARCH} +DUMB_ARCH = ${ARCH} -.PHONY: cover -cover: - @go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c - gover - goveralls -coverprofile=gover.coverprofile -service travis-ci +ALL_ARCH = amd64 arm arm64 ppc64le -.PHONY: vet -vet: - @go vet ${GO_LIST_FILES} +QEMUVERSION=v2.9.1 + +IMGNAME = nginx-ingress-controller +IMAGE = $(REGISTRY)/$(IMGNAME) +MULTI_ARCH_IMG = 
$(IMAGE)-$(ARCH) + +# Set default base image dynamically for each arch +BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.25 + +ifeq ($(ARCH),arm) + QEMUARCH=arm + GOARCH=arm + DUMB_ARCH=armhf +endif +ifeq ($(ARCH),arm64) + QEMUARCH=aarch64 +endif +ifeq ($(ARCH),ppc64le) + QEMUARCH=ppc64le + GOARCH=ppc64le + DUMB_ARCH=ppc64el +endif +#ifeq ($(ARCH),s390x) +# QEMUARCH=s390x +#endif + +TEMP_DIR := $(shell mktemp -d) + +DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile + +all: all-container + +sub-container-%: + $(MAKE) ARCH=$* build container + +sub-push-%: + $(MAKE) ARCH=$* push + +all-container: $(addprefix sub-container-,$(ALL_ARCH)) + +all-push: $(addprefix sub-push-,$(ALL_ARCH)) + +container: .container-$(ARCH) +.container-$(ARCH): + cp -r ./* $(TEMP_DIR) + $(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE) + $(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE) + $(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE) + +ifeq ($(ARCH),amd64) + # When building "normally" for amd64, remove the whole line, it has no part in the amd64 image + $(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE) +else + # When cross-building, only the placeholder "CROSS_BUILD_" should be removed + # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel + $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs + $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) +endif + + $(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs + +ifeq ($(ARCH), amd64) + # This is for to maintain the backward compatibility + $(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) +endif + +push: .push-$(ARCH) +.push-$(ARCH): + $(DOCKER) push $(MULTI_ARCH_IMG):$(TAG) +ifeq ($(ARCH), amd64) + $(DOCKER) push $(IMAGE):$(TAG) +endif -.PHONY: clean clean: - make -C controllers/nginx clean + $(DOCKER) rmi -f 
$(MULTI_ARCH_IMG):$(TAG) || true -.PHONY: controllers -controllers: - make -C controllers/nginx build +build: clean + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \ + -ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${TAG} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \ + -o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller -.PHONY: docker-build -docker-build: - make -C controllers/nginx all-container +fmt: + @echo "+ $@" + @go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c -.PHONY: docker-push -docker-push: - make -C controllers/nginx all-push +lint: + @echo "+ $@" + @go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c -.PHONE: release -release: - make -C controllers/nginx release +test: fmt lint vet + @echo "+ $@" + @go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor) -.PHONY: ginkgo -ginkgo: - go get github.com/onsi/ginkgo/ginkgo +cover: + @echo "+ $@" + @go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c + gover + goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken ${COVERALLS_TOKEN} + +vet: + @echo "+ $@" + @go vet $(shell go list ${PKG}/... 
| grep -v vendor) + +release: all-container all-push + echo "done" diff --git a/README.md b/README.md index 41606d8d3..11c3f6816 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,553 @@ -# NGINX Ingress +# Nginx Ingress Controller -[![Build Status](https://travis-ci.org/kubernetes/ingress-nginx.svg?branch=master)](https://travis-ci.org/kubernetes/ingress-nginx) -[![Coverage Status](https://coveralls.io/repos/github/kubernetes/ingress-nginx/badge.svg?branch=master)](https://coveralls.io/github/kubernetes/ingress-nginx?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/ingress-nginx)](https://goreportcard.com/report/github.com/kubernetes/ingress-nginx) -[![GoDoc](https://godoc.org/github.com/kubernetes/ingress-nginx?status.svg)](https://godoc.org/github.com/kubernetes/ingress-nginx) +This is an nginx Ingress controller that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works. 
-## Description +## Contents +* [Conventions](#conventions) +* [Requirements](#requirements) +* [Command line arguments](#command-line-arguments) +* [Dry running](#try-running-the-ingress-controller) +* [Deployment](#deployment) +* [HTTP](#http) +* [HTTPS](#https) + * [Default SSL Certificate](#default-ssl-certificate) + * [HTTPS enforcement](#server-side-https-enforcement) + * [HSTS](#http-strict-transport-security) + * [Kube-Lego](#automated-certificate-management-with-kube-lego) +* [Source IP address](#source-ip-address) +* [TCP Services](#exposing-tcp-services) +* [UDP Services](#exposing-udp-services) +* [Proxy Protocol](#proxy-protocol) +* [Opentracing](#opentracing) +* [NGINX customization](configuration.md) +* [Custom errors](#custom-errors) +* [NGINX status page](#nginx-status-page) +* [Running multiple ingress controllers](#running-multiple-ingress-controllers) +* [Running on Cloudproviders](#running-on-cloudproviders) +* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller) +* [Log format](#log-format) +* [Local cluster](#local-cluster) +* [Debug & Troubleshooting](#debug--troubleshooting) +* [Limitations](#limitations) +* [Why endpoints and not services?](#why-endpoints-and-not-services) +* [NGINX Notes](#nginx-notes) -This repository contains the NGINX controller built around the [Kubernetes Ingress resource](http://kubernetes.io/docs/user-guide/ingress/). +## Conventions -The GCE ingress controller was moved to [github.com/kubernetes/ingress-gce](https://github.com/kubernetes/ingress-gce). +Anytime we reference a tls secret, we mean (x509, pem encoded, RSA 2048, etc). 
You can generate such a certificate with: + `openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"` + and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}` -## Navigation -* Learn more about using Ingress - * See our user documentation on [k8s.io](http://kubernetes.io/docs/user-guide/ingress/) - * Follow through to the respective platform specific [examples](examples/README.md) -* Write your own Ingress controller - * See our [developer documentation](docs/dev/README.md) -* Deploy existing Ingress controllers - * See our [admin documentation](docs/admin.md) -* Contribute - * See the [contributor guidelines](CONTRIBUTING.md) -* Debug - * Peruse the [FAQ section](docs/faq/README.md) - * Ask on one of the [user-support channels](CONTRIBUTING.md#support-channels) + +## Requirements +- Default backend [404-server](https://github.com/kubernetes/ingress/tree/master/images/404-server) + + +## Command line arguments +``` +Usage of : + --alsologtostderr log to standard error as well as files + --apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. + --configmap string Name of the ConfigMap that contains the custom configuration to use + --default-backend-service string Service used to serve a 404 page for the default backend. Takes the form + namespace/name. The controller uses the first node port of this Service for + the default backend. + --default-server-port int Default port to use for exposing the default server (catch all) (default 8181) + --default-ssl-certificate string Name of the secret + that contains a SSL certificate to be used as default for a HTTPS catch-all server + --disable-node-list Disable querying nodes. 
If --force-namespace-isolation is true, this should also be set. + --election-id string Election id to use for status update. (default "ingress-controller-leader") + --enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled + --force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or + configmaps located in a different namespace than the specified in the flag --watch-namespace. + --health-check-path string Defines + the URL to be used as health check inside in the default server in NGINX. (default "/healthz") + --healthz-port int port for healthz endpoint. (default 10254) + --http-port int Indicates the port to use for HTTP traffic (default 80) + --https-port int Indicates the port to use for HTTPS traffic (default 443) + --ingress-class string Name of the ingress class to route through this controller. + --kubeconfig string Path to kubeconfig file with authorization and master location information. + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --publish-service string Service fronting the ingress controllers. Takes the form + namespace/name. The controller will set the endpoint records on the + ingress objects to reflect those on the service. + --sort-backends Defines if backends and it's endpoints should be sorted + --ssl-passtrough-proxy-port int Default port to use internally for SSL when SSL Passthgough is enabled (default 442) + --status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080) + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + --sync-period duration Relist and confirm cloud resources this often. 
Default is 10 minutes (default 10m0s) + --tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose. + The key in the map indicates the external port to be used. The value is the name of the + service with the format namespace/serviceName and the port of the service could be a + number of the name of the port. + The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend + --udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose. + The key in the map indicates the external port to be used. The value is the name of the + service with the format namespace/serviceName and the port of the service could be a + number of the name of the port. + --update-status Indicates if the + ingress controller should update the Ingress status IP/hostname. Default is true (default true) + --update-status-on-shutdown Indicates if the + ingress controller should update the Ingress status IP/hostname when the controller + is being stopped. Default is true (default true) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces +``` + +## Try running the Ingress controller + +Before deploying the controller to production you might want to run it outside the cluster and observe it. 
+ +```console +$ make build +$ mkdir /etc/nginx-ssl +$ ./rootfs/nginx-ingress-controller --running-in-cluster=false --default-backend-service=kube-system/default-http-backend +``` + +## Deployment + +First create a default backend and it's corresponding service: +``` +$ kubectl create -f examples/default-backend.yaml +``` + +Follow the [example-deployment](../../examples/deployment/nginx/README.md) steps to deploy nginx-ingress-controller in Kubernetes cluster (you may prefer other type of workloads, like Daemonset, in production environment). +Loadbalancers are created via a ReplicationController or Daemonset: + + +## HTTP + +First we need to deploy some application to publish. To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the http request as output +``` +kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.8 --replicas=1 --port=8080 +``` + +Now we expose the same application in two different services (so we can create different Ingress rules) +``` +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x +kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y +``` + +Next we create a couple of Ingress rules +``` +kubectl create -f examples/ingress.yaml +``` + +we check that ingress rules are defined: +``` +$ kubectl get ing +NAME RULE BACKEND ADDRESS +echomap - + foo.bar.com + /foo echoheaders-x:80 + bar.baz.com + /bar echoheaders-y:80 + /foo echoheaders-x:80 +``` + +Before the deploy of the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) +``` +kubectl create -f examples/default-backend.yaml +kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend +``` + +Check NGINX it is running with the defined Ingress rules: + +``` +$ LBIP=$(kubectl get node 
`kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}') +$ curl $LBIP/foo -H 'Host: foo.bar.com' +``` + +## HTTPS + +You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI. The TLS secret must contain keys named tls.crt and tls.key that contain the certificate and private key to use for TLS, eg: + +``` +apiVersion: v1 +data: + tls.crt: base64 encoded cert + tls.key: base64 encoded key +kind: Secret +metadata: + name: foo-secret + namespace: default +type: kubernetes.io/tls +``` + +Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS: + +``` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: no-rules-map +spec: + tls: + secretName: foo-secret + backend: + serviceName: s1 + servicePort: 80 +``` +Please follow [PREREQUISITES](../../examples/PREREQUISITES.md) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate. + +Check the [example](../../examples/tls-termination/nginx) + +### Default SSL Certificate + +NGINX provides the option [server name](http://nginx.org/en/docs/http/server_names.html) as a catch-all in case of requests that do not match one of the configured server names. This configuration works without issues for HTTP traffic. In case of HTTPS NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case. +If this flag is not provided NGINX will use a self signed certificate. 
+ +Running without the flag `--default-ssl-certificate`: + +``` +$ curl -v https://10.2.78.7:443 -k +* Rebuilt URL to: https://10.2.78.7:443/ +* Trying 10.2.78.4... +* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) +* ALPN, offering http/1.1 +* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt + CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS header, Certificate Status (22): +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Client hello (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS change cipher, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use http/1.1 +* Server certificate: +* subject: CN=foo.bar.com +* start date: Apr 13 00:50:56 2016 GMT +* expire date: Apr 13 00:50:56 2017 GMT +* issuer: CN=foo.bar.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. +> GET / HTTP/1.1 +> Host: 10.2.78.7 +> User-Agent: curl/7.47.1 +> Accept: */* +> +< HTTP/1.1 404 Not Found +< Server: nginx/1.11.1 +< Date: Thu, 21 Jul 2016 15:38:46 GMT +< Content-Type: text/html +< Transfer-Encoding: chunked +< Connection: keep-alive +< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload +< +The page you're looking for could not be found. + +* Connection #0 to host 10.2.78.7 left intact +``` + +Specifying `--default-ssl-certificate=default/foo-tls`: + +``` +core@localhost ~ $ curl -v https://10.2.78.7:443 -k +* Rebuilt URL to: https://10.2.78.7:443/ +* Trying 10.2.78.7... 
+* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) +* ALPN, offering http/1.1 +* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt + CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS header, Certificate Status (22): +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Client hello (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS change cipher, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use http/1.1 +* Server certificate: +* subject: CN=foo.bar.com +* start date: Apr 13 00:50:56 2016 GMT +* expire date: Apr 13 00:50:56 2017 GMT +* issuer: CN=foo.bar.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. +> GET / HTTP/1.1 +> Host: 10.2.78.7 +> User-Agent: curl/7.47.1 +> Accept: */* +> +< HTTP/1.1 404 Not Found +< Server: nginx/1.11.1 +< Date: Mon, 18 Jul 2016 21:02:59 GMT +< Content-Type: text/html +< Transfer-Encoding: chunked +< Connection: keep-alive +< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload +< +The page you're looking for could not be found. + +* Connection #0 to host 10.2.78.7 left intact +``` + + +### Server-side HTTPS enforcement + +By default the controller redirects (301) to HTTPS if TLS is enabled for that ingress . If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map. 
+ +To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource. + + +### HTTP Strict Transport Security + +HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. + +By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. + +To disable this behavior use `hsts=false` in the NGINX config map. + + +### Automated Certificate Management with Kube-Lego + +[Kube-Lego] automatically requests missing or expired certificates from +[Let's Encrypt] by monitoring ingress resources and their referenced secrets. To +enable this for an ingress resource you have to add an annotation: + +``` +kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true" +``` + +To setup Kube-Lego you can take a look at this [full example]. The first +version to fully support Kube-Lego is nginx Ingress controller 0.8. + +[full example]:https://github.com/jetstack/kube-lego/tree/master/examples +[Kube-Lego]:https://github.com/jetstack/kube-lego +[Let's Encrypt]:https://letsencrypt.org + +## Source IP address + +By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct information of the IP/network address of the external load balancer. +If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. This allows NGINX to avoid the spoofing of the header. +Another option is to enable proxy protocol using `use-proxy-protocol: "true"`. 
+In this mode NGINX do not uses the content of the header to get the source IP address of the connection. + +## Exposing TCP services + +Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `::[PROXY]:[PROXY]` +It is possible to use a number or the name of the port. The two last fields are optional. Adding `PROXY` in either or both of the two last fields we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/). + +The next example shows how to expose the service `example-go` running in the namespace `default` in the port `8080` using the port `9000` +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-configmap-example +data: + 9000: "default/example-go:8080" +``` + + +Please check the [tcp services](../../examples/tcp/nginx/README.md) example + +## Exposing UDP services + +Since 1.9.13 NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/). + +Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `:` +It is possible to use a number or the name of the port. + +The next example shows how to expose the service `kube-dns` running in the namespace `kube-system` in the port `53` using the port `53` +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: udp-configmap-example +data: + 53: "kube-system/kube-dns:53" +``` + + +Please check the [udp services](../../examples/udp/nginx/README.md) example + +## Proxy Protocol + +If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP addresses. 
To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself. + +Amongst others [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol. + +Please check the [proxy-protocol](examples/proxy-protocol/) example + +### Opentracing + +Using the third party module [rnburn/nginx-opentracing](https://github.com/rnburn/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation. +By default this feature is disabled. + +To enable the instrumentation we just need to enable the instrumentation in the configuration configmap and set the host where we should send the traces. + +In the [aledbf/zipkin-js-example](https://github.com/aledbf/zipkin-js-example) github repository is possible to see a dockerized version of zipkin-js-example with the required Kubernetes descriptors. 
+To install the example and the zipkin collector we just need to run: + +``` +$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/zipkin.yaml +$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/deployment.yaml +``` + +Also we need to configure the NGINX controller configmap with the required values: + +``` +apiVersion: v1 +data: + enable-opentracing: "true" + zipkin-collector-host: zipkin.default.svc.cluster.local +kind: ConfigMap +metadata: + labels: + k8s-app: nginx-ingress-controller + name: nginx-custom-configuration +``` + +Using curl we can generate some traces: +``` +$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example' +$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example' +``` + +In the zipkin inteface we can see the details: + +![zipkin screenshot](docs/images/zipkin-demo.png "zipkin collector screenshot") + +### Custom errors + +In case of an error in a request the body of the response is obtained from the `default backend`. +Each request to the default backend includes two headers: + +- `X-Code` indicates the HTTP code to be returned to the client. +- `X-Format` the value of the `Accept` header. + +**Important:** the custom backend must return the correct HTTP status code to be returned. NGINX do not changes the reponse from the custom default backend. + +Using this two headers is possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx) that inspect each request and returns a custom error page with the format expected by the client. 
Please check the example [custom-errors](examples/customization/custom-errors/nginx/README.md) + +NGINX sends aditional headers that can be used to build custom response: + +- X-Original-URI +- X-Namespace +- X-Ingress-Name +- X-Service-Name + +### NGINX status page + +The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`. +This controller provides an alternative to this module using [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) third party module. +To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed in the port 18080. +Please check the example `example/rc-default.yaml` + +![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter") + +To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json` + +### Running multiple ingress controllers + +If you're running multiple ingress controllers, or running on a cloudprovider that natively handles +ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses +that you would like this controller to claim. Not specifying the annotation will lead to multiple +ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress +controllers ignoring the ingress. Multiple ingress controllers running in the same cluster was not +supported in Kubernetes versions < 1.3. + +### Running on Cloudproviders + +If you're running this ingress controller on a cloudprovider, you should assume the provider also has a native +Ingress controller and specify the ingress.class annotation as indicated in this section. +In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e :80 and :443. 
+ +### Disabling NGINX ingress controller + +Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string, will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller. + +### Log format + +The default configuration uses a custom logging format to add additional information about upstreams + +``` + log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - ' + '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status'; +``` + +Sources: + - [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables) + - [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables) + +Description: +- `$proxy_protocol_addr`: if PROXY protocol is enabled +- `$remote_addr`: if PROXY protocol is disabled (default) +- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the $remote_addr variable appended to it, separated by a comma +- `$remote_user`: user name supplied with the Basic authentication +- `$time_local`: local time in the Common Log Format +- `$request`: full original request line +- `$status`: response status +- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header +- `$http_referer`: value of the Referer header +- `$http_user_agent`: value of User-Agent header +- `$request_length`: request length (including request line, header, and request body) +- `$request_time`: time elapsed since the first bytes were read from the client +- `$proxy_upstream_name`: name of the upstream. 
The format is `upstream---` +- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas +- `$upstream_response_length`: keeps the length of the response obtained from the upstream server +- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution +- `$upstream_status`: keeps status code of the response obtained from the upstream server + +### Local cluster + +Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) is possible to start a local kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md) for more details. + +Use of `hostNetwork: true` in the ingress controller is required to falls back at localhost:8080 for the apiserver if every other client creation check fails (eg: service account not present, kubeconfig doesn't exist, no master env vars...) + +### Debug & Troubleshooting + +Using the flag `--v=XX` it is possible to increase the level of logging. +In particular: +- `--v=2` shows details using `diff` about the changes in the configuration in nginx + +``` +I0316 12:24:37.581267 1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf +I0316 12:24:37.581356 1 utils.go:149] --- /tmp/922554809 2016-03-16 12:24:37.000000000 +0000 ++++ /tmp/079811012 2016-03-16 12:24:37.000000000 +0000 +@@ -235,7 +235,6 @@ + + upstream default-echoheadersx { + least_conn; +- server 10.2.112.124:5000; + server 10.2.208.50:5000; + + } +I0316 12:24:37.610073 1 command.go:69] change in configuration detected. Reloading... 
+``` + +- `--v=3` shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format +- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html) + +### Limitations + +- Ingress rules for TLS require the definition of the field `host` + +### Why endpoints and not services + +The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) to allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT. + +### NGINX notes + +Since `gcr.io/google_containers/nginx-slim:0.8` NGINX contains the following patches: +- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) +NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. Default value in NGINX is 16KB. The ingress controller changes the default to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/) but the size is fixed. This patch adapts the size of the buffer to the content being served, helping to improve the perceived latency. 
diff --git a/controllers/nginx/configuration.md b/configuration.md similarity index 100% rename from controllers/nginx/configuration.md rename to configuration.md diff --git a/controllers/nginx/.gitignore b/controllers/nginx/.gitignore deleted file mode 100644 index f7fc6d3ef..000000000 --- a/controllers/nginx/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -rootfs/nginx-ingress-controller -*/**/.coverprofile \ No newline at end of file diff --git a/controllers/nginx/Makefile b/controllers/nginx/Makefile deleted file mode 100644 index d467e0c20..000000000 --- a/controllers/nginx/Makefile +++ /dev/null @@ -1,136 +0,0 @@ -all: push - -BUILDTAGS= - -# Use the 0.0 tag for testing, it shouldn't clobber any release builds -TAG?=0.9.0-beta.15 -REGISTRY?=gcr.io/google_containers -GOOS?=linux -DOCKER?=gcloud docker -- -SED_I?=sed -i -GOHOSTOS ?= $(shell go env GOHOSTOS) - -ifeq ($(GOHOSTOS),darwin) - SED_I=sed -i '' -endif - -REPO_INFO=$(shell git config --get remote.origin.url) - -ifndef COMMIT - COMMIT := git-$(shell git rev-parse --short HEAD) -endif - -PKG=k8s.io/ingress/controllers/nginx - -ARCH ?= $(shell go env GOARCH) -GOARCH = ${ARCH} -DUMB_ARCH = ${ARCH} - -ALL_ARCH = amd64 arm arm64 ppc64le - -QEMUVERSION=v2.9.1 - -IMGNAME = nginx-ingress-controller -IMAGE = $(REGISTRY)/$(IMGNAME) -MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) - -# Set default base image dynamically for each arch -BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.25 - -ifeq ($(ARCH),arm) - QEMUARCH=arm - GOARCH=arm - DUMB_ARCH=armhf -endif -ifeq ($(ARCH),arm64) - QEMUARCH=aarch64 -endif -ifeq ($(ARCH),ppc64le) - QEMUARCH=ppc64le - GOARCH=ppc64le - DUMB_ARCH=ppc64el -endif -#ifeq ($(ARCH),s390x) -# QEMUARCH=s390x -#endif - -TEMP_DIR := $(shell mktemp -d) - -DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile - -all: all-container - -sub-container-%: - $(MAKE) ARCH=$* build container - -sub-push-%: - $(MAKE) ARCH=$* push - -all-container: $(addprefix sub-container-,$(ALL_ARCH)) - -all-push: $(addprefix 
sub-push-,$(ALL_ARCH)) - -container: .container-$(ARCH) -.container-$(ARCH): - cp -r ./* $(TEMP_DIR) - $(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE) - $(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE) - $(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE) - -ifeq ($(ARCH),amd64) - # When building "normally" for amd64, remove the whole line, it has no part in the amd64 image - $(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE) -else - # When cross-building, only the placeholder "CROSS_BUILD_" should be removed - # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel - $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset - curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs - $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) -endif - - $(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs - -ifeq ($(ARCH), amd64) - # This is for to maintain the backward compatibility - $(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) -endif - -push: .push-$(ARCH) -.push-$(ARCH): - $(DOCKER) push $(MULTI_ARCH_IMG):$(TAG) -ifeq ($(ARCH), amd64) - $(DOCKER) push $(IMAGE):$(TAG) -endif - -clean: - $(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true - -build: clean - CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \ - -ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${TAG} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \ - -o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller - -fmt: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c - -lint: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... 
| grep -v vendor) | xargs -L 1 sh -c - -test: fmt lint vet - @echo "+ $@" - @go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor) - -cover: - @echo "+ $@" - @go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c - gover - goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken ${COVERALLS_TOKEN} - -vet: - @echo "+ $@" - @go vet $(shell go list ${PKG}/... | grep -v vendor) - -release: all-container all-push - echo "done" diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md deleted file mode 100644 index 11c3f6816..000000000 --- a/controllers/nginx/README.md +++ /dev/null @@ -1,553 +0,0 @@ -# Nginx Ingress Controller - -This is an nginx Ingress controller that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works. 
- -## Contents -* [Conventions](#conventions) -* [Requirements](#requirements) -* [Command line arguments](#command-line-arguments) -* [Dry running](#try-running-the-ingress-controller) -* [Deployment](#deployment) -* [HTTP](#http) -* [HTTPS](#https) - * [Default SSL Certificate](#default-ssl-certificate) - * [HTTPS enforcement](#server-side-https-enforcement) - * [HSTS](#http-strict-transport-security) - * [Kube-Lego](#automated-certificate-management-with-kube-lego) -* [Source IP address](#source-ip-address) -* [TCP Services](#exposing-tcp-services) -* [UDP Services](#exposing-udp-services) -* [Proxy Protocol](#proxy-protocol) -* [Opentracing](#opentracing) -* [NGINX customization](configuration.md) -* [Custom errors](#custom-errors) -* [NGINX status page](#nginx-status-page) -* [Running multiple ingress controllers](#running-multiple-ingress-controllers) -* [Running on Cloudproviders](#running-on-cloudproviders) -* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller) -* [Log format](#log-format) -* [Local cluster](#local-cluster) -* [Debug & Troubleshooting](#debug--troubleshooting) -* [Limitations](#limitations) -* [Why endpoints and not services?](#why-endpoints-and-not-services) -* [NGINX Notes](#nginx-notes) - -## Conventions - -Anytime we reference a tls secret, we mean (x509, pem encoded, RSA 2048, etc). 
You can generate such a certificate with: - `openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"` - and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}` - - - -## Requirements -- Default backend [404-server](https://github.com/kubernetes/ingress/tree/master/images/404-server) - - -## Command line arguments -``` -Usage of : - --alsologtostderr log to standard error as well as files - --apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. - --configmap string Name of the ConfigMap that contains the custom configuration to use - --default-backend-service string Service used to serve a 404 page for the default backend. Takes the form - namespace/name. The controller uses the first node port of this Service for - the default backend. - --default-server-port int Default port to use for exposing the default server (catch all) (default 8181) - --default-ssl-certificate string Name of the secret - that contains a SSL certificate to be used as default for a HTTPS catch-all server - --disable-node-list Disable querying nodes. If --force-namespace-isolation is true, this should also be set. - --election-id string Election id to use for status update. (default "ingress-controller-leader") - --enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled - --force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or - configmaps located in a different namespace than the specified in the flag --watch-namespace. - --health-check-path string Defines - the URL to be used as health check inside in the default server in NGINX. 
(default "/healthz") - --healthz-port int port for healthz endpoint. (default 10254) - --http-port int Indicates the port to use for HTTP traffic (default 80) - --https-port int Indicates the port to use for HTTPS traffic (default 443) - --ingress-class string Name of the ingress class to route through this controller. - --kubeconfig string Path to kubeconfig file with authorization and master location information. - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) - --publish-service string Service fronting the ingress controllers. Takes the form - namespace/name. The controller will set the endpoint records on the - ingress objects to reflect those on the service. - --sort-backends Defines if backends and it's endpoints should be sorted - --ssl-passtrough-proxy-port int Default port to use internally for SSL when SSL Passthgough is enabled (default 442) - --status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080) - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - --sync-period duration Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s) - --tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose. - The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port. - The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend - --udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose. 
- The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port. - --update-status Indicates if the - ingress controller should update the Ingress status IP/hostname. Default is true (default true) - --update-status-on-shutdown Indicates if the - ingress controller should update the Ingress status IP/hostname when the controller - is being stopped. Default is true (default true) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces -``` - -## Try running the Ingress controller - -Before deploying the controller to production you might want to run it outside the cluster and observe it. - -```console -$ make build -$ mkdir /etc/nginx-ssl -$ ./rootfs/nginx-ingress-controller --running-in-cluster=false --default-backend-service=kube-system/default-http-backend -``` - -## Deployment - -First create a default backend and it's corresponding service: -``` -$ kubectl create -f examples/default-backend.yaml -``` - -Follow the [example-deployment](../../examples/deployment/nginx/README.md) steps to deploy nginx-ingress-controller in Kubernetes cluster (you may prefer other type of workloads, like Daemonset, in production environment). -Loadbalancers are created via a ReplicationController or Daemonset: - - -## HTTP - -First we need to deploy some application to publish. 
To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the http request as output -``` -kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.8 --replicas=1 --port=8080 -``` - -Now we expose the same application in two different services (so we can create different Ingress rules) -``` -kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x -kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y -``` - -Next we create a couple of Ingress rules -``` -kubectl create -f examples/ingress.yaml -``` - -we check that ingress rules are defined: -``` -$ kubectl get ing -NAME RULE BACKEND ADDRESS -echomap - - foo.bar.com - /foo echoheaders-x:80 - bar.baz.com - /bar echoheaders-y:80 - /foo echoheaders-x:80 -``` - -Before the deploy of the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) -``` -kubectl create -f examples/default-backend.yaml -kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend -``` - -Check NGINX it is running with the defined Ingress rules: - -``` -$ LBIP=$(kubectl get node `kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}') -$ curl $LBIP/foo -H 'Host: foo.bar.com' -``` - -## HTTPS - -You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI. 
The TLS secret must contain keys named tls.crt and tls.key that contain the certificate and private key to use for TLS, eg: - -``` -apiVersion: v1 -data: - tls.crt: base64 encoded cert - tls.key: base64 encoded key -kind: Secret -metadata: - name: foo-secret - namespace: default -type: kubernetes.io/tls -``` - -Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS: - -``` -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: no-rules-map -spec: - tls: - secretName: foo-secret - backend: - serviceName: s1 - servicePort: 80 -``` -Please follow [PREREQUISITES](../../examples/PREREQUISITES.md) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate. - -Check the [example](../../examples/tls-termination/nginx) - -### Default SSL Certificate - -NGINX provides the option [server name](http://nginx.org/en/docs/http/server_names.html) as a catch-all in case of requests that do not match one of the configured server names. This configuration works without issues for HTTP traffic. In case of HTTPS NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case. -If this flag is not provided NGINX will use a self signed certificate. - -Running without the flag `--default-ssl-certificate`: - -``` -$ curl -v https://10.2.78.7:443 -k -* Rebuilt URL to: https://10.2.78.7:443/ -* Trying 10.2.78.4... 
-* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Thu, 21 Jul 2016 15:38:46 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. - -* Connection #0 to host 10.2.78.7 left intact -``` - -Specifying `--default-ssl-certificate=default/foo-tls`: - -``` -core@localhost ~ $ curl -v https://10.2.78.7:443 -k -* Rebuilt URL to: https://10.2.78.7:443/ -* Trying 10.2.78.7... 
-* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0) -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/certs/ca-certificates.crt - CApath: /etc/ssl/certs -* TLSv1.2 (OUT), TLS header, Certificate Status (22): -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=foo.bar.com -* start date: Apr 13 00:50:56 2016 GMT -* expire date: Apr 13 00:50:56 2017 GMT -* issuer: CN=foo.bar.com -* SSL certificate verify result: self signed certificate (18), continuing anyway. -> GET / HTTP/1.1 -> Host: 10.2.78.7 -> User-Agent: curl/7.47.1 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.11.1 -< Date: Mon, 18 Jul 2016 21:02:59 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload -< -The page you're looking for could not be found. - -* Connection #0 to host 10.2.78.7 left intact -``` - - -### Server-side HTTPS enforcement - -By default the controller redirects (301) to HTTPS if TLS is enabled for that ingress . If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map. 
- -To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource. - - -### HTTP Strict Transport Security - -HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. - -By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. - -To disable this behavior use `hsts=false` in the NGINX config map. - - -### Automated Certificate Management with Kube-Lego - -[Kube-Lego] automatically requests missing or expired certificates from -[Let's Encrypt] by monitoring ingress resources and their referenced secrets. To -enable this for an ingress resource you have to add an annotation: - -``` -kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true" -``` - -To setup Kube-Lego you can take a look at this [full example]. The first -version to fully support Kube-Lego is nginx Ingress controller 0.8. - -[full example]:https://github.com/jetstack/kube-lego/tree/master/examples -[Kube-Lego]:https://github.com/jetstack/kube-lego -[Let's Encrypt]:https://letsencrypt.org - -## Source IP address - -By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct information of the IP/network address of the external load balancer. -If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. This allows NGINX to avoid the spoofing of the header. -Another option is to enable proxy protocol using `use-proxy-protocol: "true"`. 
-In this mode NGINX do not uses the content of the header to get the source IP address of the connection. - -## Exposing TCP services - -Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `::[PROXY]:[PROXY]` -It is possible to use a number or the name of the port. The two last fields are optional. Adding `PROXY` in either or both of the two last fields we can use Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/). - -The next example shows how to expose the service `example-go` running in the namespace `default` in the port `8080` using the port `9000` -``` -apiVersion: v1 -kind: ConfigMap -metadata: - name: tcp-configmap-example -data: - 9000: "default/example-go:8080" -``` - - -Please check the [tcp services](../../examples/tcp/nginx/README.md) example - -## Exposing UDP services - -Since 1.9.13 NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/). - -Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `:` -It is possible to use a number or the name of the port. - -The next example shows how to expose the service `kube-dns` running in the namespace `kube-system` in the port `53` using the port `53` -``` -apiVersion: v1 -kind: ConfigMap -metadata: - name: udp-configmap-example -data: - 53: "kube-system/kube-dns:53" -``` - - -Please check the [udp services](../../examples/udp/nginx/README.md) example - -## Proxy Protocol - -If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP addresses. 
To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself. - -Amongst others [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol. - -Please check the [proxy-protocol](examples/proxy-protocol/) example - -### Opentracing - -Using the third party module [rnburn/nginx-opentracing](https://github.com/rnburn/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation. -By default this feature is disabled. - -To enable the instrumentation we just need to enable the instrumentation in the configuration configmap and set the host where we should send the traces. - -In the [aledbf/zipkin-js-example](https://github.com/aledbf/zipkin-js-example) github repository is possible to see a dockerized version of zipkin-js-example with the required Kubernetes descriptors. 
-To install the example and the zipkin collector we just need to run: - -``` -$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/zipkin.yaml -$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/deployment.yaml -``` - -Also we need to configure the NGINX controller configmap with the required values: - -``` -apiVersion: v1 -data: - enable-opentracing: "true" - zipkin-collector-host: zipkin.default.svc.cluster.local -kind: ConfigMap -metadata: - labels: - k8s-app: nginx-ingress-controller - name: nginx-custom-configuration -``` - -Using curl we can generate some traces: -``` -$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example' -$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example' -``` - -In the zipkin inteface we can see the details: - -![zipkin screenshot](docs/images/zipkin-demo.png "zipkin collector screenshot") - -### Custom errors - -In case of an error in a request the body of the response is obtained from the `default backend`. -Each request to the default backend includes two headers: - -- `X-Code` indicates the HTTP code to be returned to the client. -- `X-Format` the value of the `Accept` header. - -**Important:** the custom backend must return the correct HTTP status code to be returned. NGINX do not changes the reponse from the custom default backend. - -Using this two headers is possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx) that inspect each request and returns a custom error page with the format expected by the client. 
Please check the example [custom-errors](examples/customization/custom-errors/nginx/README.md) - -NGINX sends aditional headers that can be used to build custom response: - -- X-Original-URI -- X-Namespace -- X-Ingress-Name -- X-Service-Name - -### NGINX status page - -The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`. -This controller provides an alternative to this module using [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) third party module. -To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed in the port 18080. -Please check the example `example/rc-default.yaml` - -![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter") - -To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json` - -### Running multiple ingress controllers - -If you're running multiple ingress controllers, or running on a cloudprovider that natively handles -ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses -that you would like this controller to claim. Not specifying the annotation will lead to multiple -ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress -controllers ignoring the ingress. Multiple ingress controllers running in the same cluster was not -supported in Kubernetes versions < 1.3. - -### Running on Cloudproviders - -If you're running this ingress controller on a cloudprovider, you should assume the provider also has a native -Ingress controller and specify the ingress.class annotation as indicated in this section. -In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e :80 and :443. 
- -### Disabling NGINX ingress controller - -Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string, will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller. - -### Log format - -The default configuration uses a custom logging format to add additional information about upstreams - -``` - log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - ' - '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' - '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status'; -``` - -Sources: - - [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables) - - [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables) - -Description: -- `$proxy_protocol_addr`: if PROXY protocol is enabled -- `$remote_addr`: if PROXY protocol is disabled (default) -- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the $remote_addr variable appended to it, separated by a comma -- `$remote_user`: user name supplied with the Basic authentication -- `$time_local`: local time in the Common Log Format -- `$request`: full original request line -- `$status`: response status -- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header -- `$http_referer`: value of the Referer header -- `$http_user_agent`: value of User-Agent header -- `$request_length`: request length (including request line, header, and request body) -- `$request_time`: time elapsed since the first bytes were read from the client -- `$proxy_upstream_name`: name of the upstream. 
The format is `upstream---` -- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas -- `$upstream_response_length`: keeps the length of the response obtained from the upstream server -- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution -- `$upstream_status`: keeps status code of the response obtained from the upstream server - -### Local cluster - -Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) is possible to start a local kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md) for more details. - -Use of `hostNetwork: true` in the ingress controller is required to falls back at localhost:8080 for the apiserver if every other client creation check fails (eg: service account not present, kubeconfig doesn't exist, no master env vars...) - -### Debug & Troubleshooting - -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: -- `--v=2` shows details using `diff` about the changes in the configuration in nginx - -``` -I0316 12:24:37.581267 1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf -I0316 12:24:37.581356 1 utils.go:149] --- /tmp/922554809 2016-03-16 12:24:37.000000000 +0000 -+++ /tmp/079811012 2016-03-16 12:24:37.000000000 +0000 -@@ -235,7 +235,6 @@ - - upstream default-echoheadersx { - least_conn; -- server 10.2.112.124:5000; - server 10.2.208.50:5000; - - } -I0316 12:24:37.610073 1 command.go:69] change in configuration detected. Reloading... 
-``` - -- `--v=3` shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format -- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html) - -### Limitations - -- Ingress rules for TLS require the definition of the field `host` - -### Why endpoints and not services - -The NGINX ingress controller does not uses [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) to allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT. - -### NGINX notes - -Since `gcr.io/google_containers/nginx-slim:0.8` NGINX contains the next patches: -- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) -NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. Default value in NGINX is 16KB. The ingress controller changes the default to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/) but the size is fixed. This patches adapts the size of the buffer to the content is being served helping to improve the perceived latency. 
diff --git a/controllers/nginx/docs/images/zipkin-demo.png b/docs/images/zipkin-demo.png similarity index 100% rename from controllers/nginx/docs/images/zipkin-demo.png rename to docs/images/zipkin-demo.png diff --git a/controllers/nginx/examples/default-backend.yaml b/examples/default-backend.yaml similarity index 100% rename from controllers/nginx/examples/default-backend.yaml rename to examples/default-backend.yaml diff --git a/examples/deployment/gce/README.md b/examples/deployment/gce/README.md deleted file mode 100644 index 229478917..000000000 --- a/examples/deployment/gce/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Deploying the GCE Ingress controller - -This example demonstrates the deployment of a GCE Ingress controller. - -Note: __all GCE/GKE clusters already have an Ingress controller running -on the master. The only reason to deploy another GCE controller is if you want -to debug or otherwise observe its operation (eg via kubectl logs).__ - -__Before deploying another one in your cluster, make sure you disable the master controller.__ - -## Disabling the master controller - -See the hard disable options [here](/docs/faq/gce.md#how-do-i-disable-the-gce-ingress-controller). 
- -## Deploying a new controller - -The following command deploys a GCE Ingress controller in your cluster: - -```console -$ kubectl create -f gce-ingress-controller.yaml -service "default-http-backend" created -replicationcontroller "l7-lb-controller" created - -$ kubectl get po -l name=glbc -NAME READY STATUS RESTARTS AGE -l7-lb-controller-1s22c 2/2 Running 0 27s -``` - -Now you can create an Ingress and observe the controller: - -```console -$ kubectl create -f gce-tls-ingress.yaml -ingress "test" created - -$ kubectl logs l7-lb-controller-1s22c -c l7-lb-controller -I0201 01:03:17.387548 1 main.go:179] Starting GLBC image: glbc:0.9.2, cluster name -I0201 01:03:18.459740 1 main.go:291] Using saved cluster uid "32658fa96c080068" -I0201 01:03:18.459771 1 utils.go:122] Changing cluster name from to 32658fa96c080068 -I0201 01:03:18.461652 1 gce.go:331] Using existing Token Source &oauth2.reuseTokenSource{new:google.computeSource{account:""}, mu:sync.Mutex{state:0, sema:0x0}, t:(*oauth2.Token)(nil)} -I0201 01:03:18.553142 1 cluster_manager.go:264] Created GCE client without a config file -I0201 01:03:18.553773 1 controller.go:234] Starting loadbalancer controller -I0201 01:04:58.314271 1 event.go:217] Event(api.ObjectReference{Kind:"Ingress", Namespace:"default", Name:"test", UID:"73549716-e81a-11e6-a8c5-42010af00002", APIVersion:"extensions", ResourceVersion:"673016", FieldPath:""}): type: 'Normal' reason: 'ADD' default/test -I0201 01:04:58.413616 1 instances.go:76] Creating instance group k8s-ig--32658fa96c080068 in zone us-central1-b -I0201 01:05:01.998169 1 gce.go:2084] Adding port 30301 to instance group k8s-ig--32658fa96c080068 with 0 ports -I0201 01:05:02.444014 1 backends.go:149] Creating backend for 1 instance groups, port 30301 named port &{port30301 30301 []} -I0201 01:05:02.444175 1 utils.go:495] No pod in service http-svc with node port 30301 has declared a matching readiness probe for health checks. 
-I0201 01:05:02.555599 1 healthchecks.go:62] Creating health check k8s-be-30301--32658fa96c080068 -I0201 01:05:11.300165 1 gce.go:2084] Adding port 31938 to instance group k8s-ig--32658fa96c080068 with 1 ports -I0201 01:05:11.743914 1 backends.go:149] Creating backend for 1 instance groups, port 31938 named port &{port31938 31938 []} -I0201 01:05:11.744008 1 utils.go:495] No pod in service default-http-backend with node port 31938 has declared a matching readiness probe for health checks. -I0201 01:05:11.811972 1 healthchecks.go:62] Creating health check k8s-be-31938--32658fa96c080068 -I0201 01:05:19.871791 1 loadbalancers.go:121] Creating l7 default-test--32658fa96c080068 -... - -$ kubectl get ing test -NAME HOSTS ADDRESS PORTS AGE -test * 35.186.208.106 80, 443 4m - -$ curl 35.186.208.106 -kL -CLIENT VALUES: -client_address=10.180.3.1 -command=GET -real path=/ -query=nil -request_version=1.1 -request_uri=http://35.186.208.106:8080/ -... -``` diff --git a/examples/deployment/gce/gce-ingress-controller.yaml b/examples/deployment/gce/gce-ingress-controller.yaml deleted file mode 100644 index 8bbee4bba..000000000 --- a/examples/deployment/gce/gce-ingress-controller.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - # This must match the --default-backend-service argument of the l7 lb - # controller and is required because GCE mandates a default backend. - name: default-http-backend - labels: - k8s-app: glbc -spec: - # The default backend must be of type NodePort. - type: NodePort - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - name: http - selector: - k8s-app: glbc - ---- -apiVersion: v1 -kind: ReplicationController -metadata: - name: l7-lb-controller - labels: - k8s-app: glbc - version: v0.9.0 -spec: - # There should never be more than 1 controller alive simultaneously. 
- replicas: 1 - selector: - k8s-app: glbc - version: v0.9.0 - template: - metadata: - labels: - k8s-app: glbc - version: v0.9.0 - name: glbc - spec: - terminationGracePeriodSeconds: 600 - containers: - - name: default-http-backend - # Any image is permissable as long as: - # 1. It serves a 404 page at / - # 2. It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.0 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi - - image: gcr.io/google_containers/glbc:0.9.2 - livenessProbe: - httpGet: - path: /healthz - port: 8081 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - name: l7-lb-controller - resources: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - args: - - --default-backend-service=default/default-http-backend - - --sync-period=300s diff --git a/examples/deployment/gce/gce-tls-ingress.yaml b/examples/deployment/gce/gce-tls-ingress.yaml deleted file mode 100644 index 705a17d36..000000000 --- a/examples/deployment/gce/gce-tls-ingress.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test - annotations: - kubernetes.io/ingress.class: "gce" -spec: - tls: - # This assumes tls-secret exists. - - secretName: tls-secret - backend: - # This assumes http-svc exists and routes to healthy endpoints. 
- serviceName: http-svc - servicePort: 80 - diff --git a/controllers/nginx/examples/echo-header.yaml b/examples/echo-header.yaml similarity index 100% rename from controllers/nginx/examples/echo-header.yaml rename to examples/echo-header.yaml diff --git a/examples/health-checks/gce/README.md b/examples/health-checks/gce/README.md deleted file mode 100644 index a2d6e710a..000000000 --- a/examples/health-checks/gce/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Simple HTTP health check example - -The GCE Ingress controller adopts the readiness probe from the matching endpoints, provided the readiness probe doesn't require HTTPS or special headers. - -Create the following app: -```console -$ kubectl create -f health_check_app.yaml -replicationcontroller "echoheaders" created -You have exposed your service on an external port on all nodes in your -cluster. If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (tcp:31165) to serve traffic. - -See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. -service "echoheadersx" created -You have exposed your service on an external port on all nodes in your -cluster. If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (tcp:31020) to serve traffic. - -See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. -service "echoheadersy" created -ingress "echomap" created -``` - -You should soon find an Ingress that is backed by a GCE Loadbalancer. 
- -```console -$ kubectl describe ing echomap -Name: echomap -Namespace: default -Address: 107.178.255.228 -Default backend: default-http-backend:80 (10.180.0.9:8080,10.240.0.2:8080) -Rules: - Host Path Backends - ---- ---- -------- - foo.bar.com - /foo echoheadersx:80 () - bar.baz.com - /bar echoheadersy:80 () - /foo echoheadersx:80 () -Annotations: - target-proxy: k8s-tp-default-echomap--a9d60e8176d933ee - url-map: k8s-um-default-echomap--a9d60e8176d933ee - backends: {"k8s-be-31020--a9d60e8176d933ee":"HEALTHY","k8s-be-31165--a9d60e8176d933ee":"HEALTHY","k8s-be-31686--a9d60e8176d933ee":"HEALTHY"} - forwarding-rule: k8s-fw-default-echomap--a9d60e8176d933ee -Events: - FirstSeen LastSeen Count From SubobjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 17m 17m 1 {loadbalancer-controller } Normal ADD default/echomap - 15m 15m 1 {loadbalancer-controller } Normal CREATE ip: 107.178.255.228 - -$ curl 107.178.255.228/foo -H 'Host:foo.bar.com' -CLIENT VALUES: -client_address=10.240.0.5 -command=GET -real path=/foo -query=nil -request_version=1.1 -request_uri=http://foo.bar.com:8080/foo -... -``` - -You can confirm the health check endpoint point it's using one of 2 ways: -* Through the cloud console: compute > health checks > lookup your health check. It takes the form k8s-be-nodePort-hash, where nodePort in the example above is 31165 and 31020, as shown by the kubectl output. -* Through gcloud: Run `gcloud compute http-health-checks list` - -## Limitations - -A few points to note: -* The readiness probe must be exposed on the port matching the `servicePort` specified in the Ingress -* The readiness probe cannot have special requirements like headers -* The probe timeouts are translated to GCE health check timeouts -* You must create the pods backing the endpoints with the given readiness probe. This *will not* work if you update the replication controller with a different readiness probe. 
diff --git a/examples/health-checks/gce/health_check_app.yaml b/examples/health-checks/gce/health_check_app.yaml deleted file mode 100644 index b8d36bf38..000000000 --- a/examples/health-checks/gce/health_check_app.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: echoheaders -spec: - replicas: 1 - template: - metadata: - labels: - app: echoheaders - spec: - containers: - - name: echoheaders - image: gcr.io/google_containers/echoserver:1.8 - ports: - - containerPort: 8080 - readinessProbe: - httpGet: - path: /healthz - port: 8080 - periodSeconds: 1 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 10 - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - ---- -apiVersion: v1 -kind: Service -metadata: - name: echoheadersx - labels: - app: echoheaders -spec: - type: NodePort - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - name: http - selector: - app: echoheaders ---- -apiVersion: v1 -kind: Service -metadata: - name: echoheadersy - labels: - app: echoheaders -spec: - type: NodePort - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - name: http - selector: - app: echoheaders ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: echomap -spec: - rules: - - host: foo.bar.com - http: - paths: - - path: /foo - backend: - serviceName: echoheadersx - servicePort: 80 - - host: bar.baz.com - http: - paths: - - path: /bar - backend: - serviceName: echoheadersy - servicePort: 80 - - path: /foo - backend: - serviceName: echoheadersx - servicePort: 80 - diff --git a/controllers/nginx/examples/ingress.yaml b/examples/ingress.yaml similarity index 100% rename from controllers/nginx/examples/ingress.yaml rename to examples/ingress.yaml diff 
--git a/examples/static-ip/gce/README.md b/examples/static-ip/gce/README.md deleted file mode 100644 index 07917ac50..000000000 --- a/examples/static-ip/gce/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# Static IPs - -This example demonstrates how to assign a [static-ip](https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) to an Ingress on GCE. - -## Prerequisites - -You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example. -You will also need to make sure you Ingress targets exactly one Ingress -controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class), -and that you have an ingress controller [running](/examples/deployment) in your cluster. - -## Acquiring a static IP - -In GCE, static IP belongs to a given project until the owner decides to release -it. If you create a static IP and assign it to an Ingress, deleting the Ingress -or tearing down the GKE cluster *will not* delete the static IP. You can check -the static IPs you have as follows - -```console -$ gcloud compute addresses list --global -NAME REGION ADDRESS STATUS -test-ip 35.186.221.137 RESERVED - -$ gcloud compute addresses list -NAME REGION ADDRESS STATUS -test-ip 35.186.221.137 RESERVED -test-ip us-central1 35.184.21.228 RESERVED -``` - -Note the difference between a regional and a global static ip. Only global -static-ips will work with Ingress. If you don't already have an IP, you can -create it - -```console -$ gcloud compute addresses create test-ip --global -Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip]. 
---- -address: 35.186.221.137 -creationTimestamp: '2017-01-31T10:32:29.889-08:00' -description: '' -id: '9221457935391876818' -kind: compute#address -name: test-ip -selfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip -status: RESERVED -``` - -## Assigning a static IP to an Ingress - -You can now add the static IP from the previous step to an Ingress, -by specifying the `kubernetes.io/global-static-ip-name` annotation, -the example yaml in this directory already has it set to `test-ip` - -```console -$ kubectl create -f gce-static-ip-ingress.yaml -ingress "static-ip" created - -$ gcloud compute addresses list test-ip -NAME REGION ADDRESS STATUS -test-ip 35.186.221.137 IN_USE -test-ip us-central1 35.184.21.228 RESERVED - -$ kubectl get ing -NAME HOSTS ADDRESS PORTS AGE -static-ip * 35.186.221.137 80, 443 1m - -$ curl 35.186.221.137 -Lk -CLIENT VALUES: -client_address=10.180.1.1 -command=GET -real path=/ -query=nil -request_version=1.1 -request_uri=http://35.186.221.137:8080/ -... -``` - -## Retaining the static IP - -You can test retention by deleting the Ingress - -```console -$ kubectl delete -f gce-static-ip-ingress.yaml -ingress "static-ip" deleted - -$ kubectl get ing -No resources found. - -$ gcloud compute addresses list test-ip --global -NAME REGION ADDRESS STATUS -test-ip 35.186.221.137 RESERVED -``` - -## Promote ephemeral to static IP - -If you simply create a HTTP Ingress resource, it gets an ephemeral IP - -```console -$ kubectl create -f gce-http-ingress.yaml -ingress "http-ingress" created - -$ kubectl get ing -NAME HOSTS ADDRESS PORTS AGE -http-ingress * 35.186.195.33 80 1h - -$ gcloud compute forwarding-rules list -NAME REGION IP_ADDRESS IP_PROTOCOL TARGET -k8s-fw-default-http-ingress--32658fa96c080068 35.186.195.33 TCP k8s-tp-default-http-ingress--32658fa96c080068 -``` - -Note that because this is an ephemeral IP, it won't show up in the output of -`gcloud compute addresses list`. 
- -If you either directly create an Ingress with a TLS section, or modify a HTTP -Ingress to have a TLS section, it gets a static IP. - -```console -$ kubectl patch ing http-ingress -p '{"spec":{"tls":[{"secretName":"tls-secret"}]}}' -"http-ingress" patched - -$ kubectl get ing -NAME HOSTS ADDRESS PORTS AGE -http-ingress * 35.186.195.33 80, 443 1h - -$ gcloud compute addresses list -NAME REGION ADDRESS STATUS -k8s-fw-default-http-ingress--32658fa96c080068 35.186.195.33 IN_USE -``` - diff --git a/examples/static-ip/gce/gce-http-ingress.yaml b/examples/static-ip/gce/gce-http-ingress.yaml deleted file mode 100644 index ca0e34ca5..000000000 --- a/examples/static-ip/gce/gce-http-ingress.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: http-ingress - annotations: - kubernetes.io/ingress.class: "gce" -spec: - backend: - # This assumes http-svc exists and routes to healthy endpoints. - serviceName: http-svc - servicePort: 80 - diff --git a/examples/static-ip/gce/gce-static-ip-ingress.yaml b/examples/static-ip/gce/gce-static-ip-ingress.yaml deleted file mode 100644 index 7742b8705..000000000 --- a/examples/static-ip/gce/gce-static-ip-ingress.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: static-ip - # Assumes a global static ip with the same name exists. - # You can acquire a static IP by running - # gcloud compute addresses create test-ip --global - annotations: - kubernetes.io/ingress.global-static-ip-name: "test-ip" - kubernetes.io/ingress.class: "gce" -spec: - tls: - # This assumes tls-secret exists. - - secretName: tls-secret - backend: - # This assumes http-svc exists and routes to healthy endpoints. 
- serviceName: http-svc - servicePort: 80 - diff --git a/examples/tls-termination/gce/README.md b/examples/tls-termination/gce/README.md deleted file mode 100644 index bc674d8a2..000000000 --- a/examples/tls-termination/gce/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# TLS termination - -This example demonstrates how to terminate TLS through the GCE Ingress controller. - -## Prerequisites - -You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example. -You will also need to make sure you Ingress targets exactly one Ingress -controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class), -and that you have an ingress controller [running](/examples/deployment) in your cluster. - -## Deployment - -The following command instructs the controller to terminate traffic using -the provided TLS cert, and forward un-encrypted HTTP traffic to the test -HTTP service. - -```console -$ kubectl create -f gce-tls-ingress.yaml -``` - -## Validation - -You can confirm that the Ingress works. 
- -```console -$ kubectl describe ing gce-test -Name: gce-test -Namespace: default -Address: 35.186.221.137 -Default backend: http-svc:80 (10.180.1.9:8080,10.180.3.6:8080) -TLS: - tls-secret terminates -Rules: - Host Path Backends - ---- ---- -------- - * * http-svc:80 (10.180.1.9:8080,10.180.3.6:8080) -Annotations: - target-proxy: k8s-tp-default-gce-test--32658fa96c080068 - url-map: k8s-um-default-gce-test--32658fa96c080068 - backends: {"k8s-be-30301--32658fa96c080068":"Unknown"} - forwarding-rule: k8s-fw-default-gce-test--32658fa96c080068 - https-forwarding-rule: k8s-fws-default-gce-test--32658fa96c080068 - https-target-proxy: k8s-tps-default-gce-test--32658fa96c080068 - static-ip: k8s-fw-default-gce-test--32658fa96c080068 -Events: - FirstSeen LastSeen Count From SubObjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 2m 2m 1 {loadbalancer-controller } Normal ADD default/gce-test - 1m 1m 1 {loadbalancer-controller } Normal CREATE ip: 35.186.221.137 - 1m 1m 3 {loadbalancer-controller } Normal Service default backend set to http-svc:30301 - -$ curl 35.186.221.137 -k -curl 35.186.221.137 -L -curl: (60) SSL certificate problem: self signed certificate -More details here: http://curl.haxx.se/docs/sslcerts.html - -$ curl 35.186.221.137 -kl -CLIENT VALUES: -client_address=10.240.0.3 -command=GET -real path=/ -query=nil -request_version=1.1 -request_uri=http://35.186.221.137:8080/ - -SERVER VALUES: -server_version=nginx: 1.9.11 - lua: 10001 - -HEADERS RECEIVED: -accept=*/* -connection=Keep-Alive -host=35.186.221.137 -user-agent=curl/7.46.0 -via=1.1 google -x-cloud-trace-context=bfa123130fd623989cca0192e43d9ba4/8610689379063045825 -x-forwarded-for=104.132.0.80, 35.186.221.137 -x-forwarded-proto=https -``` diff --git a/examples/tls-termination/gce/gce-tls-ingress.yaml b/examples/tls-termination/gce/gce-tls-ingress.yaml deleted file mode 100644 index 705a17d36..000000000 --- 
a/examples/tls-termination/gce/gce-tls-ingress.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test - annotations: - kubernetes.io/ingress.class: "gce" -spec: - tls: - # This assumes tls-secret exists. - - secretName: tls-secret - backend: - # This assumes http-svc exists and routes to healthy endpoints. - serviceName: http-svc - servicePort: 80 - diff --git a/hack/e2e-internal/e2e-down.sh b/hack/e2e-internal/e2e-down.sh deleted file mode 100755 index 62ee7aec2..000000000 --- a/hack/e2e-internal/e2e-down.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -[[ $DEBUG ]] && set -x - -set -eof pipefail - -# include env -. hack/e2e-internal/e2e-env.sh - -echo "Destroying running docker containers..." -# do not failt if the container is not running -docker rm -f kubelet || true -docker rm -f apiserver || true -docker rm -f etcd || true diff --git a/hack/e2e-internal/e2e-env.sh b/hack/e2e-internal/e2e-env.sh deleted file mode 100755 index d0747bb6e..000000000 --- a/hack/e2e-internal/e2e-env.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -[[ $DEBUG ]] && set -x - -export ETCD_VERSION=3.0.14 -export K8S_VERSION=1.4.5 - -export PWD=`pwd` -export BASEDIR="$(dirname ${BASH_SOURCE})" -export KUBECTL="${BASEDIR}/kubectl" -export GOOS="${GOOS:-linux}" - -if [ ! -e ${KUBECTL} ]; then - echo "kubectl binary is missing. downloading..." 
- curl -sSL http://storage.googleapis.com/kubernetes-release/release/v${K8S_VERSION}/bin/${GOOS}/amd64/kubectl -o ${KUBECTL} - chmod u+x ${KUBECTL} -fi - -${KUBECTL} config set-cluster travis --server=http://0.0.0.0:8080 -${KUBECTL} config set-context travis --cluster=travis -${KUBECTL} config use-context travis diff --git a/hack/e2e-internal/e2e-status.sh b/hack/e2e-internal/e2e-status.sh deleted file mode 100755 index 21a4e9b29..000000000 --- a/hack/e2e-internal/e2e-status.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -[[ $DEBUG ]] && set -x - -set -eof pipefail - -# include env -. hack/e2e-internal/e2e-env.sh - -echo "Kubernetes information:" -${KUBECTL} version diff --git a/hack/e2e-internal/e2e-up.sh b/hack/e2e-internal/e2e-up.sh deleted file mode 100755 index 15b2d4631..000000000 --- a/hack/e2e-internal/e2e-up.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -[[ $DEBUG ]] && set -x - -set -eof pipefail - -# include env -. hack/e2e-internal/e2e-env.sh - -echo "Starting etcd..." -docker run -d \ - --net=host \ - --name=etcd \ - quay.io/coreos/etcd:v$ETCD_VERSION - -echo "Starting kubernetes..." 
- -docker run -d --name=apiserver \ - --net=host \ - --pid=host \ - --privileged=true \ - gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ - /hyperkube apiserver \ - --insecure-bind-address=0.0.0.0 \ - --service-cluster-ip-range=10.0.0.1/24 \ - --etcd_servers=http://127.0.0.1:4001 \ - --v=2 - -docker run -d --name=kubelet \ - --volume=/:/rootfs:ro \ - --volume=/sys:/sys:ro \ - --volume=/dev:/dev \ - --volume=/var/lib/docker/:/var/lib/docker:rw \ - --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \ - --volume=/var/run:/var/run:rw \ - --net=host \ - --pid=host \ - --privileged=true \ - gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ - /hyperkube kubelet \ - --containerized \ - --hostname-override="0.0.0.0" \ - --address="0.0.0.0" \ - --cluster_dns=10.0.0.10 --cluster_domain=cluster.local \ - --api-servers=http://localhost:8080 \ - --config=/etc/kubernetes/manifests-multi - -echo "waiting until api server is available..." -until curl -o /dev/null -sIf http://0.0.0.0:8080; do \ - sleep 10; -done; - -echo "Kubernetes started" -echo "Kubernetes information:" -${KUBECTL} version diff --git a/hack/e2e-internal/ginkgo-e2e.sh b/hack/e2e-internal/ginkgo-e2e.sh deleted file mode 100755 index aa3c61ce6..000000000 --- a/hack/e2e-internal/ginkgo-e2e.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -echo "running ginkgo" \ No newline at end of file diff --git a/hack/e2e.go b/hack/e2e.go deleted file mode 100644 index be9f2aa28..000000000 --- a/hack/e2e.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// e2e.go runs the e2e test suite. No non-standard package dependencies; call with "go run". -package main - -import ( - "encoding/xml" - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "strings" - "time" -) - -var ( - build = flag.Bool("build", true, "Build the backends images indicated by the env var BACKENDS required to run e2e tests.") - up = flag.Bool("up", true, "Creates a kubernetes cluster using hyperkube (containerized kubelet).") - down = flag.Bool("down", true, "destroys the created cluster.") - test = flag.Bool("test", true, "Run Ginkgo tests.") - dump = flag.String("dump", "", "If set, dump cluster logs to this location on test or cluster-up failure") - testArgs = flag.String("test-args", "", "Space-separated list of arguments to pass to Ginkgo test runner.") - deployment = flag.String("deployment", "bash", "up/down mechanism") - verbose = flag.Bool("v", false, "If true, print all command output.") -) - -func appendError(errs []error, err error) []error { - if err != nil { - return append(errs, err) - } - return errs -} - -func validWorkingDirectory() error { - cwd, err := os.Getwd() - if err != nil { - return fmt.Errorf("could not get pwd: %v", err) - } - acwd, err := filepath.Abs(cwd) - if err != nil { - return fmt.Errorf("failed to convert %s to an absolute path: %v", cwd, err) - } - if !strings.Contains(filepath.Base(acwd), "ingress") { - return fmt.Errorf("must run from git root directory: %v", acwd) - } - return nil -} - -type TestCase struct { - XMLName xml.Name `xml:"testcase"` - ClassName string `xml:"classname,attr"` - Name string `xml:"name,attr"` - Time float64 `xml:"time,attr"` - Failure string `xml:"failure,omitempty"` -} - -type TestSuite struct { - XMLName xml.Name `xml:"testsuite"` - Failures int `xml:"failures,attr"` - Tests int `xml:"tests,attr"` - Time float64 `xml:"time,attr"` - Cases []TestCase -} 
- -var suite TestSuite - -func xmlWrap(name string, f func() error) error { - start := time.Now() - err := f() - duration := time.Since(start) - c := TestCase{ - Name: name, - ClassName: "e2e.go", - Time: duration.Seconds(), - } - if err != nil { - c.Failure = err.Error() - suite.Failures++ - } - suite.Cases = append(suite.Cases, c) - suite.Tests++ - return err -} - -func writeXML(start time.Time) { - suite.Time = time.Since(start).Seconds() - out, err := xml.MarshalIndent(&suite, "", " ") - if err != nil { - log.Fatalf("Could not marshal XML: %s", err) - } - path := filepath.Join(*dump, "junit_runner.xml") - f, err := os.Create(path) - if err != nil { - log.Fatalf("Could not create file: %s", err) - } - defer f.Close() - if _, err := f.WriteString(xml.Header); err != nil { - log.Fatalf("Error writing XML header: %s", err) - } - if _, err := f.Write(out); err != nil { - log.Fatalf("Error writing XML data: %s", err) - } - log.Printf("Saved XML output to %s.", path) -} - -func main() { - log.SetFlags(log.LstdFlags | log.Lshortfile) - flag.Parse() - - if err := validWorkingDirectory(); err != nil { - log.Fatalf("Called from invalid working directory: %v", err) - } - - deploy, err := getDeployer() - if err != nil { - log.Fatalf("Error creating deployer: %v", err) - } - - if err := run(deploy); err != nil { - log.Fatalf("Something went wrong: %s", err) - } -} - -func run(deploy deployer) error { - if *dump != "" { - defer writeXML(time.Now()) - } - - if *build { - if err := xmlWrap("Build", Build); err != nil { - return fmt.Errorf("error building: %s", err) - } - } - - if *up { - if err := xmlWrap("TearDown", deploy.Down); err != nil { - return fmt.Errorf("error tearing down previous cluster: %s", err) - } - } - - var errs []error - - if *up { - // If we tried to bring the cluster up, make a courtesy - // attempt to bring it down so we're not leaving resources around. - // - // TODO: We should try calling deploy.Down exactly once. 
Though to - // stop the leaking resources for now, we want to be on the safe side - // and call it explicitly in defer if the other one is not called. - if *down { - defer xmlWrap("Deferred TearDown", deploy.Down) - } - // Start the cluster using this version. - if err := xmlWrap("Up", deploy.Up); err != nil { - return fmt.Errorf("starting e2e cluster: %s", err) - } - if *dump != "" { - cmd := exec.Command("./cluster/kubectl.sh", "--match-server-version=false", "get", "nodes", "-oyaml") - b, err := cmd.CombinedOutput() - if *verbose { - log.Printf("kubectl get nodes:\n%s", string(b)) - } - if err == nil { - if err := ioutil.WriteFile(filepath.Join(*dump, "nodes.yaml"), b, 0644); err != nil { - errs = appendError(errs, fmt.Errorf("error writing nodes.yaml: %v", err)) - } - } else { - errs = appendError(errs, fmt.Errorf("error running get nodes: %v", err)) - } - } - } - - if *test { - if err := xmlWrap("IsUp", deploy.IsUp); err != nil { - errs = appendError(errs, err) - } else { - errs = appendError(errs, Test()) - } - } - - if len(errs) > 0 && *dump != "" { - errs = appendError(errs, xmlWrap("DumpClusterLogs", func() error { - return DumpClusterLogs(*dump) - })) - } - - if *down { - errs = appendError(errs, xmlWrap("TearDown", deploy.Down)) - } - - if len(errs) != 0 { - return fmt.Errorf("encountered %d errors: %v", len(errs), errs) - } - return nil -} - -func Build() error { - // The build-release script needs stdin to ask the user whether - // it's OK to download the docker image. 
- cmd := exec.Command("make", "docker-build") - cmd.Stdin = os.Stdin - if err := finishRunning("build-release", cmd); err != nil { - return fmt.Errorf("error building: %v", err) - } - return nil -} - -type deployer interface { - Up() error - IsUp() error - SetupKubecfg() error - Down() error -} - -func getDeployer() (deployer, error) { - switch *deployment { - case "bash": - return bash{}, nil - default: - return nil, fmt.Errorf("unknown deployment strategy %q", *deployment) - } -} - -type bash struct{} - -func (b bash) Up() error { - return finishRunning("up", exec.Command("./hack/e2e-internal/e2e-up.sh")) -} - -func (b bash) IsUp() error { - return finishRunning("get status", exec.Command("./hack/e2e-internal/e2e-status.sh")) -} - -func (b bash) SetupKubecfg() error { - return nil -} - -func (b bash) Down() error { - return finishRunning("teardown", exec.Command("./hack/e2e-internal/e2e-down.sh")) -} - -func DumpClusterLogs(location string) error { - log.Printf("Dumping cluster logs to: %v", location) - return finishRunning("dump cluster logs", exec.Command("./hack/e2e-internal/log-dump.sh", location)) -} - -func Test() error { - if *testArgs == "" { - *testArgs = "--ginkgo.focus=\\[Feature:Ingress\\]" - } - return finishRunning("Ginkgo tests", exec.Command("./hack/e2e-internal/ginkgo-e2e.sh", strings.Fields(*testArgs)...)) -} - -func finishRunning(stepName string, cmd *exec.Cmd) error { - if *verbose { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - log.Printf("Running: %v", stepName) - defer func(start time.Time) { - log.Printf("Step '%s' finished in %s", stepName, time.Since(start)) - }(time.Now()) - - if err := cmd.Run(); err != nil { - return fmt.Errorf("error running %v: %v", stepName, err) - } - return nil -} diff --git a/controllers/nginx/pkg/cmd/controller/main.go b/pkg/cmd/controller/main.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/main.go rename to pkg/cmd/controller/main.go diff --git 
a/controllers/nginx/pkg/cmd/controller/metrics.go b/pkg/cmd/controller/metrics.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/metrics.go rename to pkg/cmd/controller/metrics.go diff --git a/controllers/nginx/pkg/cmd/controller/nginx.go b/pkg/cmd/controller/nginx.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/nginx.go rename to pkg/cmd/controller/nginx.go diff --git a/controllers/nginx/pkg/cmd/controller/nginx_test.go b/pkg/cmd/controller/nginx_test.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/nginx_test.go rename to pkg/cmd/controller/nginx_test.go diff --git a/controllers/nginx/pkg/cmd/controller/tcp.go b/pkg/cmd/controller/tcp.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/tcp.go rename to pkg/cmd/controller/tcp.go diff --git a/controllers/nginx/pkg/cmd/controller/utils.go b/pkg/cmd/controller/utils.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/utils.go rename to pkg/cmd/controller/utils.go diff --git a/controllers/nginx/pkg/cmd/controller/utils_test.go b/pkg/cmd/controller/utils_test.go similarity index 100% rename from controllers/nginx/pkg/cmd/controller/utils_test.go rename to pkg/cmd/controller/utils_test.go diff --git a/controllers/nginx/pkg/config/config.go b/pkg/config/config.go similarity index 100% rename from controllers/nginx/pkg/config/config.go rename to pkg/config/config.go diff --git a/controllers/nginx/pkg/config/config_test.go b/pkg/config/config_test.go similarity index 100% rename from controllers/nginx/pkg/config/config_test.go rename to pkg/config/config_test.go diff --git a/controllers/nginx/pkg/metric/collector/nginx.go b/pkg/metric/collector/nginx.go similarity index 100% rename from controllers/nginx/pkg/metric/collector/nginx.go rename to pkg/metric/collector/nginx.go diff --git a/controllers/nginx/pkg/metric/collector/process.go b/pkg/metric/collector/process.go similarity index 100% rename from 
controllers/nginx/pkg/metric/collector/process.go rename to pkg/metric/collector/process.go diff --git a/controllers/nginx/pkg/metric/collector/scrape.go b/pkg/metric/collector/scrape.go similarity index 100% rename from controllers/nginx/pkg/metric/collector/scrape.go rename to pkg/metric/collector/scrape.go diff --git a/controllers/nginx/pkg/metric/collector/status.go b/pkg/metric/collector/status.go similarity index 100% rename from controllers/nginx/pkg/metric/collector/status.go rename to pkg/metric/collector/status.go diff --git a/controllers/nginx/pkg/metric/collector/status_test.go b/pkg/metric/collector/status_test.go similarity index 100% rename from controllers/nginx/pkg/metric/collector/status_test.go rename to pkg/metric/collector/status_test.go diff --git a/controllers/nginx/pkg/metric/collector/vts.go b/pkg/metric/collector/vts.go similarity index 100% rename from controllers/nginx/pkg/metric/collector/vts.go rename to pkg/metric/collector/vts.go diff --git a/controllers/nginx/pkg/template/configmap.go b/pkg/template/configmap.go similarity index 100% rename from controllers/nginx/pkg/template/configmap.go rename to pkg/template/configmap.go diff --git a/controllers/nginx/pkg/template/configmap_test.go b/pkg/template/configmap_test.go similarity index 100% rename from controllers/nginx/pkg/template/configmap_test.go rename to pkg/template/configmap_test.go diff --git a/controllers/nginx/pkg/template/template.go b/pkg/template/template.go similarity index 100% rename from controllers/nginx/pkg/template/template.go rename to pkg/template/template.go diff --git a/controllers/nginx/pkg/template/template_test.go b/pkg/template/template_test.go similarity index 100% rename from controllers/nginx/pkg/template/template_test.go rename to pkg/template/template_test.go diff --git a/controllers/nginx/pkg/version/version.go b/pkg/version/version.go similarity index 100% rename from controllers/nginx/pkg/version/version.go rename to pkg/version/version.go diff 
--git a/controllers/nginx/rootfs/Dockerfile b/rootfs/Dockerfile similarity index 100% rename from controllers/nginx/rootfs/Dockerfile rename to rootfs/Dockerfile diff --git a/controllers/nginx/rootfs/etc/nginx/nginx.conf b/rootfs/etc/nginx/nginx.conf similarity index 100% rename from controllers/nginx/rootfs/etc/nginx/nginx.conf rename to rootfs/etc/nginx/nginx.conf diff --git a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl similarity index 100% rename from controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl rename to rootfs/etc/nginx/template/nginx.tmpl diff --git a/controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh b/rootfs/ingress-controller/clean-nginx-conf.sh similarity index 100% rename from controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh rename to rootfs/ingress-controller/clean-nginx-conf.sh diff --git a/controllers/nginx/test/data/config.json b/tests/data/config.json similarity index 100% rename from controllers/nginx/test/data/config.json rename to tests/data/config.json