Merge branch 'master' into ingress-ssl-auth

Ricardo Pchevuzinske Katz 2017-02-17 09:32:24 -02:00
commit a9ad2fe0b3
51 changed files with 2353 additions and 80 deletions

View file

@ -1,7 +1,7 @@
all: push
# 0.0 shouldn't clobber any released builds
TAG = 0.9.0
TAG = 0.9.1
PREFIX = gcr.io/google_containers/glbc
server:
@ -11,7 +11,7 @@ container: server
docker build --pull -t $(PREFIX):$(TAG) .
push: container
gcloud docker push $(PREFIX):$(TAG)
gcloud docker -- push $(PREFIX):$(TAG)
clean:
rm -f glbc

View file

@ -327,7 +327,7 @@ So simply delete the replication controller:
$ kubectl get rc glbc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE
glbc default-http-backend gcr.io/google_containers/defaultbackend:1.0 k8s-app=glbc,version=v0.5 1 2m
l7-lb-controller gcr.io/google_containers/glbc:0.9.0
l7-lb-controller gcr.io/google_containers/glbc:0.9.1
$ kubectl delete rc glbc
replicationcontroller "glbc" deleted

View file

@ -20,6 +20,7 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"k8s.io/kubernetes/pkg/util/sets"
@ -32,6 +33,41 @@ import (
"k8s.io/ingress/controllers/gce/utils"
)
// BalancingMode represents the loadbalancing configuration of an individual
// Backend in a BackendService. This is *effectively* a cluster wide setting
// since you can't mix modes across Backends pointing to the same IG, and you
// can't have a single node in more than 1 loadbalanced IG.
type BalancingMode string
const (
// Rate balances incoming requests based on observed RPS.
// As of this writing, it's the only balancing mode supported by GCE's
// internal LB. This setting doesn't make sense for Kubernetes clusters
// because requests can get proxied between instance groups in different
// zones by kube-proxy without GCE even knowing it. Setting equal RPS on
// all IGs should achieve roughly equal distribution of requests.
Rate BalancingMode = "RATE"
// Utilization balances incoming requests based on observed utilization.
// This mode is only useful if you want to divert traffic away from IGs
// running other compute intensive workloads. Utilization statistics are
// aggregated per instance, not per container, and requests can get proxied
// between instance groups in different zones by kube-proxy without GCE even
// knowing about it.
Utilization BalancingMode = "UTILIZATION"
// Connections balances incoming requests based on a connection counter.
// This setting currently doesn't make sense for Kubernetes clusters,
// because we use NodePort Services as HTTP LB backends, so GCE's connection
// counters don't accurately represent connections per container.
Connections BalancingMode = "CONNECTION"
)
// maxRPS is the RPS setting for all Backends with BalancingMode RATE. The exact
// value doesn't matter, as long as it's the same for all Backends. Requests
// received by GCLB above this RPS are NOT dropped, GCLB continues to distribute
// them across IGs.
// TODO: Should this be math.MaxInt64?
const maxRPS = 1
// Backends implements BackendPool.
type Backends struct {
cloud BackendServices
@ -116,20 +152,49 @@ func (b *Backends) create(igs []*compute.InstanceGroup, namedPort *compute.Named
if err != nil {
return nil, err
}
// Create a new backend
backend := &compute.BackendService{
Name: name,
Protocol: "HTTP",
Backends: getBackendsForIGs(igs),
// Api expects one, means little to kubernetes.
HealthChecks: []string{hc.SelfLink},
Port: namedPort.Port,
PortName: namedPort.Name,
errs := []string{}
// We first try to create the backend with balancingMode=RATE. If this
// fails, it's most likely because there are existing backends with
// balancingMode=UTILIZATION. This failure mode throws a googleapi error
// which wraps an HTTP 400 status code. We handle it in the loop below
// and come around to retry with the right balancing mode. The goal is to
// switch everyone to using RATE.
for _, bm := range []BalancingMode{Rate, Utilization} {
backends := getBackendsForIGs(igs)
for _, b := range backends {
switch bm {
case Rate:
b.MaxRate = maxRPS
default:
// TODO: Set utilization and connection limits when we accept them
// as valid fields.
}
b.BalancingMode = string(bm)
}
// Create a new backend
backend := &compute.BackendService{
Name: name,
Protocol: "HTTP",
Backends: backends,
HealthChecks: []string{hc.SelfLink},
Port: namedPort.Port,
PortName: namedPort.Name,
}
if err := b.cloud.CreateBackendService(backend); err != nil {
// This is probably a failure because we tried to create the backend
// with balancingMode=RATE when there are already backends with
// balancingMode=UTILIZATION. Just ignore it and retry setting
// balancingMode=UTILIZATION (b/35102911).
if utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
glog.Infof("Error creating backend service with balancing mode %v:%v", bm, err)
errs = append(errs, fmt.Sprintf("%v", err))
continue
}
return nil, err
}
return b.Get(namedPort.Port)
}
if err := b.cloud.CreateBackendService(backend); err != nil {
return nil, err
}
return b.Get(namedPort.Port)
return nil, fmt.Errorf("%v", strings.Join(errs, "\n"))
}
// Add will get or create a Backend for the given port.

View file

@ -17,6 +17,7 @@ limitations under the License.
package backends
import (
"net/http"
"testing"
compute "google.golang.org/api/compute/v1"
@ -25,10 +26,14 @@ import (
"k8s.io/ingress/controllers/gce/storage"
"k8s.io/ingress/controllers/gce/utils"
"k8s.io/kubernetes/pkg/util/sets"
"google.golang.org/api/googleapi"
)
const defaultZone = "zone-a"
var noOpErrFunc = func(op int, be *compute.BackendService) error { return nil }
func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) BackendPool {
namer := &utils.Namer{}
nodePool := instances.NewNodePool(fakeIGs)
@ -40,7 +45,7 @@ func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWit
}
func TestBackendPoolAdd(t *testing.T) {
f := NewFakeBackendServices()
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool := newBackendPool(f, fakeIGs, false)
namer := utils.Namer{}
@ -110,7 +115,7 @@ func TestBackendPoolSync(t *testing.T) {
// Call sync on a backend pool with a list of ports, make sure the pool
// creates/deletes required ports.
svcNodePorts := []int64{81, 82, 83}
f := NewFakeBackendServices()
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool := newBackendPool(f, fakeIGs, true)
pool.Add(81)
@ -174,7 +179,7 @@ func TestBackendPoolSync(t *testing.T) {
}
func TestBackendPoolShutdown(t *testing.T) {
f := NewFakeBackendServices()
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool := newBackendPool(f, fakeIGs, false)
namer := utils.Namer{}
@ -187,7 +192,7 @@ func TestBackendPoolShutdown(t *testing.T) {
}
func TestBackendInstanceGroupClobbering(t *testing.T) {
f := NewFakeBackendServices()
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool := newBackendPool(f, fakeIGs, false)
namer := utils.Namer{}
@ -230,3 +235,40 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
t.Fatalf("Expected %v Got %v", expectedGroups, gotGroups)
}
}
func TestBackendCreateBalancingMode(t *testing.T) {
f := NewFakeBackendServices(noOpErrFunc)
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
pool := newBackendPool(f, fakeIGs, false)
namer := utils.Namer{}
nodePort := int64(8080)
modes := []BalancingMode{Rate, Utilization}
// block the creation of Backends with the given balancingMode
// and verify that a backend with the other balancingMode is
// created
for i, bm := range modes {
f.errFunc = func(op int, be *compute.BackendService) error {
for _, b := range be.Backends {
if b.BalancingMode == string(bm) {
return &googleapi.Error{Code: http.StatusBadRequest}
}
}
return nil
}
pool.Add(nodePort)
be, err := f.GetBackendService(namer.BeName(nodePort))
if err != nil {
t.Fatalf("%v", err)
}
for _, b := range be.Backends {
if b.BalancingMode != string(modes[(i+1)%len(modes)]) {
t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode)
}
}
pool.GC([]int64{})
}
}

View file

@ -25,8 +25,9 @@ import (
)
// NewFakeBackendServices creates a new fake backend services manager.
func NewFakeBackendServices() *FakeBackendServices {
func NewFakeBackendServices(ef func(op int, be *compute.BackendService) error) *FakeBackendServices {
return &FakeBackendServices{
errFunc: ef,
backendServices: cache.NewStore(func(obj interface{}) (string, error) {
svc := obj.(*compute.BackendService)
return svc.Name, nil
@ -38,6 +39,7 @@ func NewFakeBackendServices() *FakeBackendServices {
type FakeBackendServices struct {
backendServices cache.Store
calls []int
errFunc func(op int, be *compute.BackendService) error
}
// GetBackendService fakes getting a backend service from the cloud.
@ -60,6 +62,11 @@ func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendSe
// CreateBackendService fakes backend service creation.
func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error {
if f.errFunc != nil {
if err := f.errFunc(utils.Create, be); err != nil {
return err
}
}
f.calls = append(f.calls, utils.Create)
be.SelfLink = be.Name
return f.backendServices.Update(be)

View file

@ -20,6 +20,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/sets"
compute "google.golang.org/api/compute/v1"
"k8s.io/ingress/controllers/gce/backends"
"k8s.io/ingress/controllers/gce/firewalls"
"k8s.io/ingress/controllers/gce/healthchecks"
@ -45,7 +46,7 @@ type fakeClusterManager struct {
// NewFakeClusterManager creates a new fake ClusterManager.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
fakeBackends := backends.NewFakeBackendServices()
fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeHCs := healthchecks.NewFakeHealthChecks()
namer := utils.NewNamer(clusterName)

View file

@ -34,7 +34,7 @@ const (
)
func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
fakeBackends := backends.NewFakeBackendServices()
fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeHCs := healthchecks.NewFakeHealthChecks()
namer := &utils.Namer{}

View file

@ -61,7 +61,7 @@ const (
alphaNumericChar = "0"
// Current docker image version. Only used in debug logging.
imageVersion = "glbc:0.9.0"
imageVersion = "glbc:0.9.1"
// Key used to persist UIDs to configmaps.
uidConfigMapName = "ingress-uid"

View file

@ -24,18 +24,18 @@ metadata:
name: l7-lb-controller
labels:
k8s-app: glbc
version: v0.9.0
version: v0.9.1
spec:
# There should never be more than 1 controller alive simultaneously.
replicas: 1
selector:
k8s-app: glbc
version: v0.9.0
version: v0.9.1
template:
metadata:
labels:
k8s-app: glbc
version: v0.9.0
version: v0.9.1
name: glbc
spec:
terminationGracePeriodSeconds: 600
@ -61,7 +61,7 @@ spec:
requests:
cpu: 10m
memory: 20Mi
- image: gcr.io/google_containers/glbc:0.9.0
- image: gcr.io/google_containers/glbc:0.9.1
livenessProbe:
httpGet:
path: /healthz

View file

@ -24,7 +24,7 @@ container: build
docker build --pull -t $(PREFIX):$(RELEASE) rootfs
push: container
gcloud docker push $(PREFIX):$(RELEASE)
gcloud docker -- push $(PREFIX):$(RELEASE)
fmt:
@echo "+ $@"

View file

@ -1,2 +1,462 @@
# Nginx Ingress Controller
This is an nginx Ingress controller that uses [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/design/configmap.md) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
## Contents
* [Conventions](#conventions)
* [Requirements](#requirements)
* [Try running the Ingress controller](#try-running-the-ingress-controller)
* [Deployment](#deployment)
* [HTTP](#http)
* [HTTPS](#https)
* [Default SSL Certificate](#default-ssl-certificate)
* [HTTPS enforcement](#server-side-https-enforcement)
* [HSTS](#http-strict-transport-security)
* [Kube-Lego](#automated-certificate-management-with-kube-lego)
* [TCP Services](#exposing-tcp-services)
* [UDP Services](#exposing-udp-services)
* [Proxy Protocol](#proxy-protocol)
* [NGINX customization](configuration.md)
* [NGINX status page](#nginx-status-page)
* [Running multiple ingress controllers](#running-multiple-ingress-controllers)
* [Running on Cloudproviders](#running-on-cloudproviders)
* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller)
* [Log format](#log-format)
* [Local cluster](#local-cluster)
* [Debug & Troubleshooting](#troubleshooting)
* [Why endpoints and not services?](#why-endpoints-and-not-services)
* [Limitations](#limitations)
* [NGINX Notes](#nginx-notes)
## Conventions
Anytime we reference a TLS secret, we mean a PEM-encoded, X.509 certificate with an RSA 2048 key. You can generate such a certificate with:
`openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"`
and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}`
## Requirements
- Default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server)
## Try running the Ingress controller
Before deploying the controller to production you might want to run it outside the cluster and observe it.
```console
$ make controller
$ mkdir /etc/nginx-ssl
$ ./nginx-ingress-controller --running-in-cluster=false --default-backend-service=kube-system/default-http-backend
```
## Deployment
First create a default backend:
```
$ kubectl create -f examples/default-backend.yaml
$ kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
```
Loadbalancers are created via a ReplicationController or DaemonSet:
```
$ kubectl create -f examples/default/rc-default.yaml
```
## HTTP
First we need to deploy some application to publish. To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the HTTP request as output.
```
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.4 --replicas=1 --port=8080
```
Now we expose the same application in two different services (so we can create different Ingress rules)
```
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
```
Next we create a couple of Ingress rules
```
kubectl create -f examples/ingress.yaml
```
We check that the Ingress rules are defined:
```
$ kubectl get ing
NAME RULE BACKEND ADDRESS
echomap -
foo.bar.com
/foo echoheaders-x:80
bar.baz.com
/bar echoheaders-y:80
/foo echoheaders-x:80
```
Before deploying the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server)
```
kubectl create -f examples/default-backend.yaml
kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
```
Check that NGINX is running with the defined Ingress rules:
```
$ LBIP=$(kubectl get node `kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}')
$ curl $LBIP/foo -H 'Host: foo.bar.com'
```
## HTTPS
You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI. The TLS secret must contain keys named `tls.crt` and `tls.key` that contain the certificate and private key to use for TLS, e.g.:
```
apiVersion: v1
data:
tls.crt: base64 encoded cert
tls.key: base64 encoded key
kind: Secret
metadata:
name: foo-secret
namespace: default
type: kubernetes.io/tls
```
Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS:
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: no-rules-map
spec:
tls:
secretName: foo-secret
backend:
serviceName: s1
servicePort: 80
```
Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different from the name of the certificate.
Check the [example](examples/tls/README.md)
### Default SSL Certificate
NGINX provides a catch-all [server name](http://nginx.org/en/docs/http/server_names.html) for requests that do not match any of the configured server names. This configuration works without issues for HTTP traffic; in the case of HTTPS, NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case.
If this flag is not provided NGINX will use a self-signed certificate.
Running without the flag `--default-ssl-certificate`:
```
$ curl -v https://10.2.78.7:443 -k
* Rebuilt URL to: https://10.2.78.7:443/
* Trying 10.2.78.4...
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
* ALPN, offering http/1.1
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
* successfully set certificate verify locations:
* CAfile: /etc/ssl/certs/ca-certificates.crt
CApath: /etc/ssl/certs
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
* TLSv1.2 (IN), TLS handshake, Server hello (2):
* TLSv1.2 (IN), TLS handshake, Certificate (11):
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
* TLSv1.2 (IN), TLS handshake, Server finished (14):
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
* TLSv1.2 (OUT), TLS handshake, Finished (20):
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
* TLSv1.2 (IN), TLS handshake, Finished (20):
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
* ALPN, server accepted to use http/1.1
* Server certificate:
* subject: CN=foo.bar.com
* start date: Apr 13 00:50:56 2016 GMT
* expire date: Apr 13 00:50:56 2017 GMT
* issuer: CN=foo.bar.com
* SSL certificate verify result: self signed certificate (18), continuing anyway.
> GET / HTTP/1.1
> Host: 10.2.78.7
> User-Agent: curl/7.47.1
> Accept: */*
>
< HTTP/1.1 404 Not Found
< Server: nginx/1.11.1
< Date: Thu, 21 Jul 2016 15:38:46 GMT
< Content-Type: text/html
< Transfer-Encoding: chunked
< Connection: keep-alive
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
<
<span>The page you're looking for could not be found.</span>
* Connection #0 to host 10.2.78.7 left intact
```
Specifying `--default-ssl-certificate=default/foo-tls`:
```
core@localhost ~ $ curl -v https://10.2.78.7:443 -k
* Rebuilt URL to: https://10.2.78.7:443/
* Trying 10.2.78.7...
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
* ALPN, offering http/1.1
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
* successfully set certificate verify locations:
* CAfile: /etc/ssl/certs/ca-certificates.crt
CApath: /etc/ssl/certs
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
* TLSv1.2 (IN), TLS handshake, Server hello (2):
* TLSv1.2 (IN), TLS handshake, Certificate (11):
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
* TLSv1.2 (IN), TLS handshake, Server finished (14):
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
* TLSv1.2 (OUT), TLS handshake, Finished (20):
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
* TLSv1.2 (IN), TLS handshake, Finished (20):
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
* ALPN, server accepted to use http/1.1
* Server certificate:
* subject: CN=foo.bar.com
* start date: Apr 13 00:50:56 2016 GMT
* expire date: Apr 13 00:50:56 2017 GMT
* issuer: CN=foo.bar.com
* SSL certificate verify result: self signed certificate (18), continuing anyway.
> GET / HTTP/1.1
> Host: 10.2.78.7
> User-Agent: curl/7.47.1
> Accept: */*
>
< HTTP/1.1 404 Not Found
< Server: nginx/1.11.1
< Date: Mon, 18 Jul 2016 21:02:59 GMT
< Content-Type: text/html
< Transfer-Encoding: chunked
< Connection: keep-alive
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
<
<span>The page you're looking for could not be found.</span>
* Connection #0 to host 10.2.78.7 left intact
```
### Server-side HTTPS enforcement
By default the controller redirects (301) to HTTPS if TLS is enabled for that Ingress. If you want to disable this behavior globally, you can use `ssl-redirect: "false"` in the NGINX config map.
To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
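For illustration, a minimal sketch of an Ingress that opts out of the redirect; the resource name is hypothetical, while the secret and service reuse the names from the HTTPS example above:
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: no-ssl-redirect   # hypothetical name
  annotations:
    ingress.kubernetes.io/ssl-redirect: "false"
spec:
  tls:
  - secretName: foo-secret
  backend:
    serviceName: s1
    servicePort: 80
```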
### HTTP Strict Transport Security
HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
To disable this behavior use `hsts=false` in the NGINX config map.
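As a sketch, the matching config map entry; the ConfigMap name and namespace here are only assumptions, use whichever config map the controller is started with:
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf   # assumed name
  namespace: kube-system           # assumed namespace
data:
  hsts: "false"
```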
### Automated Certificate Management with Kube-Lego
[Kube-Lego] automatically requests missing or expired certificates from
[Let's Encrypt] by monitoring Ingress resources and their referenced secrets. To
enable this for an ingress resource you have to add an annotation:
```
kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true"
```
To set up Kube-Lego you can take a look at this [full example]. The first
version to fully support Kube-Lego is nginx Ingress controller 0.8.
[full example]:https://github.com/jetstack/kube-lego/tree/master/examples
[Kube-Lego]:https://github.com/jetstack/kube-lego
[Let's Encrypt]:https://letsencrypt.org
## Exposing TCP services
Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`.
The port can be referenced by number or by name.
The next example shows how to expose the service `example-go`, running in the namespace `default` on port `8080`, on the external port `9000`:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: tcp-configmap-example
data:
9000: "default/example-go:8080"
```
Please check the [tcp services](examples/tcp/README.md) example
## Exposing UDP services
Since version 1.9.13, NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/).
Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`.
The port can be referenced by number or by name.
The next example shows how to expose the service `kube-dns`, running in the namespace `kube-system` on port `53`, on the external port `53`:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: udp-configmap-example
data:
53: "kube-system/kube-dns:53"
```
Please check the [udp services](examples/udp/README.md) example
## Proxy Protocol
If you are using an L4 proxy to forward traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you can use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic; it sends the connection details before forwarding the actual TCP connection itself.
Amongst others, [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol.
Please check the [proxy-protocol](examples/proxy-protocol/) example
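Below is a hedged sketch of enabling it through the controller's config map; the `use-proxy-protocol` key appears in this controller's configuration, while the ConfigMap name is a placeholder:
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf   # placeholder name
data:
  use-proxy-protocol: "true"
```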
### Custom errors
In case of an error in a request, the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
- `X-Code` indicates the HTTP status code
- `X-Format` contains the value of the `Accept` header
Using these two headers it is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspects each request and returns a custom error page in the format expected by the client. Please check the example [custom-errors](examples/custom-errors/README.md)
### NGINX status page
The `ngx_http_stub_status_module` module provides access to basic status information, and is active by default at the URL `/nginx_status`.
This controller provides an alternative to this module using the third-party module [nginx-module-vts](https://github.com/vozlt/nginx-module-vts).
To use this module just provide a config map with the key `enable-vts-status=true`. The status page is exposed on port 8080.
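For example, a minimal config map sketch (the ConfigMap name is a placeholder):
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf   # placeholder name
data:
  enable-vts-status: "true"
```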
Please check the example `example/rc-default.yaml`
![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter")
To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
### Running multiple ingress controllers
If you're running multiple ingress controllers, or running on a cloudprovider that natively handles
ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses
that you would like this controller to claim. Not specifying the annotation will lead to multiple
ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress
controllers ignoring the ingress. Multiple ingress controllers running in the same cluster was not
supported in Kubernetes versions < 1.3.
### Running on Cloudproviders
If you're running this ingress controller on a cloudprovider, you should assume the provider also has a native
Ingress controller and specify the ingress.class annotation as indicated in this section.
In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e. :80 and :443.
### Disabling NGINX ingress controller
Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.
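For instance, a sketch of an Ingress this controller will ignore; the resource name is hypothetical, and "gce" is just an illustrative non-"nginx" class value:
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: other-controller-ingress   # hypothetical name
  annotations:
    kubernetes.io/ingress.class: "gce"
spec:
  backend:
    serviceName: s1
    servicePort: 80
```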
### Log format
The default configuration uses a custom logging format to add additional information about upstreams
```
log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
'[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
'$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
```
Sources:
- [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
- [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)
Description:
- `$proxy_protocol_addr`: if PROXY protocol is enabled
- `$remote_addr`: if PROXY protocol is disabled (default)
- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the $remote_addr variable appended to it, separated by a comma
- `$remote_user`: user name supplied with the Basic authentication
- `$time_local`: local time in the Common Log Format
- `$request`: full original request line
- `$status`: response status
- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header
- `$http_referer`: value of the Referer header
- `$http_user_agent`: value of User-Agent header
- `$request_length`: request length (including request line, header, and request body)
- `$request_time`: time elapsed since the first bytes were read from the client
- `$proxy_upstream_name`: name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>`
- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas
- `$upstream_response_length`: keeps the length of the response obtained from the upstream server
- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution
- `$upstream_status`: keeps status code of the response obtained from the upstream server
### Local cluster
Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) it is possible to start a local Kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/running-locally.md) for more details.
Use of `hostNetwork: true` in the ingress controller is required so that it can fall back to the apiserver at localhost:8080 if every other client creation check fails (e.g.: service account not present, kubeconfig doesn't exist, no master env vars...)
### Debug & Troubleshooting
Using the flag `--v=XX` it is possible to increase the level of logging.
In particular:
- `--v=2` shows details using `diff` about the changes in the configuration in nginx
```
I0316 12:24:37.581267 1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf
I0316 12:24:37.581356 1 utils.go:149] --- /tmp/922554809 2016-03-16 12:24:37.000000000 +0000
+++ /tmp/079811012 2016-03-16 12:24:37.000000000 +0000
@@ -235,7 +235,6 @@
upstream default-echoheadersx {
least_conn;
- server 10.2.112.124:5000;
server 10.2.208.50:5000;
}
I0316 12:24:37.610073 1 command.go:69] change in configuration detected. Reloading...
```
- `--v=3` shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format
- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html)
*These issues were encountered in past versions of Kubernetes:*
[1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):
* The setup-files.sh step in hyperkube does not provide the 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather than 10.0.0.1. This causes nginx-third-party-lb to appear stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong IP). To verify this, add zeros to the end of `initialDelaySeconds` and `timeoutSeconds` and reload the RC; docker will log this error before Kubernetes kills the container.
* To fix the above, setup-files.sh must be patched before the cluster is initialized (refer to https://github.com/kubernetes/kubernetes/pull/21504)
### Limitations
- Ingress rules for TLS require the definition of the field `host`
### Why endpoints and not services
The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) and allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.
### NGINX notes
Since `gcr.io/google_containers/nginx-slim:0.8` NGINX contains the following patches:
- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. The default value in NGINX is 16KB; the ingress controller changes the default to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/), but the size is fixed. This patch adapts the size of the buffer to the content being served, helping to improve perceived latency.
- Add SPDY support back to Nginx with HTTP/2 [nginx_1_9_15_http2_spdy.patch](https://github.com/cloudflare/sslconfig/pull/36)
When NGINX introduced HTTP/2 support, support for SPDY was removed. This patch adds SPDY support back without compromising HTTP/2 support, using the Application-Layer Protocol Negotiation (ALPN) or Next Protocol Negotiation (NPN) Transport Layer Security (TLS) extension to negotiate which protocol the server and client support:
```
openssl s_client -servername www.my-site.com -connect www.my-site.com:443 -nextprotoneg ''
CONNECTED(00000003)
Protocols advertised by server: h2, spdy/3.1, http/1.1
```

View file

@ -43,6 +43,7 @@ The following annotations are supported:
|[ingress.kubernetes.io/auth-secret](#authentication)|string|
|[ingress.kubernetes.io/auth-type](#authentication)|basic or digest|
|[ingress.kubernetes.io/auth-url](#external-authentication)|string|
|[ingress.kubernetes.io/enable-cors](#enable-cors)|true or false|
|[ingress.kubernetes.io/limit-connections](#rate-limiting)|number|
|[ingress.kubernetes.io/limit-rps](#rate-limiting)|number|
|[ingress.kubernetes.io/rewrite-target](#rewrite)|URI|
@ -51,6 +52,9 @@ The following annotations are supported:
|[ingress.kubernetes.io/upstream-max-fails](#custom-nginx-upstream-checks)|number|
|[ingress.kubernetes.io/upstream-fail-timeout](#custom-nginx-upstream-checks)|number|
|[ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR|
|[ingress.kubernetes.io/affinity](#session-affinity)|true or false|
|[ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string|
|[ingress.kubernetes.io/session-cookie-hash](#cookie-affinity)|string|
@ -66,7 +70,7 @@ In addition to the built-in functions provided by the Go package the following f
- empty: returns true if the specified parameter (string) is empty
- contains: [strings.Contains](https://golang.org/pkg/strings/#Contains)
- hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#Contains)
- hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#HasPrefix)
- hasSuffix: [strings.HasSuffix](https://golang.org/pkg/strings/#HasSuffix)
- toUpper: [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper)
- toLower: [strings.ToLower](https://golang.org/pkg/strings/#ToLower)
@ -120,6 +124,10 @@ ingress.kubernetes.io/auth-realm: "realm string"
Please check the [auth](examples/auth/README.md) example.
### Enable CORS
To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule, add the annotation `ingress.kubernetes.io/enable-cors: "true"`. This will add a section to the server location enabling this functionality.
For more information please check https://enable-cors.org/server_nginx.html
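A sketch of the annotation in place; the resource name is hypothetical and the backend reuses the echoheaders service from the deployment example:
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cors-ingress   # hypothetical name
  annotations:
    ingress.kubernetes.io/enable-cors: "true"
spec:
  backend:
    serviceName: echoheaders-x
    servicePort: 80
```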
### External Authentication
@ -174,7 +182,22 @@ To configure this setting globally for all Ingress rules, the `whitelist-source-
*Note:* Adding an annotation to an Ingress rule overrides any global restriction.
Please check the [whitelist](examples/whitelist/README.md) example.
Please check the [affinity](examples/affinity/cookie/nginx/README.md) example.
### Session Affinity
The annotation `ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server.
#### Cookie affinity
If you use the `cookie` type you can also specify the name of the cookie that will be used to route the requests with the annotation `ingress.kubernetes.io/session-cookie-name`. The default is to create a cookie named 'INGRESSCOOKIE'.
The annotation `ingress.kubernetes.io/session-cookie-hash` defines which algorithm will be used to hash the chosen upstream. The default value is `md5`; possible values are `md5`, `sha1` and `index`.
The `index` option is not hashed; an in-memory index is used instead. It is quicker and has less overhead. Warning: the matching against the upstream servers list is inconsistent, so at reload, if the upstream servers have changed, index values are not guaranteed to correspond to the same server as before! USE IT WITH CAUTION and only if you need to!
In NGINX this feature is implemented by the third-party module [nginx-sticky-module-ng](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng). The workflow used to define which upstream server will be used is explained [here](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/raw/08a395c66e425540982c00482f55034e1fee67b6/docs/sticky.pdf). A combined example is sketched below.
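A hedged sketch combining the three annotations; the resource name is hypothetical, while the host and service reuse the HTTP example above:
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: affinity-ingress   # hypothetical name
  annotations:
    ingress.kubernetes.io/affinity: "cookie"
    ingress.kubernetes.io/session-cookie-name: "route"
    ingress.kubernetes.io/session-cookie-hash: "sha1"
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: echoheaders-x
          servicePort: 80
```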
### **Allowed parameters in configuration ConfigMap**
@ -188,6 +211,9 @@ Setting at least one code also enables [proxy_intercept_errors](http://nginx.org
Example usage: `custom-http-errors: 404,415`
**disable-access-log:** Disables the access log for the entire Ingress controller. This is 'false' by default.
**enable-dynamic-tls-records:** Enables dynamically sized TLS records to improve time-to-first-byte. Enabled by default. See [CloudFlare's blog](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency) for more information.

View file

@ -30,6 +30,7 @@ import (
"github.com/golang/glog"
"github.com/mitchellh/mapstructure"
"github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/api"
@ -101,6 +102,8 @@ type NGINXController struct {
configmap *api.ConfigMap
storeLister ingress.StoreLister
binary string
}
@ -132,10 +135,10 @@ NGINX master process died (%v): %v
// we wait until the workers are killed
for {
conn, err := net.DialTimeout("tcp", "127.0.0.1:80", 1*time.Second)
if err == nil {
conn.Close()
if err != nil {
break
}
conn.Close()
time.Sleep(1 * time.Second)
}
// start a new nginx master process
@ -251,6 +254,11 @@ func (n NGINXController) Info() *ingress.BackendInfo {
}
}
// OverrideFlags customize NGINX controller flags
func (n NGINXController) OverrideFlags(flags *pflag.FlagSet) {
flags.Set("ingress-class", "nginx")
}
// testTemplate checks if the NGINX configuration inside the byte array is valid
// by running the command "nginx -t" using a temporary file.
func (n NGINXController) testTemplate(cfg []byte) error {
@ -276,11 +284,16 @@ Error: %v
return nil
}
// SetConfig ...
// SetConfig sets the configured configmap
func (n *NGINXController) SetConfig(cmap *api.ConfigMap) {
n.configmap = cmap
}
// SetListers sets the configured store listers in the generic ingress controller
func (n *NGINXController) SetListers(lister ingress.StoreLister) {
n.storeLister = lister
}
// OnUpdate is called by syncQueue in https://github.com/aledbf/ingress-controller/blob/master/pkg/ingress/controller/controller.go#L82
// periodically to keep the configuration in sync.
//
@ -324,14 +337,27 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
// and we leave some room to avoid consuming all the FDs available
maxOpenFiles := (sysctlFSFileMax() / cfg.WorkerProcesses) - 1024
setHeaders := map[string]string{}
if cfg.ProxySetHeaders != "" {
cmap, exists, err := n.storeLister.ConfigMap.GetByKey(cfg.ProxySetHeaders)
if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err)
}
if exists {
setHeaders = cmap.(*api.ConfigMap).Data
}
}
return n.t.Write(config.TemplateConfig{
ProxySetHeaders: setHeaders,
MaxOpenFiles: maxOpenFiles,
BacklogSize: sysctlSomaxconn(),
Backends: ingressCfg.Backends,
PassthroughBackends: ingressCfg.PassthroughBackends,
Servers: ingressCfg.Servers,
TCPBackends: ingressCfg.TCPEndpoints,
UDPBackends: ingressCfg.UPDEndpoints,
UDPBackends: ingressCfg.UDPEndpoints,
HealthzURI: ngxHealthPath,
CustomErrors: len(cfg.CustomHTTPErrors) > 0,
Cfg: cfg,

View file

@ -88,6 +88,10 @@ type Configuration struct {
// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size
ClientHeaderBufferSize string `json:"client-header-buffer-size"`
// DisableAccessLog disables the Access Log globally from NGINX ingress controller
//http://nginx.org/en/docs/http/ngx_http_log_module.html
DisableAccessLog bool `json:"disable-access-log,omitempty"`
// EnableSPDY enables spdy and use ALPN and NPN to advertise the availability of the two protocols
// https://blog.cloudflare.com/open-sourcing-our-nginx-http-2-spdy-code
// By default this is enabled
@ -152,6 +156,9 @@ type Configuration struct {
// of your external load balancer
ProxyRealIPCIDR string `json:"proxy-real-ip-cidr,omitempty"`
// Sets the name of the configmap that contains the headers to pass to the backend
ProxySetHeaders string `json:"proxy-set-headers,omitempty"`
// Maximum size of the server names hash tables used in server names, map directives values,
// MIME types, names of request header strings, etc.
// http://nginx.org/en/docs/hash.html
@ -233,6 +240,7 @@ type Configuration struct {
func NewDefault() Configuration {
cfg := Configuration{
ClientHeaderBufferSize: "1k",
DisableAccessLog: false,
EnableDynamicTLSRecords: true,
EnableSPDY: false,
ErrorLogLevel: errorLevel,
@ -283,6 +291,7 @@ func NewDefault() Configuration {
// TemplateConfig contains the nginx configuration to render the file nginx.conf
type TemplateConfig struct {
ProxySetHeaders map[string]string
MaxOpenFiles int
BacklogSize int
Backends []*ingress.Backend

View file

@ -39,12 +39,14 @@ func TestMergeConfigMapToStruct(t *testing.T) {
"proxy-send-timeout": "2",
"skip-access-log-urls": "/log,/demo,/test",
"use-proxy-protocol": "true",
"disable-access-log": "true",
"use-gzip": "true",
"enable-dynamic-tls-records": "false",
"gzip-types": "text/html",
}
def := config.NewDefault()
def.CustomHTTPErrors = []int{300, 400}
def.DisableAccessLog = true
def.SkipAccessLogURLs = []string{"/log", "/demo", "/test"}
def.ProxyReadTimeout = 1
def.ProxySendTimeout = 2

View file

@ -8,17 +8,17 @@ local get_upstreams = upstream.get_upstreams
local random = math.random
local us = get_upstreams()
function openURL(status)
function openURL(original_headers, status)
local httpc = http.new()
original_headers["X-Code"] = status or "404"
original_headers["X-Format"] = original_headers["Accept"] or "text/html"
local random_backend = get_destination()
local res, err = httpc:request_uri(random_backend, {
path = "/",
method = "GET",
headers = {
["X-Code"] = status or "404",
["X-Format"] = ngx.var.httpAccept or "html",
}
headers = original_headers,
})
if not res then
@ -26,8 +26,8 @@ function openURL(status)
ngx.exit(500)
end
if ngx.var.http_cookie then
ngx.header["Cookie"] = ngx.var.http_cookie
for k,v in pairs(res.headers) do
ngx.header[k] = v
end
ngx.status = tonumber(status)

View file

@ -1,4 +1,4 @@
{{ $cfg := .Cfg }}{{ $healthzURI := .HealthzURI }}{{ $backends := .Backends }}
{{ $cfg := .Cfg }}{{ $healthzURI := .HealthzURI }}{{ $backends := .Backends }}{{ $proxyHeaders := .ProxySetHeaders }}
daemon off;
worker_processes {{ $cfg.WorkerProcesses }};
@ -87,7 +87,11 @@ http {
default 1;
}
{{ if $cfg.DisableAccessLog }}
access_log off;
{{ else }}
access_log /var/log/nginx/access.log upstreaminfo if=$loggable;
{{ end }}
error_log /var/log/nginx/error.log {{ $cfg.ErrorLogLevel }};
{{ buildResolvers $cfg.Resolver }}
@ -181,8 +185,8 @@ http {
{{range $name, $upstream := $backends}}
upstream {{$upstream.Name}} {
{{ if $cfg.EnableStickySessions }}
sticky hash=sha1 httponly;
{{ if eq $upstream.SessionAffinity.AffinityType "cookie" }}
sticky hash={{$upstream.SessionAffinity.CookieSessionAffinity.Hash}} name={{$upstream.SessionAffinity.CookieSessionAffinity.Name}} httponly;
{{ else }}
least_conn;
{{ end }}
@ -313,6 +317,11 @@ http {
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
proxy_set_header Proxy "";
# Custom headers
{{ range $k, $v := $proxyHeaders }}
proxy_set_header {{ $k }} "{{ $v }}";
{{ end }}
proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s;
proxy_send_timeout {{ $location.Proxy.SendTimeout }}s;
proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s;
@ -409,7 +418,7 @@ http {
location / {
{{ if .CustomErrors }}
content_by_lua_block {
openURL(503)
openURL(ngx.req.get_headers(0), 503)
}
{{ else }}
return 503;
@ -430,7 +439,12 @@ stream {
log_format log_stream '$remote_addr [$time_local] $protocol [$ssl_preread_server_name] [$stream_upstream] $status $bytes_sent $bytes_received $session_time';
{{ if $cfg.DisableAccessLog }}
access_log off;
{{ else }}
access_log /var/log/nginx/access.log log_stream;
{{ end }}
error_log /var/log/nginx/error.log;
# configure default backend for SSL
@ -474,7 +488,7 @@ stream {
location @custom_{{ $errCode }} {
internal;
content_by_lua_block {
openURL({{ $errCode }})
openURL(ngx.req.get_headers(0), {{ $errCode }})
}
}
{{ end }}

View file

@ -92,7 +92,11 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) {
return nil, ing_errors.NewLocationDenied("invalid url host")
}
m, _ := parser.GetStringAnnotation(authMethod, ing)
m, err := parser.GetStringAnnotation(authMethod, ing)
if err != nil {
return nil, err
}
if len(m) != 0 && !validMethod(m) {
return nil, ing_errors.NewLocationDenied("invalid HTTP method")
}

View file

@ -52,11 +52,7 @@ func NewParser(br resolver.DefaultBackend) parser.IngressAnnotation {
// ParseAnnotations parses the annotations contained in the ingress
// rule used to rewrite the defined paths
func (a rewrite) Parse(ing *extensions.Ingress) (interface{}, error) {
rt, err := parser.GetStringAnnotation(rewriteTo, ing)
if err != nil {
return nil, err
}
rt, _ := parser.GetStringAnnotation(rewriteTo, ing)
sslRe, err := parser.GetBoolAnnotation(sslRedirect, ing)
if err != nil {
sslRe = a.backendResolver.GetDefaultBackend().SSLRedirect

View file

@ -76,8 +76,8 @@ func (m mockBackend) GetDefaultBackend() defaults.Backend {
func TestWithoutAnnotations(t *testing.T) {
ing := buildIngress()
_, err := NewParser(mockBackend{}).Parse(ing)
if err == nil {
t.Error("Expected error with ingress without annotations")
if err != nil {
t.Errorf("unexpected error with ingress without annotations: %v", err)
}
}

View file

@ -0,0 +1,118 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sessionaffinity
import (
"regexp"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
)
const (
annotationAffinityType = "ingress.kubernetes.io/affinity"
// If a cookie with this name exists,
// its value is used as an index into the list of available backends.
annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name"
defaultAffinityCookieName = "INGRESSCOOKIE"
// This is the algorithm used by nginx to generate a value for the session cookie, if
// one isn't supplied and affinity is set to "cookie".
annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash"
defaultAffinityCookieHash = "md5"
)
var (
affinityCookieHashRegex = regexp.MustCompile(`^(index|md5|sha1)$`)
)
// AffinityConfig describes the per ingress session affinity config
type AffinityConfig struct {
// The type of affinity that will be used
AffinityType string `json:"type"`
CookieConfig
}
// CookieConfig describes the Config of cookie type affinity
type CookieConfig struct {
// The name of the cookie that will be used in case of cookie affinity type.
Name string `json:"name"`
// The hash that will be used to encode the cookie in case of cookie affinity type
Hash string `json:"hash"`
}
// CookieAffinityParse gets the annotation values related to Cookie Affinity
// It also sets default values when no value or incorrect value is found
func CookieAffinityParse(ing *extensions.Ingress) *CookieConfig {
sn, err := parser.GetStringAnnotation(annotationAffinityCookieName, ing)
if err != nil || sn == "" {
glog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName)
sn = defaultAffinityCookieName
}
sh, err := parser.GetStringAnnotation(annotationAffinityCookieHash, ing)
if err != nil || !affinityCookieHashRegex.MatchString(sh) {
glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash)
sh = defaultAffinityCookieHash
}
return &CookieConfig{
Name: sn,
Hash: sh,
}
}
// NewParser creates a new Affinity annotation parser
func NewParser() parser.IngressAnnotation {
return affinity{}
}
type affinity struct {
}
// ParseAnnotations parses the annotations contained in the ingress
// rule used to configure the affinity directives
func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) {
cookieAffinityConfig := &CookieConfig{}
// Check the type of affinity that will be used
at, err := parser.GetStringAnnotation(annotationAffinityType, ing)
if err != nil {
at = ""
}
switch at {
case "cookie":
cookieAffinityConfig = CookieAffinityParse(ing)
default:
glog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name)
}
return &AffinityConfig{
AffinityType: at,
CookieConfig: *cookieAffinityConfig,
}, nil
}

View file

@ -0,0 +1,88 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sessionaffinity
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/util/intstr"
)
func buildIngress() *extensions.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,
},
Spec: extensions.IngressSpec{
Backend: &extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
},
Rules: []extensions.IngressRule{
{
Host: "foo.bar.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
{
Path: "/foo",
Backend: defaultBackend,
},
},
},
},
},
},
},
}
}
func TestIngressAffinityCookieConfig(t *testing.T) {
ing := buildIngress()
data := map[string]string{}
data[annotationAffinityType] = "cookie"
data[annotationAffinityCookieHash] = "sha123"
data[annotationAffinityCookieName] = "INGRESSCOOKIE"
ing.SetAnnotations(data)
affin, _ := NewParser().Parse(ing)
nginxAffinity, ok := affin.(*AffinityConfig)
if !ok {
t.Errorf("expected a Config type")
}
if nginxAffinity.AffinityType != "cookie" {
t.Errorf("expected cookie as sticky-type but returned %v", nginxAffinity.AffinityType)
}
if nginxAffinity.CookieConfig.Hash != "md5" {
t.Errorf("expected md5 as sticky-hash but returned %v", nginxAffinity.CookieConfig.Hash)
}
if nginxAffinity.CookieConfig.Name != "INGRESSCOOKIE" {
t.Errorf("expected route as sticky-name but returned %v", nginxAffinity.CookieConfig.Name)
}
}

View file

@ -33,6 +33,7 @@ import (
"k8s.io/ingress/core/pkg/ingress/annotations/ratelimit"
"k8s.io/ingress/core/pkg/ingress/annotations/rewrite"
"k8s.io/ingress/core/pkg/ingress/annotations/secureupstream"
"k8s.io/ingress/core/pkg/ingress/annotations/sessionaffinity"
"k8s.io/ingress/core/pkg/ingress/annotations/sslpassthrough"
"k8s.io/ingress/core/pkg/ingress/errors"
"k8s.io/ingress/core/pkg/ingress/resolver"
@ -62,6 +63,7 @@ func newAnnotationExtractor(cfg extractorConfig) annotationExtractor {
"RateLimit": ratelimit.NewParser(),
"Redirect": rewrite.NewParser(cfg),
"SecureUpstream": secureupstream.NewParser(),
"SessionAffinity": sessionaffinity.NewParser(),
"SSLPassthrough": sslpassthrough.NewParser(),
},
}
@ -96,9 +98,10 @@ func (e *annotationExtractor) Extract(ing *extensions.Ingress) map[string]interf
}
const (
secureUpstream = "SecureUpstream"
healthCheck = "HealthCheck"
sslPassthrough = "SSLPassthrough"
secureUpstream = "SecureUpstream"
healthCheck = "HealthCheck"
sslPassthrough = "SSLPassthrough"
sessionAffinity = "SessionAffinity"
)
func (e *annotationExtractor) SecureUpstream(ing *extensions.Ingress) bool {
@ -115,3 +118,8 @@ func (e *annotationExtractor) SSLPassthrough(ing *extensions.Ingress) bool {
val, _ := e.annotations[sslPassthrough].Parse(ing)
return val.(bool)
}
func (e *annotationExtractor) SessionAffinity(ing *extensions.Ingress) *sessionaffinity.AffinityConfig {
val, _ := e.annotations[sessionAffinity].Parse(ing)
return val.(*sessionaffinity.AffinityConfig)
}

View file

@ -28,10 +28,13 @@ import (
)
const (
annotationSecureUpstream = "ingress.kubernetes.io/secure-backends"
annotationUpsMaxFails = "ingress.kubernetes.io/upstream-max-fails"
annotationUpsFailTimeout = "ingress.kubernetes.io/upstream-fail-timeout"
annotationPassthrough = "ingress.kubernetes.io/ssl-passthrough"
annotationSecureUpstream = "ingress.kubernetes.io/secure-backends"
annotationUpsMaxFails = "ingress.kubernetes.io/upstream-max-fails"
annotationUpsFailTimeout = "ingress.kubernetes.io/upstream-fail-timeout"
annotationPassthrough = "ingress.kubernetes.io/ssl-passthrough"
annotationAffinityType = "ingress.kubernetes.io/affinity"
annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name"
annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash"
)
type mockCfg struct {
@ -179,3 +182,39 @@ func TestSSLPassthrough(t *testing.T) {
}
}
}
func TestAffinitySession(t *testing.T) {
ec := newAnnotationExtractor(mockCfg{})
ing := buildIngress()
fooAnns := []struct {
annotations map[string]string
affinitytype string
hash string
name string
}{
{map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "md5", annotationAffinityCookieName: "route"}, "cookie", "md5", "route"},
{map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "xpto", annotationAffinityCookieName: "route1"}, "cookie", "md5", "route1"},
{map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "", annotationAffinityCookieName: ""}, "cookie", "md5", "INGRESSCOOKIE"},
{map[string]string{}, "", "", ""},
{nil, "", "", ""},
}
for _, foo := range fooAnns {
ing.SetAnnotations(foo.annotations)
r := ec.SessionAffinity(ing)
t.Logf("Testing pass %v %v %v", foo.affinitytype, foo.hash, foo.name)
if r == nil {
t.Errorf("Returned nil but expected a SessionAffinity.AffinityConfig")
continue
}
if r.CookieConfig.Hash != foo.hash {
t.Errorf("Returned %v but expected %v for Hash", r.CookieConfig.Hash, foo.hash)
}
if r.CookieConfig.Name != foo.name {
t.Errorf("Returned %v but expected %v for Name", r.CookieConfig.Name, foo.name)
}
}
}

View file

@ -76,11 +76,13 @@ type GenericController struct {
ingController *cache.Controller
endpController *cache.Controller
svcController *cache.Controller
nodeController *cache.Controller
secrController *cache.Controller
mapController *cache.Controller
ingLister cache_store.StoreToIngressLister
svcLister cache.StoreToServiceLister
nodeLister cache.StoreToNodeLister
endpLister cache.StoreToEndpointsLister
secrLister cache_store.StoreToSecretsLister
mapLister cache_store.StoreToConfigmapLister
@ -173,7 +175,7 @@ func newIngressController(config *Configuration) *GenericController {
DeleteFunc: func(obj interface{}) {
delIng := obj.(*extensions.Ingress)
if !IsValidClass(delIng, config.IngressClass) {
glog.Infof("ignoring add for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
return
}
ic.recorder.Eventf(delIng, api.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", delIng.Namespace, delIng.Name))
@ -182,7 +184,7 @@ func newIngressController(config *Configuration) *GenericController {
UpdateFunc: func(old, cur interface{}) {
oldIng := old.(*extensions.Ingress)
curIng := cur.(*extensions.Ingress)
if !IsValidClass(curIng, config.IngressClass) {
if !IsValidClass(curIng, config.IngressClass) && !IsValidClass(oldIng, config.IngressClass) {
return
}
@ -292,6 +294,10 @@ func newIngressController(config *Configuration) *GenericController {
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
ic.nodeLister.Store, ic.nodeController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", ic.cfg.Namespace, fields.Everything()),
&api.Node{}, ic.cfg.ResyncPeriod, eventHandler)
if config.UpdateStatus {
ic.syncStatus = status.NewStatusSyncer(status.Config{
Client: config.Client,
@ -304,6 +310,15 @@ func newIngressController(config *Configuration) *GenericController {
ic.annotations = newAnnotationExtractor(ic)
ic.cfg.Backend.SetListers(ingress.StoreLister{
Ingress: ic.ingLister,
Service: ic.svcLister,
Node: ic.nodeLister,
Endpoint: ic.endpLister,
Secret: ic.secrLister,
ConfigMap: ic.mapLister,
})
return &ic
}
@ -330,7 +345,7 @@ func (ic GenericController) GetDefaultBackend() defaults.Backend {
return ic.cfg.Backend.BackendDefaults()
}
// GetSecret searchs for a secret in the local secrets Store
// GetSecret searches for a secret in the local secrets Store
func (ic GenericController) GetSecret(name string) (*api.Secret, error) {
s, exists, err := ic.secrLister.Store.GetByKey(name)
if err != nil {
@ -391,7 +406,7 @@ func (ic *GenericController) sync(key interface{}) error {
Backends: upstreams,
Servers: servers,
TCPEndpoints: ic.getStreamServices(ic.cfg.TCPConfigMapName, api.ProtocolTCP),
UPDEndpoints: ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP),
UDPEndpoints: ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP),
PassthroughBackends: passUpstreams,
})
if err != nil {
@ -564,6 +579,10 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress
for _, ingIf := range ings {
ing := ingIf.(*extensions.Ingress)
if !IsValidClass(ing, ic.cfg.IngressClass) {
continue
}
anns := ic.annotations.Extract(ing)
for _, rule := range ing.Spec.Rules {
@ -707,8 +726,13 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
for _, ingIf := range data {
ing := ingIf.(*extensions.Ingress)
if !IsValidClass(ing, ic.cfg.IngressClass) {
continue
}
secUpstream := ic.annotations.SecureUpstream(ing)
hz := ic.annotations.HealthCheck(ing)
affinity := ic.annotations.SessionAffinity(ing)
var defBackend string
if ing.Spec.Backend != nil {
@ -748,6 +772,14 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
if !upstreams[name].Secure {
upstreams[name].Secure = secUpstream
}
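// Affinity is only recorded the first time it is seen for an upstream;
// later Ingresses pointing to the same Service cannot override it.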
if upstreams[name].SessionAffinity.AffinityType == "" {
upstreams[name].SessionAffinity.AffinityType = affinity.AffinityType
if affinity.AffinityType == "cookie" {
upstreams[name].SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name
upstreams[name].SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash
}
}
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
if err != nil {
@ -849,6 +881,10 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[str
// initialize all the servers
for _, ingIf := range data {
ing := ingIf.(*extensions.Ingress)
if !IsValidClass(ing, ic.cfg.IngressClass) {
continue
}
// check if ssl passthrough is configured
sslpt := ic.annotations.SSLPassthrough(ing)
@ -877,6 +913,9 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[str
// configure default location and SSL
for _, ingIf := range data {
ing := ingIf.(*extensions.Ingress)
if !IsValidClass(ing, ic.cfg.IngressClass) {
continue
}
for _, rule := range ing.Spec.Rules {
host := rule.Host
@ -1025,6 +1064,7 @@ func (ic GenericController) Start() {
go ic.ingController.Run(ic.stopCh)
go ic.endpController.Run(ic.stopCh)
go ic.svcController.Run(ic.stopCh)
go ic.nodeController.Run(ic.stopCh)
go ic.secrController.Run(ic.stopCh)
go ic.mapController.Run(ic.stopCh)

View file

@ -84,6 +84,8 @@ func NewIngressController(backend ingress.Controller) *GenericController {
ingress controller should update the Ingress status IP/hostname. Default is true`)
)
backend.OverrideFlags(flags)
flags.AddGoFlagSet(flag.CommandLine)
flags.Parse(os.Args)

View file

@ -26,6 +26,7 @@ import (
"k8s.io/ingress/core/pkg/ingress"
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
"k8s.io/ingress/core/pkg/ingress/errors"
)
// DeniedKeyName name of the key that contains the reason to deny a location
@ -92,7 +93,10 @@ func IsValidClass(ing *extensions.Ingress, class string) bool {
return true
}
cc, _ := parser.GetStringAnnotation(ingressClassKey, ing)
cc, err := parser.GetStringAnnotation(ingressClassKey, ing)
if err != nil && !errors.IsMissingAnnotations(err) {
glog.Warningf("unexpected error reading ingress annotation: %v", err)
}
if cc == "" {
return true
}
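// (An empty annotation, like an empty class argument, is treated as a
// match-all; otherwise the annotation must match the configured class.)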

View file

@ -19,6 +19,8 @@ package controller
import (
"testing"
"reflect"
"k8s.io/ingress/core/pkg/ingress"
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
@ -29,7 +31,6 @@ import (
"k8s.io/ingress/core/pkg/ingress/resolver"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"reflect"
)
type fakeError struct{}
@ -54,6 +55,7 @@ func TestIsValidClass(t *testing.T) {
data := map[string]string{}
data[ingressClassKey] = "custom"
ing.SetAnnotations(data)
b = IsValidClass(ing, "custom")
if !b {
t.Errorf("Expected valid class but %v returned", b)
@ -62,6 +64,10 @@ func TestIsValidClass(t *testing.T) {
if b {
t.Errorf("Expected invalid class but %v returned", b)
}
b = IsValidClass(ing, "")
if !b {
t.Errorf("Expected invalid class but %v returned", b)
}
}
func TestIsHostValid(t *testing.T) {

View file

@ -28,7 +28,7 @@ type DefaultBackend interface {
GetDefaultBackend() defaults.Backend
}
// Secret has a method that searchs for secrets contenating
// Secret has a method that searches for secrets, concatenating
// the namespace and name using the / character
type Secret interface {
GetSecret(string) (*api.Secret, error)

View file

@ -0,0 +1,130 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"encoding/json"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
tc "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
)
func TestGetCurrentLeaderLeaderExist(t *testing.T) {
fkER := resourcelock.LeaderElectionRecord{
HolderIdentity: "currentLeader",
LeaseDurationSeconds: 30,
AcquireTime: unversioned.Now(),
RenewTime: unversioned.Now(),
LeaderTransitions: 3,
}
leaderInfo, _ := json.Marshal(fkER)
fkEndpoints := api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-test",
Namespace: api.NamespaceSystem,
Annotations: map[string]string{
resourcelock.LeaderElectionRecordAnnotationKey: string(leaderInfo),
},
},
}
fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}})
identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
if err != nil {
t.Fatalf("expected identitiy and endpoints but returned error %s", err)
}
if endpoints == nil {
t.Fatalf("returned nil but expected an endpoints")
}
if identity != "currentLeader" {
t.Fatalf("returned %v but expected %v", identity, "currentLeader")
}
}
func TestGetCurrentLeaderLeaderNotExist(t *testing.T) {
fkEndpoints := api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-test",
Namespace: api.NamespaceSystem,
Annotations: map[string]string{},
},
}
fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}})
identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
if err != nil {
t.Fatalf("unexpeted error: %v", err)
}
if endpoints == nil {
t.Fatalf("returned nil but expected an endpoints")
}
if identity != "" {
t.Fatalf("returned %s but expected %s", identity, "")
}
}
func TestGetCurrentLeaderAnnotationError(t *testing.T) {
fkEndpoints := api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-test",
Namespace: api.NamespaceSystem,
Annotations: map[string]string{
resourcelock.LeaderElectionRecordAnnotationKey: "just-test-error-leader-annotation",
},
},
}
fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}})
_, _, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
if err == nil {
t.Errorf("expected error")
}
}
func TestNewElection(t *testing.T) {
fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{
{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-test",
Namespace: api.NamespaceSystem,
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-test-020",
Namespace: api.NamespaceSystem,
},
},
}})
ne, err := NewElection("ingress-controller-test", "startLeader", api.NamespaceSystem, 4*time.Second, func(leader string) {
// do nothing
go t.Logf("execute callback fun, leader is: %s", leader)
}, fk)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if ne == nil {
t.Fatalf("unexpected nil")
}
}

View file

@ -33,7 +33,7 @@ import (
cache_store "k8s.io/ingress/core/pkg/cache"
"k8s.io/ingress/core/pkg/k8s"
strings "k8s.io/ingress/core/pkg/strings"
"k8s.io/ingress/core/pkg/strings"
"k8s.io/ingress/core/pkg/task"
)
@ -251,7 +251,7 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) {
return
}
curIPs := ing.Status.LoadBalancer.Ingress
curIPs := currIng.Status.LoadBalancer.Ingress
sort.Sort(loadBalancerIngressByIP(curIPs))
if ingressSliceEqual(newIPs, curIPs) {
glog.V(3).Infof("skipping update of Ingress %v/%v (there is no change)", currIng.Namespace, currIng.Name)

View file

@ -0,0 +1,487 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"os"
"sort"
"sync"
"testing"
"time"
cache_store "k8s.io/ingress/core/pkg/cache"
"k8s.io/ingress/core/pkg/k8s"
"k8s.io/ingress/core/pkg/task"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/util/sets"
)
func buildLoadBalancerIngressByIP() loadBalancerIngressByIP {
return []api.LoadBalancerIngress{
{
IP: "10.0.0.1",
Hostname: "foo1",
},
{
IP: "10.0.0.2",
Hostname: "foo2",
},
{
IP: "10.0.0.3",
Hostname: "",
},
{
IP: "",
Hostname: "foo4",
},
}
}
func buildSimpleClientSet() *testclient.Clientset {
return testclient.NewSimpleClientset(
&api.PodList{Items: []api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo1",
Namespace: api.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
Spec: api.PodSpec{
NodeName: "foo_node_2",
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "foo2",
Namespace: api.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_no",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "foo3",
Namespace: api.NamespaceSystem,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
Spec: api.PodSpec{
NodeName: "foo_node_2",
},
},
}},
&api.ServiceList{Items: []api.Service{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: buildLoadBalancerIngressByIP(),
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "foo_non_exist",
Namespace: api.NamespaceDefault,
},
},
}},
&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{
Name: "foo_node_1",
},
Status: api.NodeStatus{
Addresses: []api.NodeAddress{
{
Type: api.NodeLegacyHostIP,
Address: "10.0.0.1",
}, {
Type: api.NodeExternalIP,
Address: "10.0.0.2",
},
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "foo_node_2",
},
Status: api.NodeStatus{
Addresses: []api.NodeAddress{
{
Type: api.NodeLegacyHostIP,
Address: "11.0.0.1",
},
{
Type: api.NodeExternalIP,
Address: "11.0.0.2",
},
},
},
},
}},
&api.EndpointsList{Items: []api.Endpoints{
{
ObjectMeta: api.ObjectMeta{
Name: "ingress-controller-leader",
Namespace: api.NamespaceDefault,
},
}}},
&extensions.IngressList{Items: buildExtensionsIngresses()},
)
}
func fakeSynFn(interface{}) error {
return nil
}
func buildExtensionsIngresses() []extensions.Ingress {
return []extensions.Ingress{
{
ObjectMeta: api.ObjectMeta{
Name: "foo_ingress_1",
Namespace: api.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "10.0.0.1",
Hostname: "foo1",
},
},
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "foo_ingress_2",
Namespace: api.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{},
},
},
},
}
}
func buildIngressLIstener() cache_store.StoreToIngressLister {
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
ids := sets.NewString("foo_ingress_non_01")
for id := range ids {
store.Add(&extensions.Ingress{
ObjectMeta: api.ObjectMeta{
Name: id,
Namespace: api.NamespaceDefault,
}})
}
store.Add(&extensions.Ingress{
ObjectMeta: api.ObjectMeta{
Name: "foo_ingress_1",
Namespace: api.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: buildLoadBalancerIngressByIP(),
},
},
})
return cache_store.StoreToIngressLister{store}
}
func buildStatusSync() statusSync {
return statusSync{
pod: &k8s.PodInfo{
Name: "foo_base_pod",
Namespace: api.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
runLock: &sync.Mutex{},
syncQueue: task.NewTaskQueue(fakeSynFn),
Config: Config{
Client: buildSimpleClientSet(),
PublishService: api.NamespaceDefault + "/" + "foo",
IngressLister: buildIngressLIstener(),
},
}
}
func TestStatusActions(t *testing.T) {
// make sure election can be created
os.Setenv("POD_NAME", "foo1")
os.Setenv("POD_NAMESPACE", api.NamespaceDefault)
c := Config{
Client: buildSimpleClientSet(),
PublishService: "",
IngressLister: buildIngressLIstener(),
}
// create object
fkSync := NewStatusSyncer(c)
if fkSync == nil {
t.Fatalf("expected a valid Sync")
}
fk := fkSync.(statusSync)
ns := make(chan struct{})
// start it and wait for the election and sync actions
go fk.Run(ns)
// wait for the election
time.Sleep(100 * time.Millisecond)
// execute sync
fk.sync("just-test")
// PublishService is empty, so the running address is: ["11.0.0.2"]
// after the update, the ingress's IP should only be "11.0.0.2"
newIPs := []api.LoadBalancerIngress{{
IP: "11.0.0.2",
}}
fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1")
if err1 != nil {
t.Fatalf("unexpected error")
}
fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress1CurIPs, newIPs) {
t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs)
}
// execute shutdown
fk.Shutdown()
// ingress should be empty
newIPs2 := []api.LoadBalancerIngress{}
fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1")
if err2 != nil {
t.Fatalf("unexpected error")
}
fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress2CurIPs, newIPs2) {
t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, newIPs2)
}
// end test
ns <- struct{}{}
}
func TestCallback(t *testing.T) {
fk := buildStatusSync()
// do nothing
fk.callback("foo_base_pod")
}
func TestKeyfunc(t *testing.T) {
fk := buildStatusSync()
i := "foo_base_pod"
r, err := fk.keyfunc(i)
if err != nil {
t.Fatalf("unexpected error")
}
if r != i {
t.Errorf("returned %v but expected %v", r, i)
}
}
func TestRunningAddresessWithPublishService(t *testing.T) {
fk := buildStatusSync()
r, _ := fk.runningAddresess()
if r == nil {
t.Fatalf("returned nil but expected valid []string")
}
rl := len(r)
if len(r) != 4 {
t.Errorf("returned %v but expected %v", rl, 4)
}
}
func TestRunningAddresessWithPods(t *testing.T) {
fk := buildStatusSync()
fk.PublishService = ""
r, _ := fk.runningAddresess()
if r == nil {
t.Fatalf("returned nil but expected valid []string")
}
rl := len(r)
if len(r) != 1 {
t.Fatalf("returned %v but expected %v", rl, 1)
}
rv := r[0]
if rv != "11.0.0.2" {
t.Errorf("returned %v but expected %v", rv, "11.0.0.2")
}
}
func TestUpdateStatus(t *testing.T) {
fk := buildStatusSync()
newIPs := buildLoadBalancerIngressByIP()
sort.Sort(loadBalancerIngressByIP(newIPs))
fk.updateStatus(newIPs)
fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1")
if err1 != nil {
t.Fatalf("unexpected error")
}
fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress1CurIPs, newIPs) {
t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs)
}
fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_2")
if err2 != nil {
t.Fatalf("unexpected error")
}
fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress2CurIPs, []api.LoadBalancerIngress{}) {
t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []api.LoadBalancerIngress{})
}
}
func TestSliceToStatus(t *testing.T) {
fkEndpoints := []string{
"10.0.0.1",
"2001:db8::68",
"opensource-k8s-ingress",
}
r := sliceToStatus(fkEndpoints)
if r == nil {
t.Fatalf("returned nil but expected a valid []api.LoadBalancerIngress")
}
rl := len(r)
if rl != 3 {
t.Fatalf("returned %v but expected %v", rl, 3)
}
re1 := r[0]
if re1.Hostname != "opensource-k8s-ingress" {
t.Fatalf("returned %v but expected %v", re1, api.LoadBalancerIngress{Hostname: "opensource-k8s-ingress"})
}
re2 := r[1]
if re2.IP != "10.0.0.1" {
t.Fatalf("returned %v but expected %v", re2, api.LoadBalancerIngress{IP: "10.0.0.1"})
}
re3 := r[2]
if re3.IP != "2001:db8::68" {
t.Fatalf("returned %v but expected %v", re3, api.LoadBalancerIngress{IP: "2001:db8::68"})
}
}
func TestIngressSliceEqual(t *testing.T) {
fk1 := buildLoadBalancerIngressByIP()
fk2 := append(buildLoadBalancerIngressByIP(), api.LoadBalancerIngress{
IP: "10.0.0.5",
Hostname: "foo5",
})
fk3 := buildLoadBalancerIngressByIP()
fk3[0].Hostname = "foo_no_01"
fk4 := buildLoadBalancerIngressByIP()
fk4[2].IP = "11.0.0.3"
fooTests := []struct {
lhs []api.LoadBalancerIngress
rhs []api.LoadBalancerIngress
er bool
}{
{fk1, fk1, true},
{fk2, fk1, false},
{fk3, fk1, false},
{fk4, fk1, false},
{fk1, nil, false},
{nil, nil, true},
{[]api.LoadBalancerIngress{}, []api.LoadBalancerIngress{}, true},
}
for _, fooTest := range fooTests {
r := ingressSliceEqual(fooTest.lhs, fooTest.rhs)
if r != fooTest.er {
t.Errorf("returned %v but expected %v", r, fooTest.er)
}
}
}
func TestLoadBalancerIngressByIPLen(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
el int
}{
{[]api.LoadBalancerIngress{}, 0},
{buildLoadBalancerIngressByIP(), 4},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.ips.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v ", r, fooTest.el)
}
}
}
func TestLoadBalancerIngressByIPSwap(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
i int
j int
}{
{buildLoadBalancerIngressByIP(), 0, 1},
{buildLoadBalancerIngressByIP(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.ips[fooTest.i]
fooj := fooTest.ips[fooTest.j]
fooTest.ips.Swap(fooTest.i, fooTest.j)
if fooi.IP != fooTest.ips[fooTest.j].IP ||
fooj.IP != fooTest.ips[fooTest.i].IP {
t.Errorf("failed to swap for loadBalancerIngressByIP")
}
}
}
func TestLoadBalancerIngressByIPLess(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
i int
j int
er bool
}{
{buildLoadBalancerIngressByIP(), 0, 1, true},
{buildLoadBalancerIngressByIP(), 2, 1, false},
}
for _, fooTest := range fooTests {
r := fooTest.ips.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v ", r, fooTest.er)
}
}
}

View file

@ -17,9 +17,13 @@ limitations under the License.
package ingress
import (
"github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/healthz"
cache_store "k8s.io/ingress/core/pkg/cache"
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
"k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist"
@ -81,11 +85,27 @@ type Controller interface {
OnUpdate(Configuration) ([]byte, error)
// ConfigMap content of --configmap
SetConfig(*api.ConfigMap)
// SetListers allows access to the store listers present in the generic controller.
// This avoids the use of the kubernetes client.
SetListers(StoreLister)
// BackendDefaults returns the minimum settings required to configure the
// communication to endpoints
BackendDefaults() defaults.Backend
// Info returns information about the ingress controller
Info() *BackendInfo
// OverrideFlags allow the customization of the flags in the backend
OverrideFlags(*pflag.FlagSet)
}
// StoreLister contains the configured stores for ingresses, services,
// endpoints, secrets and configmaps.
type StoreLister struct {
Ingress cache_store.StoreToIngressLister
Service cache.StoreToServiceLister
Node cache.StoreToNodeLister
Endpoint cache.StoreToEndpointsLister
Secret cache_store.StoreToSecretsLister
ConfigMap cache_store.StoreToConfigmapLister
}
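// As an illustrative sketch only (the backend type is hypothetical), an
// implementation can satisfy SetListers by retaining the listers and
// reading from the local stores instead of querying the API server:
//
//	type sketchBackend struct {
//		listers StoreLister
//	}
//
//	func (s *sketchBackend) SetListers(l StoreLister) { s.listers = l }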
// BackendInfo returns information about the backend.
@ -113,9 +133,9 @@ type Configuration struct {
// TCPEndpoints contain endpoints for tcp streams handled by this backend
// +optional
TCPEndpoints []*Location `json:"tcpEndpoints,omitempty"`
// UPDEndpoints contain endpoints for udp streams handled by this backend
// UDPEndpoints contain endpoints for udp streams handled by this backend
// +optional
UPDEndpoints []*Location `json:"udpEndpoints,omitempty"`
UDPEndpoints []*Location `json:"udpEndpoints,omitempty"`
// PassthroughBackend contains the backends used for SSL passthrough.
// It contains information about the associated Server Name Indication (SNI).
// +optional
@ -134,9 +154,29 @@ type Backend struct {
Secure bool `json:"secure"`
// Endpoints contains the list of endpoints currently running
Endpoints []Endpoint `json:"endpoints"`
// SessionAffinity contains the SessionAffinityConfig object with stickiness configuration
SessionAffinity SessionAffinityConfig
}
// Endpoint describes a kubernetes endpoint in an backend
// SessionAffinityConfig describes different affinity configurations for new sessions.
// Once a session is mapped to a backend based on some affinity setting, it
// retains that mapping until the backend goes down, or the ingress controller
// restarts. Exactly one of these values will be set on the upstream, since multiple
// affinity values are incompatible. Once set, the backend makes no guarantees
// about honoring updates.
type SessionAffinityConfig struct {
AffinityType string `json:"name"`
CookieSessionAffinity CookieSessionAffinity
}
// CookieSessionAffinity defines the structure used in Affinity configured by Cookies.
type CookieSessionAffinity struct {
Name string `json:"name"`
Hash string `json:"hash"`
}
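// As an example (values taken from the sticky-session docs, not a fixed
// default), cookie affinity with name "route" and hash "sha1" maps to:
//
//	SessionAffinityConfig{
//		AffinityType: "cookie",
//		CookieSessionAffinity: CookieSessionAffinity{
//			Name: "route",
//			Hash: "sha1",
//		},
//	}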
// Endpoint describes a kubernetes endpoint in a backend
type Endpoint struct {
// Address IP address of the endpoint
Address string `json:"address"`

View file

@ -1,3 +1,21 @@
# Ingress documentation and examples
This directory contains documentation.
## File naming convention
Try to create a README file in every directory that contains documentation, and index
out from there; that's what readers will notice first. Use lower case for other
file names unless you have a reason to draw someone's attention to it.
Avoid CamelCase.
Rationale:
* Files that are common to all controllers, or heavily index other files, are
named using ALL CAPS. This is done to indicate to the user that they should
visit these files first. Examples include PREREQUISITES and README.
* Files specific to a controller, or files that contain information about
various controllers, are named using all lower case. Examples include
configuration and catalog files.

View file

@ -3,5 +3,4 @@
This is a non-comprehensive list of existing ingress controllers.
* [Dummy controller backend](/examples/custom-controller)
* [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress)

View file

@ -81,6 +81,14 @@ You may want to consider [using the VM's docker
daemon](https://github.com/kubernetes/minikube/blob/master/README.md#reusing-the-docker-daemon)
when developing.
### CoreOS Kubernetes
The [CoreOS Kubernetes](https://github.com/coreos/coreos-kubernetes/) repository has `Vagrantfile`
scripts to easily create a new Kubernetes cluster on VirtualBox, VMware or AWS.
Follow the CoreOS [doc](https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single.html)
for detailed instructions.
## Deploy the ingress controller
You can deploy an ingress controller on the cluster setup in the previous step

View file

@ -1,7 +1,7 @@
# Ingress examples
This directory contains a catalog of examples on how to run, configure and
scale Ingress. Please review the [prerequisities](prerequisites.md) before
scale Ingress. Please review the [prerequisites](PREREQUISITES.md) before
trying them.
## Basic cross platform
@ -75,4 +75,8 @@ Name | Description | Platform | Complexity Level
-----| ----------- | ---------- | ----------------
Dummy | A simple dummy controller that logs updates | * | Advanced
## Customization
Name | Description | Platform | Complexity Level
-----| ----------- | ---------- | ----------------
custom-headers | Set custom headers before sending traffic to backends | nginx | Advanced

View file

@ -0,0 +1,77 @@
# Sticky Session
This example demonstrates how to achieve session affinity using cookies
## Prerequisites
You will need to make sure your Ingress targets exactly one Ingress
controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
and that you have an ingress controller [running](/examples/deployment) in your cluster.
You will also need to deploy multiple replicas of your application that show up as endpoints for the Service referenced in the Ingress object, to test session stickiness.
Using a deployment with only one replica doesn't set the 'sticky' cookie.
## Deployment
Session stickiness is achieved through 3 annotations on the Ingress, as shown in the [example](sticky-ingress.yaml).
|Name|Description|Values|
| --- | --- | --- |
|ingress.kubernetes.io/affinity|Sets the affinity type|string (in NGINX only ``cookie`` is possible)|
|ingress.kubernetes.io/session-cookie-name|Name of the cookie that will be used|string (defaults to `route`)|
|ingress.kubernetes.io/session-cookie-hash|Type of hash that will be used in cookie value|sha1/md5/index|
You can create the ingress to test this:
```console
$ kubectl create -f sticky-ingress.yaml
```
## Validation
You can confirm that the Ingress works.
```console
$ kubectl describe ing nginx-test
Name: nginx-test
Namespace: default
Address:
Default backend: default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080)
Rules:
Host Path Backends
---- ---- --------
stickyingress.example.com
/ nginx-service:80 (<none>)
Annotations:
affinity: cookie
session-cookie-hash: sha1
session-cookie-name: route
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
7s 7s 1 {nginx-ingress-controller } Normal CREATE default/nginx-test
$ curl -I http://stickyingress.example.com
HTTP/1.1 200 OK
Server: nginx/1.11.9
Date: Fri, 10 Feb 2017 14:11:12 GMT
Content-Type: text/html
Content-Length: 612
Connection: keep-alive
Set-Cookie: route=a9907b79b248140b56bb13723f72b67697baac3d; Path=/; HttpOnly
Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT
ETag: "58875e6b-264"
Accept-Ranges: bytes
```
In the example above, you can see a line containing `Set-Cookie: route`, which sets the stickiness cookie defined by the annotations.
This cookie is created by NGINX and contains the hash of the upstream used in that request.
If the user changes this cookie, NGINX creates a new one and redirects the user to another upstream.
If the backend pool grows, NGINX keeps sending requests from the same client to the server that handled that client's first request, even if it's overloaded.
If the backend server is removed, requests are re-routed to another upstream server and NGINX creates a new cookie, since the previous hash is no longer valid.
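To illustrate the shape of that cookie value, here is a minimal Go sketch. It assumes the value is a hex-encoded SHA-1 digest of the chosen upstream endpoint; the exact input NGINX hashes may differ.

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	// Hypothetical upstream endpoint; NGINX stores a digest of the
	// upstream chosen for the first request in the cookie.
	upstream := "10.2.18.5:8080"
	fmt.Printf("route=%x\n", sha1.Sum([]byte(upstream)))
}
```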
When more than one Ingress object points to the same Service but only some of them configure affinity, the first created Ingress wins.
This means you can end up with session affinity configured in one Ingress that is not reflected in the NGINX configuration, because another Ingress object pointing to the same Service was created first and doesn't configure it.

View file

@ -0,0 +1,19 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: nginx-test
annotations:
kubernetes.io/ingress.class: "nginx"
ingress.kubernetes.io/affinity: "cookie"
ingress.kubernetes.io/session-cookie-name: "route"
ingress.kubernetes.io/session-cookie-hash: "sha1"
spec:
rules:
- host: stickyingress.example.com
http:
paths:
- backend:
serviceName: nginx-service
servicePort: 80
path: /

View file

@ -32,7 +32,7 @@ container: server
docker build --pull -t $(PREFIX)-$(ARCH):$(TAG) .
push: container
gcloud docker push $(PREFIX)-$(ARCH):$(TAG)
gcloud docker -- push $(PREFIX)-$(ARCH):$(TAG)
clean:
rm -f server

View file

@ -0,0 +1,76 @@
# Deploying the Nginx Ingress controller
This example aims to demonstrate the deployment of an nginx ingress controller and
the use of a ConfigMap to configure a custom list of headers to be passed to the
upstream server.
## Default Backend
The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:
```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 28s
```
## Custom configuration
```console
$ cat nginx-load-balancer-conf.yaml
apiVersion: v1
data:
proxy-set-headers: "default/custom-headers"
kind: ConfigMap
metadata:
name: nginx-load-balancer-conf
```
```console
$ kubectl create -f nginx-load-balancer-conf.yaml
```
## Custom headers
```console
$ cat custom-headers.yaml
apiVersion: v1
data:
X-Different-Name: "true"
X-Request-Start: t=${msec}
X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
name: proxy-headers
namespace: default
```
```console
$ kubectl create -f custom-headers.yaml
```
## Controller
You can deploy the controller as follows:
```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created
$ kubectl -n kube-system get po
NAME READY STATUS RESTARTS AGE
default-http-backend-2657704409-qgwdd 1/1 Running 0 2m
nginx-ingress-controller-873061567-4n3k2 1/1 Running 0 42s
```
## Test
Check that the contents of the ConfigMap are present in the nginx.conf file using:
`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
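For example, to check a single header (a sketch; the pod name will differ in your cluster):

```console
$ kubectl -n kube-system exec nginx-ingress-controller-873061567-4n3k2 -- cat /etc/nginx/nginx.conf | grep X-Request-Start
```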

View file

@ -0,0 +1,9 @@
apiVersion: v1
data:
X-Different-Name: "true"
X-Request-Start: t=${msec}
X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
name: proxy-headers
namespace: kube-system

View file

@ -0,0 +1,51 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

View file

@ -0,0 +1,53 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 is already taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf

View file

@ -0,0 +1,7 @@
apiVersion: v1
data:
proxy-set-headers: "kube-system/custom-headers"
kind: ConfigMap
metadata:
name: nginx-load-balancer-conf
namespace: kube-system

View file

@ -0,0 +1,151 @@
# Deploying HAProxy Ingress Controller
If you don't have a Kubernetes cluster, please refer to [setup](/docs/dev/setup.md)
for instructions on how to create a new one.
## Prerequisites
This ingress controller doesn't yet have support for
[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn
down any existing ingress controllers before running HAProxy Ingress controller or
they will fight for Ingresses. This includes any cloudprovider controller.
This document also has the following prerequisites:
* Deploy a [web app](/examples/PREREQUISITES.md#test-http-service) for testing
* Create a [TLS secret](/examples/PREREQUISITES.md#tls-certificates) named `tls-secret` to be used as default TLS certificate
The web app can be created as follows:
```console
$ kubectl run http-svc \
--image=gcr.io/google_containers/echoserver:1.3 \
--port=8080 \
--replicas=2 \
--expose
```
Creating the TLS secret:
```console
$ openssl req \
-x509 -newkey rsa:2048 -nodes -days 365 \
-keyout tls.key -out tls.crt -subj '/CN=localhost'
$ kubectl create secret tls tls-secret --cert=tls.crt --key=tls.key
$ rm -v tls.crt tls.key
```
## Default backend
Deploy a default backend used to serve `404 Not Found` pages:
```console
$ kubectl run ingress-default-backend \
--image=gcr.io/google_containers/defaultbackend:1.0 \
--port=8080 \
--limits=cpu=10m,memory=20Mi \
--expose
```
Check if the default backend is up and running:
```console
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
ingress-default-backend-1110790216-gqr61 1/1 Running 0 10s
```
## Controller
Deploy HAProxy Ingress:
```console
$ kubectl create -f haproxy-ingress.yaml
```
Check if the controller was successfully deployed:
```console
$ kubectl get pod -w
NAME READY STATUS RESTARTS AGE
haproxy-ingress-2556761959-tv20k 1/1 Running 0 12s
ingress-default-backend-1110790216-gqr61 1/1 Running 0 3m
^C
```
Deploy the ingress resource of our already deployed web app:
```console
$ kubectl create -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: app
spec:
rules:
- host: foo.bar
http:
paths:
- path: /
backend:
serviceName: http-svc
servicePort: 8080
EOF
```
Exposing the controller as a `type=NodePort` service:
```console
$ kubectl expose deploy/haproxy-ingress --type=NodePort
$ kubectl get svc/haproxy-ingress -oyaml
```
Look for `nodePort` field next to `port: 80`.
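Alternatively, a one-liner can extract it directly (a sketch using kubectl's jsonpath output):

```console
$ kubectl get svc/haproxy-ingress -o jsonpath='{.spec.ports[?(@.port==80)].nodePort}'
```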
Change below `172.17.4.99` to the host's IP and `30876` to the `nodePort`:
```console
$ curl -i 172.17.4.99:30876
HTTP/1.1 404 Not Found
Date: Mon, 05 Feb 2017 22:59:36 GMT
Content-Length: 21
Content-Type: text/plain; charset=utf-8
default backend - 404
```
The default backend is used because the host was not found.
Now try to send a header:
```console
$ curl -i 172.17.4.99:30876 -H 'Host: foo.bar'
HTTP/1.1 200 OK
Server: nginx/1.9.11
Date: Mon, 05 Feb 2017 23:00:33 GMT
Content-Type: text/plain
Transfer-Encoding: chunked
CLIENT VALUES:
client_address=10.2.18.5
command=GET
real path=/
query=nil
request_version=1.1
request_uri=http://foo.bar:8080/
...
```
## Troubleshooting
If you have any problem, check logs and events of HAProxy Ingress POD:
```console
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
haproxy-ingress-2556761959-tv20k 1/1 Running 0 9m
...
$ kubectl logs haproxy-ingress-2556761959-tv20k
$ kubectl describe pod/haproxy-ingress-2556761959-tv20k
```

View file

@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
spec:
replicas: 1
selector:
matchLabels:
run: haproxy-ingress
template:
metadata:
labels:
run: haproxy-ingress
spec:
containers:
- name: haproxy-ingress
image: quay.io/jcmoraisjr/haproxy-ingress
args:
- --default-backend-service=default/ingress-default-backend
- --default-ssl-certificate=default/tls-secret
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
- name: stat
containerPort: 1936
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace

View file

@ -0,0 +1,116 @@
# HAProxy Ingress TLS termination
## Prerequisites
This document has the following prerequisites:
* Deploy the [HAProxy Ingress controller](/examples/deployment/haproxy); you should end up with the controller, a sample web app and the default TLS secret
* Create [*another* secret](/examples/PREREQUISITES.md#tls-certificates) named `foobar-ssl` and subject `'/CN=foo.bar'`
As mentioned in the deployment instructions, you MUST turn down any existing
ingress controllers before running HAProxy Ingress.
## Using default TLS certificate
Update the ingress resource in order to add TLS termination to host `foo.bar`:
```console
$ kubectl replace -f ingress-tls-default.yaml
```
The difference from the starting ingress resource:
```console
metadata:
name: app
spec:
+ tls:
+ - hosts:
+ - foo.bar
rules:
- host: foo.bar
http:
```
Trying default backend:
```console
$ curl -iL 172.17.4.99:30876
HTTP/1.1 404 Not Found
Date: Tue, 07 Feb 2017 00:06:07 GMT
Content-Length: 21
Content-Type: text/plain; charset=utf-8
default backend - 404
```
Now telling the controller we are `foo.bar`:
```console
$ curl -iL 172.17.4.99:30876 -H 'Host: foo.bar'
HTTP/1.1 302 Found
Cache-Control: no-cache
Content-length: 0
Location: https://foo.bar/
Connection: close
^C
```
Note the `Location` header - this would redirect us to the correct server.
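If you want to follow the redirect by hand, one option (a sketch, assuming `31692` is your TLS NodePort as used below) is to hit the TLS port directly and skip certificate verification:

```console
$ curl -ik https://172.17.4.99:31692/ -H 'Host: foo.bar'
```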
Checking the default certificate - change below `31692` to the TLS port:
```console
$ openssl s_client -connect 172.17.4.99:31692
...
subject=/CN=localhost
issuer=/CN=localhost
---
```
... and the `foo.bar` certificate:
```console
$ openssl s_client -connect 172.17.4.99:31692 -servername foo.bar
...
subject=/CN=localhost
issuer=/CN=localhost
---
```
## Using a new TLS certificate
Now let's reference the new certificate for our domain. Note that the secret
`foobar-ssl` should be created as described in the [prerequisites](#prerequisites).
```console
$ kubectl replace -f ingress-tls-foobar.yaml
```
Here is the difference:
```console
tls:
- hosts:
- foo.bar
+ secretName: foobar-ssl
rules:
- host: foo.bar
http:
```
Now the `foo.bar` certificate should be used to terminate TLS:
```console
$ openssl s_client -connect 172.17.4.99:31692
...
subject=/CN=localhost
issuer=/CN=localhost
---
$ openssl s_client -connect 172.17.4.99:31692 -servername foo.bar
...
subject=/CN=foo.bar
issuer=/CN=foo.bar
---
```

View file

@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: app
spec:
tls:
- hosts:
- foo.bar
rules:
- host: foo.bar
http:
paths:
- path: /
backend:
serviceName: http-svc
servicePort: 8080

View file

@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: app
spec:
tls:
- hosts:
- foo.bar
secretName: foobar-ssl
rules:
- host: foo.bar
http:
paths:
- path: /
backend:
serviceName: http-svc
servicePort: 8080

View file

@ -8,7 +8,7 @@ container:
docker build --pull -t $(PREFIX):$(TAG) .
push: container
gcloud docker push $(PREFIX):$(TAG)
gcloud docker -- push $(PREFIX):$(TAG)
clean:
docker rmi -f $(PREFIX):$(TAG) || true