Merge branch 'upstream' into nginx/extauth_headers

commit 4c2b2512f5

13 changed files with 370 additions and 48 deletions
@@ -66,7 +66,7 @@ In addition to the built-in functions provided by the Go package the following f
 - empty: returns true if the specified parameter (string) is empty
 - contains: [strings.Contains](https://golang.org/pkg/strings/#Contains)
-- hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#Contains)
+- hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#HasPrefix)
 - hasSuffix: [strings.HasSuffix](https://golang.org/pkg/strings/#HasSuffix)
 - toUpper: [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper)
 - toLower: [strings.ToLower](https://golang.org/pkg/strings/#ToLower)
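
These helpers map onto Go's `text/template` machinery. A minimal sketch of how such a helper set can be registered through a `template.FuncMap` (the wiring below is illustrative, not the controller's actual code):

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Helper set mirroring the list above; the controller registers its own
	// implementations, this is only a self-contained illustration.
	funcs := template.FuncMap{
		"empty":     func(s string) bool { return len(s) == 0 },
		"contains":  strings.Contains,
		"hasPrefix": strings.HasPrefix,
		"hasSuffix": strings.HasSuffix,
		"toUpper":   strings.ToUpper,
		"toLower":   strings.ToLower,
	}

	tmpl := template.Must(template.New("demo").Funcs(funcs).Parse(
		`{{ if not (empty .Host) }}server_name {{ toLower .Host }};{{ end }}`))

	// Prints: server_name foo.bar;
	if err := tmpl.Execute(os.Stdout, struct{ Host string }{Host: "FOO.BAR"}); err != nil {
		panic(err)
	}
}
```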
@@ -132,10 +132,10 @@ NGINX master process died (%v): %v
 	// we wait until the workers are killed
 	for {
 		conn, err := net.DialTimeout("tcp", "127.0.0.1:80", 1*time.Second)
-		if err == nil {
-			conn.Close()
+		if err != nil {
+			break
 		}
+		conn.Close()
 		time.Sleep(1 * time.Second)
 	}
 	// start a new nginx master process
@@ -331,7 +331,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
 		PassthroughBackends: ingressCfg.PassthroughBackends,
 		Servers:             ingressCfg.Servers,
 		TCPBackends:         ingressCfg.TCPEndpoints,
-		UDPBackends:         ingressCfg.UPDEndpoints,
+		UDPBackends:         ingressCfg.UDPEndpoints,
 		HealthzURI:          ngxHealthPath,
 		CustomErrors:        len(cfg.CustomHTTPErrors) > 0,
 		Cfg:                 cfg,
@@ -203,7 +203,8 @@ http {
         server_name {{ $server.Hostname }};
         listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $index 0 }} ipv6only=off{{end}}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}};
         {{/* Listen on 442 because port 443 is used in the stream section */}}
-        {{ if not (empty $server.SSLCertificate) }}listen 442 {{ if $cfg.UseProxyProtocol }}proxy_protocol{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
+        {{/* This listen cannot contain the proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
+        {{ if not (empty $server.SSLCertificate) }}listen 442 {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
         {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
         # PEM sha: {{ $server.SSLPemChecksum }}
         ssl_certificate {{ $server.SSLCertificate }};
@@ -437,8 +438,7 @@ stream {
     {{ buildSSPassthroughUpstreams $backends .PassthroughBackends }}

     server {
-        listen [::]:443 ipv6only=off;
-        {{ if $cfg.UseProxyProtocol }}proxy_protocol on;{{ end }}
+        listen [::]:443 ipv6only=off{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
         proxy_pass $stream_upstream;
         ssl_preread on;
     }
@@ -330,7 +330,7 @@ func (ic GenericController) GetDefaultBackend() defaults.Backend {
 	return ic.cfg.Backend.BackendDefaults()
 }

-// GetSecret searchs for a secret in the local secrets Store
+// GetSecret searches for a secret in the local secrets Store
 func (ic GenericController) GetSecret(name string) (*api.Secret, error) {
 	s, exists, err := ic.secrLister.Store.GetByKey(name)
 	if err != nil {
@@ -390,8 +390,8 @@ func (ic *GenericController) sync(key interface{}) error {
 	data, err := ic.cfg.Backend.OnUpdate(ingress.Configuration{
 		Backends:            upstreams,
 		Servers:             servers,
-		TCPEndpoints:        ic.getTCPServices(),
-		UPDEndpoints:        ic.getUDPServices(),
+		TCPEndpoints:        ic.getStreamServices(ic.cfg.TCPConfigMapName, api.ProtocolTCP),
+		UDPEndpoints:        ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP),
 		PassthroughBackends: passUpstreams,
 	})
 	if err != nil {
@@ -411,54 +411,31 @@ func (ic *GenericController) sync(key interface{}) error {
 	return nil
 }

-func (ic *GenericController) getTCPServices() []*ingress.Location {
-	if ic.cfg.TCPConfigMapName == "" {
-		// no configmap for TCP services
+func (ic *GenericController) getStreamServices(configmapName string, proto api.Protocol) []*ingress.Location {
+	if configmapName == "" {
+		// no configmap configured
 		return []*ingress.Location{}
 	}

-	ns, name, err := k8s.ParseNameNS(ic.cfg.TCPConfigMapName)
+	ns, name, err := k8s.ParseNameNS(configmapName)
 	if err != nil {
-		glog.Warningf("%v", err)
+		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
 		return []*ingress.Location{}
 	}
-	tcpMap, err := ic.getConfigMap(ns, name)
+
+	configmap, err := ic.getConfigMap(ns, name)
 	if err != nil {
-		glog.V(5).Infof("no configured tcp services found: %v", err)
+		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
 		return []*ingress.Location{}
 	}
-
-	return ic.getStreamServices(tcpMap.Data, api.ProtocolTCP)
-}
-
-func (ic *GenericController) getUDPServices() []*ingress.Location {
-	if ic.cfg.UDPConfigMapName == "" {
-		// no configmap for TCP services
-		return []*ingress.Location{}
-	}
-
-	ns, name, err := k8s.ParseNameNS(ic.cfg.UDPConfigMapName)
-	if err != nil {
-		glog.Warningf("%v", err)
-		return []*ingress.Location{}
-	}
-	tcpMap, err := ic.getConfigMap(ns, name)
-	if err != nil {
-		glog.V(3).Infof("no configured tcp services found: %v", err)
-		return []*ingress.Location{}
-	}
-
-	return ic.getStreamServices(tcpMap.Data, api.ProtocolUDP)
-}
-
-func (ic *GenericController) getStreamServices(data map[string]string, proto api.Protocol) []*ingress.Location {
 	var svcs []*ingress.Location
 	// k -> port to expose
 	// v -> <namespace>/<service name>:<port from service to be used>
-	for k, v := range data {
+	for k, v := range configmap.Data {
 		_, err := strconv.Atoi(k)
 		if err != nil {
-			glog.Warningf("%v is not valid as a TCP port", k)
+			glog.Warningf("%v is not valid as a TCP/UDP port", k)
 			continue
 		}

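
The comments above describe the configmap convention for stream services: each key is the port to expose and each value is `<namespace>/<service name>:<port from service to be used>`. A minimal sketch of that convention (the sample entry and the parsing below are illustrative only, not the controller's code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical configmap data following the k -> port, v -> ns/svc:port convention.
	data := map[string]string{
		"9000": "default/example-go:8080",
	}
	for k, v := range data {
		port, err := strconv.Atoi(k)
		if err != nil {
			fmt.Printf("%v is not valid as a TCP/UDP port\n", k)
			continue
		}
		parts := strings.SplitN(v, ":", 2)
		if len(parts) != 2 {
			fmt.Printf("invalid value %q, expected <namespace>/<service>:<port>\n", v)
			continue
		}
		fmt.Printf("expose port %d -> service %s, target port %s\n", port, parts[0], parts[1])
	}
}
```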
@@ -28,7 +28,7 @@ type DefaultBackend interface {
 	GetDefaultBackend() defaults.Backend
 }

-// Secret has a method that searchs for secrets contenating
+// Secret has a method that searches for secrets concatenating
 // the namespace and name using a the character /
 type Secret interface {
 	GetSecret(string) (*api.Secret, error)
@@ -113,9 +113,9 @@ type Configuration struct {
 	// TCPEndpoints contain endpoints for tcp streams handled by this backend
 	// +optional
 	TCPEndpoints []*Location `json:"tcpEndpoints,omitempty"`
-	// UPDEndpoints contain endpoints for udp streams handled by this backend
+	// UDPEndpoints contain endpoints for udp streams handled by this backend
 	// +optional
-	UPDEndpoints []*Location `json:"udpEndpoints,omitempty"`
+	UDPEndpoints []*Location `json:"udpEndpoints,omitempty"`
 	// PassthroughBackend contains the backends used for SSL passthrough.
 	// It contains information about the associated Server Name Indication (SNI).
 	// +optional
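
The `+optional` marker pairs with the `omitempty` JSON tag: when the slice is nil the field is dropped from the serialized configuration. A small illustrative sketch (the struct and values below are hypothetical, not the real `ingress.Configuration`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical struct mirroring the optional/omitempty pattern above.
type streamConfig struct {
	TCPEndpoints []string `json:"tcpEndpoints,omitempty"`
	UDPEndpoints []string `json:"udpEndpoints,omitempty"`
}

func main() {
	// UDPEndpoints is nil, so it is omitted from the output entirely.
	b, _ := json.Marshal(streamConfig{TCPEndpoints: []string{"default/example-go:8080"}})
	fmt.Println(string(b)) // {"tcpEndpoints":["default/example-go:8080"]}
}
```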
@@ -3,5 +3,4 @@
 This is a non-comprehensive list of existing ingress controllers.

 * [Dummy controller backend](/examples/custom-controller)
-
 * [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress)
@@ -81,6 +81,14 @@ You may want to consider [using the VM's docker
 daemon](https://github.com/kubernetes/minikube/blob/master/README.md#reusing-the-docker-daemon)
 when developing.

+### CoreOS Kubernetes
+
+The [CoreOS Kubernetes](https://github.com/coreos/coreos-kubernetes/) repository has `Vagrantfile`
+scripts to easily create a new Kubernetes cluster on VirtualBox, VMware or AWS.
+
+Follow the CoreOS [doc](https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single.html)
+for detailed instructions.
+
 ## Deploy the ingress controller

 You can deploy an ingress controller on the cluster setup in the previous step
examples/deployment/haproxy/README.md (new file, 151 lines)
@@ -0,0 +1,151 @@
# Deploying HAProxy Ingress Controller

If you don't have a Kubernetes cluster, please refer to [setup](/docs/dev/setup.md)
for instructions on how to create a new one.

## Prerequisites

This ingress controller doesn't yet have support for
[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn
down any existing ingress controllers before running the HAProxy Ingress controller,
or they will fight for Ingresses. This includes any cloudprovider controller.

This document also has the following prerequisites:

* Deploy a [web app](/examples/PREREQUISITES.md#test-http-service) for testing
* Create a [TLS secret](/examples/PREREQUISITES.md#tls-certificates) named `tls-secret` to be used as the default TLS certificate

The web app can be created as follows:

```console
$ kubectl run http-svc \
    --image=gcr.io/google_containers/echoserver:1.3 \
    --port=8080 \
    --replicas=2 \
    --expose
```

Creating the TLS secret:

```console
$ openssl req \
    -x509 -newkey rsa:2048 -nodes -days 365 \
    -keyout tls.key -out tls.crt -subj '/CN=localhost'
$ kubectl create secret tls tls-secret --cert=tls.crt --key=tls.key
$ rm -v tls.crt tls.key
```

## Default backend

Deploy a default backend used to serve `404 Not Found` pages:

```console
$ kubectl run ingress-default-backend \
    --image=gcr.io/google_containers/defaultbackend:1.0 \
    --port=8080 \
    --limits=cpu=10m,memory=20Mi \
    --expose
```

Check if the default backend is up and running:

```console
$ kubectl get pod
NAME                                       READY     STATUS    RESTARTS   AGE
ingress-default-backend-1110790216-gqr61   1/1       Running   0          10s
```

## Controller

Deploy HAProxy Ingress:

```console
$ kubectl create -f haproxy-ingress.yaml
```

Check if the controller was successfully deployed:

```console
$ kubectl get pod -w
NAME                                       READY     STATUS    RESTARTS   AGE
haproxy-ingress-2556761959-tv20k           1/1       Running   0          12s
ingress-default-backend-1110790216-gqr61   1/1       Running   0          3m
^C
```

Deploy the ingress resource of our already deployed web app:

```console
$ kubectl create -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: app
spec:
  rules:
  - host: foo.bar
    http:
      paths:
      - path: /
        backend:
          serviceName: http-svc
          servicePort: 80
EOF
```

Exposing the controller as a `type=NodePort` service:

```console
$ kubectl expose deploy/haproxy-ingress --type=NodePort
$ kubectl get svc/haproxy-ingress -oyaml
```

Look for the `nodePort` field next to `port: 80`.

In the commands below, change `172.17.4.99` to the host's IP and `30876` to the `nodePort`:

```console
$ curl -i 172.17.4.99:30876
HTTP/1.1 404 Not Found
Date: Mon, 05 Feb 2017 22:59:36 GMT
Content-Length: 21
Content-Type: text/plain; charset=utf-8

default backend - 404
```

The default backend answered because the requested host was not found.

Now try sending the `Host` header:

```console
$ curl -i 172.17.4.99:30876 -H 'Host: foo.bar'
HTTP/1.1 200 OK
Server: nginx/1.9.11
Date: Mon, 05 Feb 2017 23:00:33 GMT
Content-Type: text/plain
Transfer-Encoding: chunked

CLIENT VALUES:
client_address=10.2.18.5
command=GET
real path=/
query=nil
request_version=1.1
request_uri=http://foo.bar:8080/
...
```

## Troubleshooting

If you have any problems, check the logs and events of the HAProxy Ingress pod:

```console
$ kubectl get pod
NAME                                       READY     STATUS    RESTARTS   AGE
haproxy-ingress-2556761959-tv20k           1/1       Running   0          9m
...

$ kubectl logs haproxy-ingress-2556761959-tv20k
$ kubectl describe pod haproxy-ingress-2556761959-tv20k
```
examples/deployment/haproxy/haproxy-ingress.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    run: haproxy-ingress
  name: haproxy-ingress
spec:
  replicas: 1
  selector:
    matchLabels:
      run: haproxy-ingress
  template:
    metadata:
      labels:
        run: haproxy-ingress
    spec:
      containers:
      - name: haproxy-ingress
        image: quay.io/jcmoraisjr/haproxy-ingress
        args:
        - --default-backend-service=default/ingress-default-backend
        - --default-ssl-certificate=default/tls-secret
        ports:
        - name: http
          containerPort: 80
        - name: https
          containerPort: 443
        - name: stat
          containerPort: 1936
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
examples/tls-termination/haproxy/README.md (new file, 116 lines)
@@ -0,0 +1,116 @@
# TLS termination

## Prerequisites

This document has the following prerequisites:

* Deploy the [HAProxy Ingress controller](/examples/deployment/haproxy); you should end up with the controller, a sample web app and the default TLS secret
* Create [*another* secret](/examples/PREREQUISITES.md#tls-certificates) named `foobar-ssl` with subject `'/CN=foo.bar'`

As mentioned in the deployment instructions, you MUST turn down any existing
ingress controllers before running HAProxy Ingress.

## Using the default TLS certificate

Update the ingress resource in order to add TLS termination to host `foo.bar`:

```console
$ kubectl replace -f ingress-tls-default.yaml
```

The difference from the starting ingress resource:

```console
 metadata:
   name: app
 spec:
+  tls:
+  - hosts:
+    - foo.bar
   rules:
   - host: foo.bar
     http:
```

Trying the default backend:

```console
$ curl -iL 172.17.4.99:30876
HTTP/1.1 404 Not Found
Date: Tue, 07 Feb 2017 00:06:07 GMT
Content-Length: 21
Content-Type: text/plain; charset=utf-8

default backend - 404
```

Now telling the controller we are `foo.bar`:

```console
$ curl -iL 172.17.4.99:30876 -H 'Host: foo.bar'
HTTP/1.1 302 Found
Cache-Control: no-cache
Content-length: 0
Location: https://foo.bar/
Connection: close
^C
```

Note the `Location` header: this would redirect us to the correct server.

Checking the default certificate (change `31692` below to the TLS port):

```console
$ openssl s_client -connect 172.17.4.99:31692
...
subject=/CN=localhost
issuer=/CN=localhost
---
```

... and the `foo.bar` certificate:

```console
$ openssl s_client -connect 172.17.4.99:31692 -servername foo.bar
...
subject=/CN=localhost
issuer=/CN=localhost
---
```
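
The same SNI check can be done programmatically. A minimal Go sketch (the address, port and hostname are the example values used above; certificate verification is skipped because the example certificates are self-signed):

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Connect with SNI set to foo.bar, the equivalent of openssl's -servername.
	conn, err := tls.Dial("tcp", "172.17.4.99:31692", &tls.Config{
		ServerName:         "foo.bar",
		InsecureSkipVerify: true, // example certificates are self-signed
	})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Print the subject and issuer of the certificate the server presented.
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject CN:", cert.Subject.CommonName)
	fmt.Println("issuer CN:", cert.Issuer.CommonName)
}
```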

## Using a new TLS certificate

Now let's reference the new certificate for our domain. Note that the secret
`foobar-ssl` should be created as described in the [prerequisites](#prerequisites).

```console
$ kubectl replace -f ingress-tls-foobar.yaml
```

Here is the difference:

```console
   tls:
   - hosts:
     - foo.bar
+    secretName: foobar-ssl
   rules:
   - host: foo.bar
     http:
```

Now the `foo.bar` certificate should be used to terminate TLS:

```console
openssl s_client -connect 172.17.4.99:31692
...
subject=/CN=localhost
issuer=/CN=localhost
---

openssl s_client -connect 172.17.4.99:31692 -servername foo.bar
...
subject=/CN=foo.bar
issuer=/CN=foo.bar
---
```
examples/tls-termination/haproxy/ingress-tls-default.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: app
spec:
  tls:
  - hosts:
    - foo.bar
  rules:
  - host: foo.bar
    http:
      paths:
      - path: /
        backend:
          serviceName: http-svc
          servicePort: 80
examples/tls-termination/haproxy/ingress-tls-foobar.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: app
spec:
  tls:
  - hosts:
    - foo.bar
    secretName: foobar-ssl
  rules:
  - host: foo.bar
    http:
      paths:
      - path: /
        backend:
          serviceName: http-svc
          servicePort: 80