This commit is contained in:
rnburn 2017-10-19 18:04:14 -07:00
commit 6ea6921562
9 changed files with 58 additions and 14 deletions

View file

@@ -39,6 +39,7 @@ An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches th
- [Proxy Protocol](#proxy-protocol) - [Proxy Protocol](#proxy-protocol)
- [ModSecurity Web Application Firewall](docs/user-guide/modsecurity.md) - [ModSecurity Web Application Firewall](docs/user-guide/modsecurity.md)
- [OpenTracing](docs/user-guide/opentracing.md) - [OpenTracing](docs/user-guide/opentracing.md)
- [VTS and Prometheus metrics](docs/examples/customization/custom-vts-metrics-prometheus/README.md)
- [Custom errors](docs/user-guide/custom-errors.md) - [Custom errors](docs/user-guide/custom-errors.md)
- [NGINX status page](docs/user-guide/nginx-status-page.md) - [NGINX status page](docs/user-guide/nginx-status-page.md)
- [Running multiple ingress controllers](#running-multiple-ingress-controllers) - [Running multiple ingress controllers](#running-multiple-ingress-controllers)

View file

@@ -66,7 +66,7 @@ minikube addons enable ingress
### AWS ### AWS
In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`. In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`.
This setup requires to choose in wich layer (L4 or L7) we want to configure the ELB: This setup requires to choose in which layer (L4 or L7) we want to configure the ELB:
- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443. - [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443.
- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB - [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB

View file

@@ -16,4 +16,4 @@ spec:
targetPort: http targetPort: http
- name: https - name: https
port: 443 port: 443
targetPort: http targetPort: https

View file

@@ -28,6 +28,7 @@ The following annotations are supported:
|[ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number| |[ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number|
|[ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number| |[ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number|
|[ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number| |[ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number|
|[ingress.kubernetes.io/proxy-next-upstream](#custom-timeouts)|string|
|[ingress.kubernetes.io/proxy-request-buffering](#custom-timeouts)|string| |[ingress.kubernetes.io/proxy-request-buffering](#custom-timeouts)|string|
|[ingress.kubernetes.io/rewrite-target](#rewrite)|URI| |[ingress.kubernetes.io/rewrite-target](#rewrite)|URI|
|[ingress.kubernetes.io/secure-backends](#secure-backends)|true or false| |[ingress.kubernetes.io/secure-backends](#secure-backends)|true or false|
@@ -313,6 +314,7 @@ In some scenarios is required to have different values. To allow this we provide
- `ingress.kubernetes.io/proxy-connect-timeout` - `ingress.kubernetes.io/proxy-connect-timeout`
- `ingress.kubernetes.io/proxy-send-timeout` - `ingress.kubernetes.io/proxy-send-timeout`
- `ingress.kubernetes.io/proxy-read-timeout` - `ingress.kubernetes.io/proxy-read-timeout`
- `ingress.kubernetes.io/proxy-next-upstream`
- `ingress.kubernetes.io/proxy-request-buffering` - `ingress.kubernetes.io/proxy-request-buffering`
### Custom max body size ### Custom max body size

View file

@@ -1219,7 +1219,7 @@ func (ic *GenericController) Start() {
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
// initial sync of secrets to avoid unnecessary reloads // initial sync of secrets to avoid unnecessary reloads
glog.Info("running initial sync of secret") glog.Info("running initial sync of secrets")
for _, obj := range ic.listers.Ingress.List() { for _, obj := range ic.listers.Ingress.List() {
ing := obj.(*extensions.Ingress) ing := obj.(*extensions.Ingress)

View file

@@ -109,6 +109,9 @@ func NewIngressController(backend ingress.Controller) *GenericController {
flags.AddGoFlagSet(flag.CommandLine) flags.AddGoFlagSet(flag.CommandLine)
backend.ConfigureFlags(flags) backend.ConfigureFlags(flags)
flags.Parse(os.Args) flags.Parse(os.Args)
// Workaround for this issue:
// https://github.com/kubernetes/kubernetes/issues/17162
flag.CommandLine.Parse([]string{})
backend.OverrideFlags(flags) backend.OverrideFlags(flags)
flag.Set("logtostderr", "true") flag.Set("logtostderr", "true")

View file

@@ -44,6 +44,7 @@ import (
const ( const (
slash = "/" slash = "/"
nonIdempotent = "non_idempotent"
defBufferSize = 65535 defBufferSize = 65535
) )
@@ -548,20 +549,30 @@ func isSticky(host string, loc *ingress.Location, stickyLocations map[string][]s
return false return false
} }
func buildNextUpstream(input interface{}) string { func buildNextUpstream(i, r interface{}) string {
nextUpstream, ok := input.(string) nextUpstream, ok := i.(string)
if !ok { if !ok {
glog.Errorf("expected a 'string' type but %T was returned", input) glog.Errorf("expected a 'string' type but %T was returned", i)
return "" return ""
} }
retryNonIdempotent := r.(bool)
parts := strings.Split(nextUpstream, " ") parts := strings.Split(nextUpstream, " ")
nextUpstreamCodes := make([]string, 0, len(parts)) nextUpstreamCodes := make([]string, 0, len(parts))
for _, v := range parts { for _, v := range parts {
if v != "" && v != "non_idempotent" { if v != "" && v != nonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, v) nextUpstreamCodes = append(nextUpstreamCodes, v)
} }
if v == nonIdempotent {
retryNonIdempotent = true
}
}
if retryNonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, nonIdempotent)
} }
return strings.Join(nextUpstreamCodes, " ") return strings.Join(nextUpstreamCodes, " ")

View file

@@ -311,13 +311,40 @@ func TestBuildResolvers(t *testing.T) {
} }
func TestBuildNextUpstream(t *testing.T) { func TestBuildNextUpstream(t *testing.T) {
nextUpstream := "timeout http_500 http_502 non_idempotent" cases := map[string]struct {
validNextUpstream := "timeout http_500 http_502" NextUpstream string
NonIdempotent bool
Output string
}{
"default": {
"timeout http_500 http_502",
false,
"timeout http_500 http_502",
},
"global": {
"timeout http_500 http_502",
true,
"timeout http_500 http_502 non_idempotent",
},
"local": {
"timeout http_500 http_502 non_idempotent",
false,
"timeout http_500 http_502 non_idempotent",
},
}
buildNextUpstream := buildNextUpstream(nextUpstream) for k, tc := range cases {
nextUpstream := buildNextUpstream(tc.NextUpstream, tc.NonIdempotent)
if buildNextUpstream != validNextUpstream { if nextUpstream != tc.Output {
t.Errorf("Expected '%v' but returned '%v'", validNextUpstream, buildNextUpstream) t.Errorf(
"%s: called buildNextUpstream('%s', %v); expected '%v' but returned '%v'",
k,
tc.NextUpstream,
tc.NonIdempotent,
tc.Output,
nextUpstream,
)
}
} }
} }

View file

@@ -798,7 +798,7 @@ stream {
proxy_cookie_path {{ $location.Proxy.CookiePath }}; proxy_cookie_path {{ $location.Proxy.CookiePath }};
# In case of errors try the next upstream server before returning an error # In case of errors try the next upstream server before returning an error
proxy_next_upstream {{ buildNextUpstream $location.Proxy.NextUpstream }}{{ if $all.Cfg.RetryNonIdempotent }} non_idempotent{{ end }}; proxy_next_upstream {{ buildNextUpstream $location.Proxy.NextUpstream $all.Cfg.RetryNonIdempotent }};
{{/* rewrite only works if the content is not compressed */}} {{/* rewrite only works if the content is not compressed */}}
{{ if $location.Rewrite.AddBaseURL }} {{ if $location.Rewrite.AddBaseURL }}