diff --git a/controllers/nginx/configuration.md b/controllers/nginx/configuration.md
index d7d4b2c46..857096792 100644
--- a/controllers/nginx/configuration.md
+++ b/controllers/nginx/configuration.md
@@ -332,6 +332,7 @@ http://nginx.org/en/docs/http/load_balancing.html.
 
 **proxy-send-timeout:** Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request.
 
+**randomize-upstreams:** By default the upstream servers are sorted by IP in the generated configuration. If sticky sessions are enabled, this can lead to an uneven distribution of sessions across upstreams when NGINX restarts frequently (the pod with the "lowest" IP receives every new session after a restart). Setting this to true randomizes the order of the upstreams, so new sessions are assigned to a random pod after a restart.
 **retry-non-idempotent:** Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server.
 
diff --git a/controllers/nginx/pkg/cmd/controller/nginx.go b/controllers/nginx/pkg/cmd/controller/nginx.go
index 133bc9a55..1de8fc9f1 100644
--- a/controllers/nginx/pkg/cmd/controller/nginx.go
+++ b/controllers/nginx/pkg/cmd/controller/nginx.go
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -526,6 +527,22 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
 
 	cfg.SSLDHParam = sslDHParam
 
+	rand.Seed(time.Now().UnixNano())
+
+	glog.Warningf("Randomize upstreams: %v", cfg.RandomizeUpstreams)
+
+	if cfg.RandomizeUpstreams {
+		// Randomize the upstreams. When sticky sessions are used, the backend with the lowest IP
+		// will always receive more traffic, because whenever nginx is reloaded the internal
+		// pointer it uses for load balancing is reset to 0.
+		for _, be := range ingressCfg.Backends {
+			for i := range be.Endpoints {
+				j := rand.Intn(i + 1)
+				be.Endpoints[i], be.Endpoints[j] = be.Endpoints[j], be.Endpoints[i]
+			}
+		}
+	}
+
 	content, err := n.t.Write(config.TemplateConfig{
 		ProxySetHeaders: setHeaders,
 		MaxOpenFiles:    maxOpenFiles,
diff --git a/controllers/nginx/pkg/config/config.go b/controllers/nginx/pkg/config/config.go
index 8bc2fd70a..77851daf9 100644
--- a/controllers/nginx/pkg/config/config.go
+++ b/controllers/nginx/pkg/config/config.go
@@ -199,6 +199,11 @@ type Configuration struct {
 	// Sets the name of the configmap that contains the headers to pass to the backend
 	ProxySetHeaders string `json:"proxy-set-headers,omitempty"`
 
+	// If RandomizeUpstreams is true the upstream endpoints are shuffled before the configuration is
+	// written instead of being left in sorted order. This is useful to get a more even distribution
+	// of sticky sessions. It will increase the number of reloads nginx incurs.
+	RandomizeUpstreams bool `json:"randomize-upstreams,omitempty"`
+
 	// Maximum size of the server names hash tables used in server names, map directive’s values,
 	// MIME types, names of request header strings, etcd.
 	// http://nginx.org/en/docs/hash.html
@@ -316,6 +321,7 @@ func NewDefault() Configuration {
 		MaxWorkerConnections:     16384,
 		MapHashBucketSize:        64,
 		ProxyRealIPCIDR:          defIPCIDR,
+		RandomizeUpstreams:       false,
 		ServerNameHashMaxSize:    1024,
 		ShowServerTokens:         true,
 		SSLBufferSize:            sslBufferSize,
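
For reference, below is a minimal, self-contained sketch of the shuffle this patch applies to each backend's endpoint slice when `randomize-upstreams` is enabled. The `Endpoint` struct and `shuffleEndpoints` helper are hypothetical stand-ins for the controller's `ingress.Endpoint` type and the inline loop in `OnUpdate`; only the swap logic mirrors the patch.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Endpoint is a hypothetical stand-in for the controller's ingress.Endpoint type.
type Endpoint struct {
	Address string
	Port    string
}

// shuffleEndpoints performs the same in-place Fisher-Yates shuffle the patch
// runs over be.Endpoints when cfg.RandomizeUpstreams is true.
func shuffleEndpoints(eps []Endpoint) {
	for i := range eps {
		j := rand.Intn(i + 1)
		eps[i], eps[j] = eps[j], eps[i]
	}
}

func main() {
	// Seed the PRNG, as the patch does in OnUpdate before shuffling.
	rand.Seed(time.Now().UnixNano())

	eps := []Endpoint{
		{"10.0.0.3", "8080"},
		{"10.0.0.1", "8080"},
		{"10.0.0.2", "8080"},
	}
	shuffleEndpoints(eps)

	// The order now varies from run to run, just as it would between nginx reloads.
	fmt.Println(eps)
}
```

Because the shuffled order changes on every sync, the generated upstream blocks differ even when the endpoints themselves have not changed, which is why the option is documented as increasing the number of reloads.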