randomize-upstreams added to the configuration; causes upstream server lists to be randomized. This helps with sticky session distribution.
parent a6bba68233
commit 1f646e9fc5
3 changed files with 24 additions and 0 deletions

@@ -332,6 +332,7 @@ http://nginx.org/en/docs/http/load_balancing.html.
**proxy-send-timeout:** Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request.

+**randomize-upstreams:** By default, the upstream servers are sorted in the configuration by IP address. If sticky sessions are enabled, this can lead to an uneven ("lumpy") distribution of sessions across upstreams when nginx is restarted frequently: the pod with the "lowest" IP receives every new session after a restart. Setting this to true randomizes the order of the upstreams, so new sessions are assigned to a random pod after a restart.

**retry-non-idempotent:** Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server.

@@ -21,6 +21,7 @@ import (
	"errors"
	"fmt"
	"io/ioutil"
+	"math/rand"
	"net"
	"net/http"
	"os"

@@ -526,6 +527,22 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
	cfg.SSLDHParam = sslDHParam

+	rand.Seed(time.Now().UnixNano())
+
+	glog.Warningf("Randomize upstreams: %v", cfg.RandomizeUpstreams)
+
+	if cfg.RandomizeUpstreams {
+		// Randomize the upstreams. When sticky sessions are used, the backend with the
+		// lowest IP would otherwise always receive more traffic, because whenever nginx
+		// is reloaded the internal pointer it uses for load balancing is reset to 0.
+		for _, be := range ingressCfg.Backends {
+			for i := range be.Endpoints {
+				j := rand.Intn(i + 1)
+				be.Endpoints[i], be.Endpoints[j] = be.Endpoints[j], be.Endpoints[i]
+			}
+		}
+	}
+
	content, err := n.t.Write(config.TemplateConfig{
		ProxySetHeaders: setHeaders,
		MaxOpenFiles:    maxOpenFiles,
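The loop added above is a standard in-place Fisher-Yates shuffle over each backend's endpoint list. As a standalone sketch (not part of this commit; the endpoint addresses below are made up for illustration), the same technique looks like this:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Hypothetical endpoints, in the order the controller would otherwise
	// emit them (sorted by IP).
	endpoints := []string{"10.0.0.1:8080", "10.0.0.2:8080", "10.0.0.3:8080", "10.0.0.4:8080"}

	// Seed once, as the commit does in OnUpdate, so each reload produces a
	// different permutation.
	rand.Seed(time.Now().UnixNano())

	// Fisher-Yates: for each index i, swap it with a uniformly chosen index j <= i.
	for i := range endpoints {
		j := rand.Intn(i + 1)
		endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
	}

	fmt.Println(endpoints)
}
```

Because `rand.Intn(i + 1)` never returns an index greater than `i`, every permutation of the slice is equally likely, which is what spreads new sticky sessions across pods after each reload.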

@@ -199,6 +199,11 @@ type Configuration struct {
	// Sets the name of the configmap that contains the headers to pass to the backend
	ProxySetHeaders string `json:"proxy-set-headers,omitempty"`

+	// If RandomizeUpstreams is true, the upstream server lists are shuffled before the
+	// configuration is written. This is useful to get a better distribution of sticky
+	// sessions across pods, but it will increase the number of reloads nginx incurs.
+	RandomizeUpstreams bool `json:"randomize-upstreams,omitempty"`
+
	// Maximum size of the server names hash tables used in server names, map directive’s values,
	// MIME types, names of request header strings, etc.
	// http://nginx.org/en/docs/hash.html
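The struct tag above is what connects the new field to the `randomize-upstreams` key documented in the first hunk. A minimal sketch of that key-to-field mapping, using plain `encoding/json` purely for illustration (the controller's real ConfigMap parsing path is not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Configuration is a tiny stand-in for the controller's config struct,
// keeping only the field added in this commit.
type Configuration struct {
	RandomizeUpstreams bool `json:"randomize-upstreams,omitempty"`
}

func main() {
	// A user would set the key in the controller's ConfigMap; here it is
	// represented as JSON so the tag lookup is visible.
	raw := []byte(`{"randomize-upstreams": true}`)

	var cfg Configuration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.RandomizeUpstreams) // true
}
```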

@@ -316,6 +321,7 @@ func NewDefault() Configuration {
	MaxWorkerConnections: 16384,
	MapHashBucketSize: 64,
	ProxyRealIPCIDR: defIPCIDR,
+	RandomizeUpstreams: false,
	ServerNameHashMaxSize: 1024,
	ShowServerTokens: true,
	SSLBufferSize: sslBufferSize,