Allow configuring nginx worker reload behaviour, to prevent multiple concurrent worker reloads which can lead to high resource usage and OOMKill (#10884)
* feat: allow configuring nginx worker reload behaviour, to prevent multiple concurrent worker reloads
* appease linter, remove unnecessary log line
* Flip to using a positive behaviour flag instead of negative
* Update helm-docs
* Avoid calling GetBackendConfiguration() twice, use clearer name for helm chart option
* Fix helm-docs ordering

Signed-off-by: Rafael da Fonseca <rafael.fonseca@wildlifestudios.com>
This commit is contained in: parent 689b993a3c, commit 4e11074323
7 changed files with 69 additions and 3 deletions
@@ -301,6 +301,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
 | controller.enableAnnotationValidations | bool | `false` | |
 | controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # |
 | controller.enableTopologyAwareRouting | bool | `false` | This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode="auto" Defaults to false |
+| controller.enableWorkerSerialReloads | bool | `false` | This configuration defines if NGINX workers should reload serially instead of concurrently when multiple changes that require reloads are queued |
 | controller.existingPsp | string | `""` | Use an existing PSP instead of creating one |
 | controller.extraArgs | object | `{}` | Additional command line arguments to pass to Ingress-Nginx Controller E.g. to specify the default SSL certificate you can use |
 | controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. |
@@ -14,6 +14,7 @@ metadata:
   namespace: {{ include "ingress-nginx.namespace" . }}
 data:
   allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}"
+  enable-serial-reloads: "{{ .Values.controller.enableWorkerSerialReloads }}"
 {{- if .Values.controller.addHeaders }}
   add-headers: {{ include "ingress-nginx.namespace" . }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
 {{- end }}
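For orientation, the template change above always emits the key, so the Helm boolean ends up as a quoted string in the controller ConfigMap. A minimal sketch of the rendered data, assuming a release that sets controller.enableWorkerSerialReloads to true (the resource name and namespace below are illustrative, not taken from this commit):

```yaml
# Hypothetical rendered output of the ConfigMap template hunk above (names are illustrative)
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "false"
  enable-serial-reloads: "true"   # rendered from .Values.controller.enableWorkerSerialReloads
```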
@@ -93,6 +93,8 @@ controller:
   # when users add those annotations.
   # Global snippets in ConfigMap are still respected
   allowSnippetAnnotations: false
+  # -- This configuration defines if NGINX workers should reload serially instead of concurrently when multiple changes that require reloads are queued
+  enableWorkerSerialReloads: false
   # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
   # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
   # is merged
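As a usage sketch, opting into serial reloads from a Helm install only requires overriding the new chart value shown above; everything else keeps its defaults:

```yaml
# values-override.yaml -- minimal sketch, assuming only the enableWorkerSerialReloads key added in this commit
controller:
  enableWorkerSerialReloads: true
```

This could be applied with something like `helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx -f values-override.yaml`; the release and repository names are assumptions, not part of this change.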
@@ -114,6 +114,7 @@ The following table shows a configuration option's name, type, and the default v
 |[worker-processes](#worker-processes)| string | `<Number of CPUs>` ||
 |[worker-cpu-affinity](#worker-cpu-affinity)| string | "" ||
 |[worker-shutdown-timeout](#worker-shutdown-timeout)| string | "240s" ||
+|[enable-serial-reloads](#enable-serial-reloads)|bool|"false"||
 |[load-balance](#load-balance)| string | "round_robin" ||
 |[variables-hash-bucket-size](#variables-hash-bucket-size)| int | 128 ||
 |[variables-hash-max-size](#variables-hash-max-size)| int | 2048 ||
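For deployments not managed by the chart, the same behaviour can be toggled directly through the controller's ConfigMap using the enable-serial-reloads key documented above. A minimal sketch; the ConfigMap name and namespace must match whatever the controller's --configmap flag points at (the names below are assumptions):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller   # assumed; must match the controller's --configmap argument
  namespace: ingress-nginx
data:
  enable-serial-reloads: "true"
```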
@@ -477,6 +477,13 @@ type Configuration struct {
     // http://nginx.org/en/docs/ngx_core_module.html#worker_processes
     WorkerProcesses string `json:"worker-processes,omitempty"`
 
+    // Defines whether multiple concurrent reloads of worker processes should occur.
+    // Set this to false to prevent more than n x 2 workers to exist at any time, to avoid potential OOM situations and high CPU load
+    // With this setting on false, configuration changes in the queue will be re-queued with an exponential backoff, until the number of worker process is the expected value.
+    // By default new worker processes are spawned every time there's a change that cannot be applied dynamically with no upper limit to the number of running workers
+    // http://nginx.org/en/docs/ngx_core_module.html#worker_processes
+    WorkerSerialReloads bool `json:"enable-serial-reloads,omitempty"`
+
     // Defines a timeout for a graceful shutdown of worker processes
     // http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout
     WorkerShutdownTimeout string `json:"worker-shutdown-timeout,omitempty"`
@@ -851,6 +858,7 @@ func NewDefault() Configuration {
     UseGeoIP2: false,
     GeoIP2AutoReloadMinutes: 0,
     WorkerProcesses: strconv.Itoa(runtime.NumCPU()),
+    WorkerSerialReloads: false,
     WorkerShutdownTimeout: "240s",
     VariablesHashBucketSize: 256,
     VariablesHashMaxSize: 2048,
@@ -35,6 +35,7 @@ import (
     "syscall"
     "text/template"
     "time"
+    "unicode"
 
     proxyproto "github.com/armon/go-proxyproto"
     "github.com/eapache/channels"
@@ -87,9 +88,10 @@ func NewNGINXController(config *Configuration, mc metric.Collector) *NGINXContro
     n := &NGINXController{
         isIPV6Enabled: ing_net.IsIPv6Enabled(),
 
         resolver: h,
         cfg: config,
         syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(config.SyncRateLimit, 1),
+        workersReloading: false,
 
         recorder: eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
             Component: "nginx-ingress-controller",
@@ -229,6 +231,8 @@ type NGINXController struct {
 
     syncRateLimiter flowcontrol.RateLimiter
 
+    workersReloading bool
+
     // stopLock is used to enforce that only a single call to Stop send at
     // a given time. We allow stopping through an HTTP endpoint and
     // allowing concurrent stoppers leads to stack traces.
@@ -676,6 +680,11 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
     cfg := n.store.GetBackendConfiguration()
     cfg.Resolver = n.resolver
 
+    workerSerialReloads := cfg.WorkerSerialReloads
+    if workerSerialReloads && n.workersReloading {
+        return errors.New("worker reload already in progress, requeuing reload")
+    }
+
     content, err := n.generateTemplate(cfg, ingressCfg)
     if err != nil {
         return err
@@ -738,9 +747,41 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
         return fmt.Errorf("%v\n%v", err, string(o))
     }
 
+    // Reload status checking runs in a separate goroutine to avoid blocking the sync queue
+    if workerSerialReloads {
+        go n.awaitWorkersReload()
+    }
+
     return nil
 }
 
+// awaitWorkersReload checks if the number of workers has returned to the expected count
+func (n *NGINXController) awaitWorkersReload() {
+    n.workersReloading = true
+    defer func() { n.workersReloading = false }()
+
+    expectedWorkers := n.store.GetBackendConfiguration().WorkerProcesses
+    var numWorkers string
+    klog.V(3).Infof("waiting for worker count to be equal to %s", expectedWorkers)
+    for numWorkers != expectedWorkers {
+        time.Sleep(time.Second)
+        o, err := exec.Command("/bin/sh", "-c", "pgrep worker | wc -l").Output()
+        if err != nil {
+            klog.ErrorS(err, numWorkers)
+            return
+        }
+        // cleanup any non-printable chars from shell output
+        numWorkers = strings.Map(func(r rune) rune {
+            if unicode.IsPrint(r) {
+                return r
+            }
+            return -1
+        }, string(o))
+
+        klog.V(3).Infof("Currently running nginx worker processes: %s, expected %s", numWorkers, expectedWorkers)
+    }
+}
+
 // nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash
 // with the given longest key.
 func nginxHashBucketSize(longestString int) int {
@@ -69,6 +69,7 @@ const (
     luaSharedDictsKey = "lua-shared-dicts"
     plugins = "plugins"
     debugConnections = "debug-connections"
+    workerSerialReloads = "enable-serial-reloads"
 )
 
 var (
@@ -404,6 +405,17 @@ func ReadConfig(src map[string]string) config.Configuration {
         delete(conf, workerProcesses)
     }
 
+    if val, ok := conf[workerSerialReloads]; ok {
+        boolVal, err := strconv.ParseBool(val)
+        if err != nil {
+            to.WorkerSerialReloads = false
+            klog.Warningf("failed to parse enable-serial-reloads setting, valid values are true or false, found %s", val)
+        } else {
+            to.WorkerSerialReloads = boolVal
+        }
+        delete(conf, workerSerialReloads)
+    }
+
     if val, ok := conf[plugins]; ok {
         to.Plugins = splitAndTrimSpace(val, ",")
         delete(conf, plugins)
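Since the parsing above relies on strconv.ParseBool, only Go's boolean literals are accepted for the ConfigMap key; anything else logs the warning shown and falls back to false. A short sketch of valid and invalid values:

```yaml
data:
  enable-serial-reloads: "true"    # accepted by strconv.ParseBool: "1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False"
  # enable-serial-reloads: "yes"   # rejected by strconv.ParseBool; the controller warns and uses false
```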