Flip to using a positive behaviour flag instead of a negative one
Signed-off-by: Rafael da Fonseca <rafael.fonseca@wildlifestudios.com>
parent 1e20a00589
commit 1b8f98397a
6 changed files with 15 additions and 12 deletions
@@ -14,6 +14,7 @@ metadata:
   namespace: {{ include "ingress-nginx.namespace" . }}
 data:
   allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}"
+  enable-serial-reloads: "{{ .Values.controller.enableSerialReloads }}"
 {{- if .Values.controller.addHeaders }}
   add-headers: {{ include "ingress-nginx.namespace" . }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
 {{- end }}
@@ -88,6 +88,8 @@ controller:
   # when users add those annotations.
   # Global snippets in ConfigMap are still respected
   allowSnippetAnnotations: false
+  # -- This configuration defines if NGINX workers should reload serially instead of concurrently when multiple changes that require reloads are queued
+  enableSerialReloads: false
   # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
   # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
   # is merged
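Note: the chart value `controller.enableSerialReloads` is rendered into the ConfigMap key `enable-serial-reloads`, which the controller then maps onto a Go field via its JSON tag (see the `Configuration` struct hunk below). A minimal sketch of that mapping, using a trimmed-down stand-in rather than the real struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for the controller's Configuration struct: the
// json tag is what ties the Go field to the "enable-serial-reloads"
// ConfigMap key rendered by the chart template above.
type configuration struct {
	WorkerSerialReloads bool `json:"enable-serial-reloads,omitempty"`
}

func main() {
	out, _ := json.Marshal(configuration{WorkerSerialReloads: true})
	fmt.Println(string(out)) // {"enable-serial-reloads":true}
}
```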
@@ -114,7 +114,7 @@ The following table shows a configuration option's name, type, and the default v
 |[worker-processes](#worker-processes)|string|`<Number of CPUs>`||
 |[worker-cpu-affinity](#worker-cpu-affinity)|string|""||
 |[worker-shutdown-timeout](#worker-shutdown-timeout)|string|"240s"||
-|[concurrently-reload-worker-processes](#concurrently-reload-worker-processes)|bool|"true"||
+|[enable-serial-reloads](#enable-serial-reloads)|bool|"false"||
 |[load-balance](#load-balance)|string|"round_robin"||
 |[variables-hash-bucket-size](#variables-hash-bucket-size)|int|128||
 |[variables-hash-max-size](#variables-hash-max-size)|int|2048||
@@ -479,7 +479,7 @@ type Configuration struct {
 	// With this setting on false, configuration changes in the queue will be re-queued with an exponential backoff, until the number of worker process is the expected value.
 	// By default new worker processes are spawned every time there's a change that cannot be applied dynamically with no upper limit to the number of running workers
 	// http://nginx.org/en/docs/ngx_core_module.html#worker_processes
-	ConcurrentlyReloadWorkers bool `json:"concurrently-reload-worker-processes,omitempty"`
+	WorkerSerialReloads bool `json:"enable-serial-reloads,omitempty"`

 	// Defines a timeout for a graceful shutdown of worker processes
 	// http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout
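The struct comment above mentions re-queueing with exponential backoff; the sync queue itself is not part of this diff. A minimal, purely illustrative sketch of that requeue pattern (the `reload` closure here is a stand-in, not the controller's API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	attempts := 0
	// Stand-in for a reload that keeps failing while a previous reload
	// is still in progress, then eventually succeeds.
	reload := func() error {
		attempts++
		if attempts < 4 {
			return errors.New("worker reload already in progress, requeuing reload")
		}
		return nil
	}

	backoff := 10 * time.Millisecond
	for {
		if err := reload(); err == nil {
			break
		}
		time.Sleep(backoff)
		backoff *= 2 // exponential backoff between requeues
	}
	fmt.Println("reload applied after", attempts, "attempts")
}
```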
@@ -849,7 +849,7 @@ func NewDefault() Configuration {
 		UseGzip:                 false,
 		UseGeoIP2:               false,
 		WorkerProcesses:         strconv.Itoa(runtime.NumCPU()),
-		ConcurrentlyReloadWorkers: true,
+		WorkerSerialReloads:     false,
 		WorkerShutdownTimeout:   "240s",
 		VariablesHashBucketSize: 256,
 		VariablesHashMaxSize:    2048,
@@ -672,8 +672,8 @@ Error: %v
 //
 //nolint:gocritic // the cfg shouldn't be changed, and shouldn't be mutated by other processes while being rendered.
 func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
-	concurrentlyReloadWorkers := n.store.GetBackendConfiguration().ConcurrentlyReloadWorkers
-	if !concurrentlyReloadWorkers && n.workersReloading {
+	workerSerialReloads := n.store.GetBackendConfiguration().WorkerSerialReloads
+	if workerSerialReloads && n.workersReloading {
 		return errors.New("worker reload already in progress, requeuing reload")
 	}

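As a sanity check on the flip: the old gate `!concurrentlyReloadWorkers` and the new gate `workerSerialReloads` are equivalent as long as the two settings are logical negations of each other, which the new defaults preserve (old default `true`, new default `false`). A tiny verification sketch:

```go
package main

import "fmt"

func main() {
	// The rename inverts the flag's polarity; behaviour is unchanged as
	// long as workerSerialReloads == !concurrentlyReloadWorkers.
	for _, concurrentlyReloadWorkers := range []bool{true, false} {
		workerSerialReloads := !concurrentlyReloadWorkers
		fmt.Println(!concurrentlyReloadWorkers == workerSerialReloads) // true, true
	}
}
```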
@@ -743,7 +743,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
 	}

 	// Reload status checking runs in a separate goroutine to avoid blocking the sync queue
-	if !concurrentlyReloadWorkers {
+	if workerSerialReloads {
 		go n.awaitWorkersReload()
 	}

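For context, `awaitWorkersReload` itself is unchanged by this commit and its body is not shown here. A self-contained sketch of the overall serial-reload flow under the new flag, with hypothetical `workersReloading` bookkeeping standing in for the controller's internals:

```go
// Illustrative sketch only: models the serial-reload flow. A reload sets
// workersReloading, a goroutine clears it once the old workers have
// drained, and any reload arriving in between is rejected so the sync
// queue can requeue it.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

type controller struct {
	workerSerialReloads bool
	workersReloading    atomic.Bool // atomic stands in for the real synchronization
}

func (c *controller) onUpdate() error {
	if c.workerSerialReloads && c.workersReloading.Load() {
		return errors.New("worker reload already in progress, requeuing reload")
	}
	c.workersReloading.Store(true)
	go c.awaitWorkersReload()
	return nil
}

func (c *controller) awaitWorkersReload() {
	time.Sleep(50 * time.Millisecond) // pretend old workers take this long to exit
	c.workersReloading.Store(false)
}

func main() {
	c := &controller{workerSerialReloads: true}
	fmt.Println(c.onUpdate()) // <nil>: first reload proceeds
	fmt.Println(c.onUpdate()) // rejected: previous reload still draining
	time.Sleep(100 * time.Millisecond)
	fmt.Println(c.onUpdate()) // <nil>: workers drained, reload proceeds again
}
```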
@@ -67,7 +67,7 @@ const (
 	luaSharedDictsKey         = "lua-shared-dicts"
 	plugins                   = "plugins"
 	debugConnections          = "debug-connections"
-	concurrentlyReloadWorkers = "concurrently-reload-worker-processes"
+	workerSerialReloads       = "enable-serial-reloads"
 )

 var (
@@ -386,15 +386,15 @@ func ReadConfig(src map[string]string) config.Configuration {
 		delete(conf, workerProcesses)
 	}

-	if val, ok := conf[concurrentlyReloadWorkers]; ok {
+	if val, ok := conf[workerSerialReloads]; ok {
 		boolVal, err := strconv.ParseBool(val)
 		if err != nil {
-			to.ConcurrentlyReloadWorkers = true
-			klog.Warningf("failed to parse concurrently-reload-worker-processes setting, valid values are true or false, found %s", val)
+			to.WorkerSerialReloads = false
+			klog.Warningf("failed to parse enable-serial-reloads setting, valid values are true or false, found %s", val)
 		} else {
-			to.ConcurrentlyReloadWorkers = boolVal
+			to.WorkerSerialReloads = boolVal
 		}
-		delete(conf, concurrentlyReloadWorkers)
+		delete(conf, workerSerialReloads)
 	}

 	if val, ok := conf[plugins]; ok {
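The fallback in `ReadConfig` leans on `strconv.ParseBool`, which accepts `1`, `t`, `T`, `TRUE`, `true`, `True`, `0`, `f`, `F`, `FALSE`, `false`, `False`; anything else leaves the new default of `false` in place and logs a warning. A standalone illustration of that behaviour:

```go
package main

import (
	"fmt"
	"strconv"
)

// Mirrors the hunk above: parse the ConfigMap value, fall back to the
// default (false) when the value is not a valid boolean.
func parseSerialReloads(val string) bool {
	boolVal, err := strconv.ParseBool(val)
	if err != nil {
		fmt.Printf("failed to parse enable-serial-reloads setting, valid values are true or false, found %s\n", val)
		return false
	}
	return boolVal
}

func main() {
	fmt.Println(parseSerialReloads("true")) // true
	fmt.Println(parseSerialReloads("yes"))  // warning printed, then false
}
```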