Merge pull request #5749 from Bo0km4n/feat-configurable-max-batch-size
[Fix/metrics] Make the max batch size of metrics configurable
Commit 803a76cf8a
7 changed files with 28 additions and 2 deletions
@@ -145,6 +145,7 @@ Requires the update-status parameter.`)
 		`Enables the collection of NGINX metrics`)
 	metricsPerHost = flags.Bool("metrics-per-host", true,
 		`Export metrics per-host`)
+	monitorMaxBatchSize = flags.Int("monitor-max-batch-size", 10000, "Max batch size of NGINX metrics")

 	httpPort  = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`)
 	httpsPort = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`)
@@ -269,6 +270,7 @@ https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-g
 		EnableProfiling:      *profiling,
 		EnableMetrics:        *enableMetrics,
 		MetricsPerHost:       *metricsPerHost,
+		MonitorMaxBatchSize:  *monitorMaxBatchSize,
 		EnableSSLPassthrough: *enableSSLPassthrough,
 		ResyncPeriod:         *resyncPeriod,
 		DefaultService:       *defaultSvc,
@@ -851,6 +851,7 @@ type TemplateConfig struct {
 	PublishService      *apiv1.Service
 	EnableMetrics       bool
 	MaxmindEditionFiles []string
+	MonitorMaxBatchSize int

 	PID        string
 	StatusPath string
@@ -101,6 +101,8 @@ type Configuration struct {

 	GlobalExternalAuth  *ngx_config.GlobalExternalAuth
 	MaxmindEditionFiles []string
+
+	MonitorMaxBatchSize int
 }

 // GetPublishService returns the Service used to set the load-balancer status of Ingresses.
@@ -610,6 +610,7 @@ func (n NGINXController) generateTemplate(cfg ngx_config.Configuration, ingressC
 		EnableMetrics:       n.cfg.EnableMetrics,
 		MaxmindEditionFiles: n.cfg.MaxmindEditionFiles,
 		HealthzURI:          nginx.HealthPath,
+		MonitorMaxBatchSize: n.cfg.MonitorMaxBatchSize,
 		PID:                 nginx.PID,
 		StatusPath:          nginx.StatusPath,
 		StatusPort:          nginx.StatusPort,
@@ -70,7 +70,14 @@ local function flush(premature)
   send(payload)
 end

-function _M.init_worker()
+local function set_metrics_max_batch_size(max_batch_size)
+  if max_batch_size > 10000 then
+    MAX_BATCH_SIZE = max_batch_size
+  end
+end
+
+function _M.init_worker(max_batch_size)
+  set_metrics_max_batch_size(max_batch_size)
   local _, err = ngx.timer.every(FLUSH_INTERVAL, flush)
   if err then
     ngx.log(ngx.ERR, string.format("error when setting up timer.every: %s", tostring(err)))
@@ -89,6 +96,7 @@ end

 setmetatable(_M, {__index = {
   flush = flush,
+  set_metrics_max_batch_size = set_metrics_max_batch_size,
   get_metrics_batch = function() return metrics_batch end,
 }})
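The guard above only ever raises the cap: set_metrics_max_batch_size leaves MAX_BATCH_SIZE at its built-in default of 10000 unless a larger value is passed in, which is also the default of the new --monitor-max-batch-size flag. A standalone sketch of that behaviour (illustration only, not part of the diff; the names mirror monitor.lua):

-- Illustrative only: the same raise-only guard, isolated from monitor.lua.
local MAX_BATCH_SIZE = 10000

local function set_metrics_max_batch_size(max_batch_size)
  -- values at or below the 10000 default are ignored
  if max_batch_size > 10000 then
    MAX_BATCH_SIZE = max_batch_size
  end
end

set_metrics_max_batch_size(500)    -- ignored, cap stays at 10000
set_metrics_max_batch_size(20000)  -- accepted, cap becomes 20000
print(MAX_BATCH_SIZE)              -- prints 20000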
@@ -29,6 +29,18 @@ describe("Monitor", function()
     package.loaded["monitor"] = nil
   end)

+  it("extended batch size", function()
+    mock_ngx({ var = {} })
+    local monitor = require("monitor")
+    monitor.set_metrics_max_batch_size(20000)
+
+    for i = 1,20000,1 do
+      monitor.call()
+    end
+
+    assert.equal(20000, #monitor.get_metrics_batch())
+  end)
+
   it("batches metrics", function()
     mock_ngx({ var = {} })
     local monitor = require("monitor")
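A companion test (hypothetical, not part of this change) could cover the other side of the guard: a value at or below 10000 is ignored, so the batch stops growing at the default cap. Assuming the same mock_ngx helper and test-only accessors used above, it might look like this:

  it("ignores a batch size at or below the default", function()
    mock_ngx({ var = {} })
    local monitor = require("monitor")
    monitor.set_metrics_max_batch_size(5000)  -- below the 10000 floor, so ignored

    for i = 1,20000,1 do
      monitor.call()
    end

    -- collection is assumed to stop once the default cap of 10000 is reached
    assert.equal(10000, #monitor.get_metrics_batch())
  end)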
@@ -111,7 +111,7 @@ http {
         lua_ingress.init_worker()
         balancer.init_worker()
         {{ if $all.EnableMetrics }}
-        monitor.init_worker()
+        monitor.init_worker({{ $all.MonitorMaxBatchSize }})
         {{ end }}

         plugins.run()
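For reference, with metrics enabled and the flag left at its default of 10000, the worker-init Lua block in the generated nginx.conf would render to plain Lua along these lines (a sketch of the rendered output, not a verbatim excerpt):

-- sketch of the rendered worker-init block with the default flag value
lua_ingress.init_worker()
balancer.init_worker()
monitor.init_worker(10000)

plugins.run()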