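-- Batches per-request metrics in an in-worker table and periodically flushes
-- them as JSON over a unix socket (presumably read by a metrics collector or
-- exporter running next to NGINX).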
local socket = ngx.socket.tcp
local cjson = require('cjson')
local assert = assert

local metrics_batch = {}

-- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS then it will start dropping metrics
local MAX_BATCH_SIZE = 10000
local FLUSH_INTERVAL = 1 -- second

local _M = {}
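-- Writes one JSON payload to the metrics unix socket; failures propagate as
-- errors via assert().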
local function send(payload)
  local s = assert(socket())
  assert(s:connect("unix:/tmp/prometheus-nginx.socket"))
  assert(s:send(payload))
  assert(s:close())
end
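-- Builds the metrics record for the current request from NGINX variables.
-- namespace, ingress_name, service_name and location_path are expected to be
-- set elsewhere in the NGINX configuration; every field falls back to "-" or
-- -1 when the underlying variable is missing.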
local function metrics()
  return {
    host = ngx.var.host or "-",
    namespace = ngx.var.namespace or "-",
    ingress = ngx.var.ingress_name or "-",
    service = ngx.var.service_name or "-",
    path = ngx.var.location_path or "-",

    method = ngx.var.request_method or "-",
    status = ngx.var.status or "-",
    requestLength = tonumber(ngx.var.request_length) or -1,
    requestTime = tonumber(ngx.var.request_time) or -1,
    responseLength = tonumber(ngx.var.bytes_sent) or -1,

    endpoint = ngx.var.upstream_addr or "-",
    upstreamLatency = tonumber(ngx.var.upstream_connect_time) or -1,
    upstreamResponseTime = tonumber(ngx.var.upstream_response_time) or -1,
    upstreamResponseLength = tonumber(ngx.var.upstream_response_length) or -1,
    --upstreamStatus = ngx.var.upstream_status or "-",
  }
end
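-- Timer handler: encodes and sends the accumulated batch. `premature` is set
-- by ngx.timer when the worker is shutting down, in which case nothing is
-- sent. The batch is swapped for a fresh table before encoding so metrics
-- recorded while the flush runs are not lost.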
local function flush(premature)
  if premature then
    return
  end

  if #metrics_batch == 0 then
    return
  end

  local current_metrics_batch = metrics_batch
  metrics_batch = {}

  local ok, payload = pcall(cjson.encode, current_metrics_batch)
  if not ok then
    ngx.log(ngx.ERR, "error while encoding metrics: " .. tostring(payload))
    return
  end

  send(payload)
end
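-- Registers the recurring flush timer. Intended to be called once per worker,
-- e.g. from an init_worker_by_lua_* phase.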
function _M.init_worker()
  local _, err = ngx.timer.every(FLUSH_INTERVAL, flush)
  if err then
    ngx.log(ngx.ERR, string.format("error when setting up timer.every: %s", tostring(err)))
  end
end
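-- Records metrics for the current request, dropping them when the batch is
-- already full. Meant to be called once per request after the response has
-- been handled, e.g. from a log_by_lua_* handler.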
function _M.call()
  if #metrics_batch >= MAX_BATCH_SIZE then
    ngx.log(ngx.WARN, "omitting metrics for the request, current batch is full")
    return
  end

  table.insert(metrics_batch, metrics())
end
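-- A minimal sketch of how the module might be wired into nginx.conf, assuming
-- it is available on the Lua package path as "monitor" (the surrounding
-- configuration is illustrative, not part of this module):
--
--   init_worker_by_lua_block { require("monitor").init_worker() }
--   log_by_lua_block { require("monitor").call() }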
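-- Expose internals for unit tests when a test harness sets the global _TEST flag.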
if _TEST then
  _M.flush = flush
  _M.get_metrics_batch = function() return metrics_batch end
end
return _M