rollback nginx metrics

parent 74e5bcece2
commit f79257db36

5 changed files with 292 additions and 163 deletions
@@ -25,6 +25,7 @@ import (
 	"github.com/ncabatoff/process-exporter/proc"
 	"github.com/prometheus/client_golang/prometheus"
 	"reflect"
+	"strings"
 )

 type exeMatcher struct {
@@ -41,9 +42,12 @@ func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
 }

 func (n *NGINXController) setupMonitor(args []string) {
-	pc, err := newProcessCollector(true, exeMatcher{"nginx", args})
+	var enableVts = true
+
+	// TODO fix true
+	pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, enableVts)
 	if err != nil {
-		glog.Warningf("unexpected error registering nginx collector: %v", err)
+		glog.Fatalf("unexpected error registering nginx collector: %v", err)
 	}
 	err = prometheus.Register(pc)
 	if err != nil {
@@ -79,22 +83,6 @@ var (
 		"number of bytes read",
 		nil, nil)

-	//vts metrics
-	bytesDesc = prometheus.NewDesc(
-		"nginx_bytes_total",
-		"Nginx bytes count",
-		[]string{"server_zones", "direction"}, nil)
-
-	cacheDesc = prometheus.NewDesc(
-		"nginx_cache_total",
-		"Nginx cache count",
-		[]string{"server_zones", "type"}, nil)
-
-	connectionsDesc = prometheus.NewDesc(
-		"nginx_connections_total",
-		"Nginx connections count",
-		[]string{"type"}, nil)
-
 	startTimeDesc = prometheus.NewDesc(
 		"nginx_oldest_start_time_seconds",
 		"start time in seconds since 1970/01/01",
@@ -105,60 +93,126 @@ var (
 		"number of bytes written",
 		nil, nil)

-	responseDesc = prometheus.NewDesc(
-		"nginx_responses_total",
+	//vts metrics
+	vtsBytesDesc = prometheus.NewDesc(
+		"nginx_vts_bytes_total",
+		"Nginx bytes count",
+		[]string{"server_zone", "direction"}, nil)
+
+	vtsCacheDesc = prometheus.NewDesc(
+		"nginx_vts_cache_total",
+		"Nginx cache count",
+		[]string{"server_zone", "type"}, nil)
+
+	vtsConnectionsDesc = prometheus.NewDesc(
+		"nginx_vts_connections_total",
+		"Nginx connections count",
+		[]string{"type"}, nil)
+
+	vtsResponseDesc = prometheus.NewDesc(
+		"nginx_vts_responses_total",
 		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
-		[]string{"server_zones", "status_code"}, nil)
+		[]string{"server_zone", "status_code"}, nil)

-	requestDesc = prometheus.NewDesc(
-		"nginx_requests_total",
+	vtsRequestDesc = prometheus.NewDesc(
+		"nginx_vts_requests_total",
 		"The total number of requested client connections.",
-		[]string{"server_zones"}, nil)
+		[]string{"server_zone"}, nil)

-	upstreamBackupDesc = prometheus.NewDesc(
-		"nginx_upstream_backup",
+	vtsFilterZoneBytesDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_bytes_total",
+		"Nginx bytes count",
+		[]string{"server_zone", "country", "direction"}, nil)
+
+	vtsFilterZoneResponseDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_responses_total",
+		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
+		[]string{"server_zone", "country", "status_code"}, nil)
+
+	vtsFilterZoneCacheDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_cache_total",
+		"Nginx cache count",
+		[]string{"server_zone", "country", "type"}, nil)
+
+	vtsUpstreamBackupDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_backup",
 		"Current backup setting of the server.",
 		[]string{"upstream", "server"}, nil)

-	upstreamBytesDesc = prometheus.NewDesc(
-		"nginx_upstream_bytes_total",
+	vtsUpstreamBytesDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_bytes_total",
 		"The total number of bytes sent to this server.",
 		[]string{"upstream", "server", "direction"}, nil)

-	upstreamDownDesc = prometheus.NewDesc(
-		"nginx_upstream_down_total",
+	vtsUpstreamDownDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_down_total",
 		"Current down setting of the server.",
 		[]string{"upstream", "server"}, nil)

-	upstreamFailTimeoutDesc = prometheus.NewDesc(
-		"nginx_upstream_fail_timeout",
+	vtsUpstreamFailTimeoutDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_fail_timeout",
 		"Current fail_timeout setting of the server.",
 		[]string{"upstream", "server"}, nil)

-	upstreamMaxFailsDesc = prometheus.NewDesc(
-		"nginx_upstream_maxfails",
+	vtsUpstreamMaxFailsDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_maxfails",
 		"Current max_fails setting of the server.",
 		[]string{"upstream", "server"}, nil)

-	upstreamResponsesDesc = prometheus.NewDesc(
-		"nginx_upstream_responses_total",
+	vtsUpstreamResponsesDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_responses_total",
 		"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
 		[]string{"upstream", "server", "status_code"}, nil)

-	upstreamRequestDesc = prometheus.NewDesc(
-		"nginx_upstream_requests_total",
+	vtsUpstreamRequestDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_requests_total",
 		"The total number of client connections forwarded to this server.",
 		[]string{"upstream", "server"}, nil)

-	upstreamResponseMsecDesc = prometheus.NewDesc(
-		"nginx_upstream_response_msecs_avg",
+	vtsUpstreamResponseMsecDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_response_msecs_avg",
 		"The average of only upstream response processing times in milliseconds.",
 		[]string{"upstream", "server"}, nil)

-	upstreamWeightDesc = prometheus.NewDesc(
-		"nginx_upstream_weight",
+	vtsUpstreamWeightDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_weight",
 		"Current upstream weight setting of the server.",
 		[]string{"upstream", "server"}, nil)
+
+	activeDesc = prometheus.NewDesc(
+		"nginx_active_connections",
+		"total number of active connections",
+		nil, nil)
+
+	acceptedDesc = prometheus.NewDesc(
+		"nginx_accepted_connections",
+		"total number of accepted client connections",
+		nil, nil)
+
+	handledDesc = prometheus.NewDesc(
+		"nginx_handled_connections",
+		"total number of handled connections",
+		nil, nil)
+
+	requestsDesc = prometheus.NewDesc(
+		"nginx_total_requests",
+		"total number of client requests",
+		nil, nil)
+
+	readingDesc = prometheus.NewDesc(
+		"nginx_current_reading_connections",
+		"current number of connections where nginx is reading the request header",
+		nil, nil)
+
+	writingDesc = prometheus.NewDesc(
+		"nginx_current_writing_connections",
+		"current number of connections where nginx is writing the response back to the client",
+		nil, nil)
+
+	waitingDesc = prometheus.NewDesc(
+		"nginx_current_waiting_connections",
+		"current number of idle client connections waiting for a request",
+		nil, nil)
 )

 type (
@@ -170,23 +224,27 @@ type (
 	namedProcessCollector struct {
 		scrapeChan chan scrapeRequest
 		*proc.Grouper
 		fs *proc.FS
+		vtsCollector bool
 	}
 )

 func newProcessCollector(
 	children bool,
-	n common.MatchNamer) (*namedProcessCollector, error) {
+	n common.MatchNamer,
+	vtsCollector bool) (*namedProcessCollector, error) {

 	fs, err := proc.NewFS("/proc")
 	if err != nil {
 		return nil, err
 	}
 	p := &namedProcessCollector{
 		scrapeChan: make(chan scrapeRequest),
 		Grouper:    proc.NewGrouper(children, n),
 		fs:         fs,
+		vtsCollector: vtsCollector,
 	}

 	_, err = p.Update(p.fs.AllProcs())
 	if err != nil {
 		return nil, err
@@ -199,30 +257,37 @@ func newProcessCollector(

 // Describe implements prometheus.Collector.
 func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {

 	ch <- cpuSecsDesc
+	ch <- numprocsDesc
+	ch <- readBytesDesc
+	ch <- writeBytesDesc
 	ch <- memResidentbytesDesc
 	ch <- memVirtualbytesDesc
 	ch <- startTimeDesc

-	ch <- bytesDesc
-	ch <- cacheDesc
-	ch <- connectionsDesc
-	ch <- readBytesDesc
-	ch <- requestDesc
-	ch <- responseDesc
-	ch <- writeBytesDesc
-	ch <- upstreamBackupDesc
-	ch <- upstreamBytesDesc
-	ch <- upstreamDownDesc
-	ch <- upstreamFailTimeoutDesc
-	ch <- upstreamMaxFailsDesc
-	ch <- upstreamRequestDesc
-	ch <- upstreamResponseMsecDesc
-	ch <- upstreamResponsesDesc
-	ch <- upstreamWeightDesc
-
-	ch <- numprocsDesc
-
+	if p.vtsCollector {
+		ch <- vtsBytesDesc
+		ch <- vtsCacheDesc
+		ch <- vtsConnectionsDesc
+		ch <- readBytesDesc
+		ch <- vtsRequestDesc
+		ch <- vtsResponseDesc
+		ch <- writeBytesDesc
+		ch <- vtsUpstreamBackupDesc
+		ch <- vtsUpstreamBytesDesc
+		ch <- vtsUpstreamDownDesc
+		ch <- vtsUpstreamFailTimeoutDesc
+		ch <- vtsUpstreamMaxFailsDesc
+		ch <- vtsUpstreamRequestDesc
+		ch <- vtsUpstreamResponseMsecDesc
+		ch <- vtsUpstreamResponsesDesc
+		ch <- vtsUpstreamWeightDesc
+		ch <- vtsFilterZoneBytesDesc
+		ch <- vtsFilterZoneCacheDesc
+		ch <- vtsFilterZoneResponseDesc
+	}

 }
@@ -234,71 +299,88 @@ func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {
 }

 func (p *namedProcessCollector) start() {

+	//glog.Warningf("OOO %v", p.configmap.Data)
+
 	for req := range p.scrapeChan {
 		ch := req.results
-		p.scrape(ch)
+		p.scrapeNginxStatus(ch)
+
+		if p.vtsCollector {
+			p.scrapeVts(ch)
+		}
+
 		req.done <- struct{}{}
 	}
 }

-func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
-
-	val := reflect.ValueOf(value).Elem()
-
-	for i := 0; i < val.NumField(); i++ {
-		tag := val.Type().Field(i).Tag
-
-		labels := append(labels, tag.Get("json"))
-		ch <- prometheus.MustNewConstMetric(desc,
-			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
-			labels...)
-	}
-
-}
-
-func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
-
-	nginxMetrics, err := getNginxMetrics()
+func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {
+	s, err := getNginxStatus()
 	if err != nil {
 		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
 		return
 	}

+	p.scrapeProcs(ch)
+
+	ch <- prometheus.MustNewConstMetric(activeDesc,
+		prometheus.GaugeValue, float64(s.Active))
+	ch <- prometheus.MustNewConstMetric(acceptedDesc,
+		prometheus.GaugeValue, float64(s.Accepted))
+	ch <- prometheus.MustNewConstMetric(handledDesc,
+		prometheus.GaugeValue, float64(s.Handled))
+	ch <- prometheus.MustNewConstMetric(requestsDesc,
+		prometheus.GaugeValue, float64(s.Requests))
+	ch <- prometheus.MustNewConstMetric(readingDesc,
+		prometheus.GaugeValue, float64(s.Reading))
+	ch <- prometheus.MustNewConstMetric(writingDesc,
+		prometheus.GaugeValue, float64(s.Writing))
+	ch <- prometheus.MustNewConstMetric(waitingDesc,
+		prometheus.GaugeValue, float64(s.Waiting))
+
-	reflectMetrics(&nginxMetrics.Connections, connectionsDesc, ch)
+}

+func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {
+
+	nginxMetrics, err := getNginxVtsMetrics()
+	if err != nil {
+		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
+		return
+	}
+
+	reflectMetrics(&nginxMetrics.Connections, vtsConnectionsDesc, ch)
+
 	for name, zones := range nginxMetrics.UpstreamZones {

 		for pos, value := range zones {

-			reflectMetrics(&zones[pos].Responses, upstreamResponsesDesc, ch, name, value.Server)
+			reflectMetrics(&zones[pos].Responses, vtsUpstreamResponsesDesc, ch, name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamRequestDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamRequestDesc,
 				prometheus.CounterValue, float64(zones[pos].RequestCounter), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamDownDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamDownDesc,
 				prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamWeightDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamWeightDesc,
 				prometheus.CounterValue, float64(zones[pos].Weight), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamResponseMsecDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamResponseMsecDesc,
 				prometheus.CounterValue, float64(zones[pos].ResponseMsec), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamBackupDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBackupDesc,
 				prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamFailTimeoutDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamFailTimeoutDesc,
 				prometheus.CounterValue, float64(zones[pos].FailTimeout), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamMaxFailsDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamMaxFailsDesc,
 				prometheus.CounterValue, float64(zones[pos].MaxFails), name, value.Server)

-			ch <- prometheus.MustNewConstMetric(upstreamBytesDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
 				prometheus.CounterValue, float64(zones[pos].InBytes), name, value.Server, "in")

-			ch <- prometheus.MustNewConstMetric(upstreamBytesDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
 				prometheus.CounterValue, float64(zones[pos].OutBytes), name, value.Server, "out")

 		}
@@ -306,42 +388,44 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {

 	for name, zone := range nginxMetrics.ServerZones {

-		reflectMetrics(&zone.Responses, responseDesc, ch, name)
+		reflectMetrics(&zone.Responses, vtsResponseDesc, ch, name)
+		reflectMetrics(&zone.Cache, vtsCacheDesc, ch, name)

-		ch <- prometheus.MustNewConstMetric(requestDesc,
+		ch <- prometheus.MustNewConstMetric(vtsRequestDesc,
 			prometheus.CounterValue, float64(zone.RequestCounter), name)

-		ch <- prometheus.MustNewConstMetric(bytesDesc,
+		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
 			prometheus.CounterValue, float64(zone.InBytes), name, "in")

-		ch <- prometheus.MustNewConstMetric(bytesDesc,
+		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
 			prometheus.CounterValue, float64(zone.OutBytes), name, "out")

-		//cache
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheBypass), name, "bypass")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheExpired), name, "expired")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheHit), name, "hit")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheRevalidated), name, "revalidated")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheUpdating), name, "updating")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheStale), name, "stale")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheScarce), name, "scarce")
-
 	}

-	_, err = p.Update(p.fs.AllProcs())
+	for serverZone, countries := range nginxMetrics.FilterZones {
+
+		for country, zone := range countries {
+
+			serverZone = strings.Replace(serverZone, "country::", "", 1)
+
+			reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
+			reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)
+
+			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
+				prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
+
+			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
+				prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
+
+		}
+
+	}
+
+}
+
+func (p *namedProcessCollector) scrapeProcs(ch chan<- prometheus.Metric) {
+
+	_, err := p.Update(p.fs.AllProcs())
 	if err != nil {
 		glog.Warningf("unexpected error obtaining nginx process info: %v", err)
 		return
@@ -366,3 +450,18 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
 			prometheus.CounterValue, float64(gcounts.WriteBytes))
 	}
 }
+
+func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
+
+	val := reflect.ValueOf(value).Elem()
+
+	for i := 0; i < val.NumField(); i++ {
+		tag := val.Type().Field(i).Tag
+
+		labels := append(labels, tag.Get("json"))
+		ch <- prometheus.MustNewConstMetric(desc,
+			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
+			labels...)
+	}
+
+}
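For orientation: the reflectMetrics helper added above drives most of the vts metrics by walking a struct with reflection and using each field's json tag as the trailing label value. A minimal, self-contained sketch of that pattern follows (illustrative only; the response type, respDesc, and label values are stand-ins, not code from this commit).

// Standalone sketch of the reflection-over-json-tags pattern used by the collector.
package main

import (
	"fmt"
	"reflect"

	"github.com/prometheus/client_golang/prometheus"
)

// response mirrors the shape of the Response struct used by the collector.
type response struct {
	OneXx float64 `json:"1xx"`
	TwoXx float64 `json:"2xx"`
}

var respDesc = prometheus.NewDesc(
	"nginx_vts_responses_total",
	"The number of responses per status code class.",
	[]string{"server_zone", "status_code"}, nil)

// reflectMetrics emits one counter per struct field, appending the field's
// json tag as the last label value (here the status code class).
func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
	val := reflect.ValueOf(value).Elem()
	for i := 0; i < val.NumField(); i++ {
		tag := val.Type().Field(i).Tag
		labelValues := append(labels, tag.Get("json"))
		ch <- prometheus.MustNewConstMetric(desc,
			prometheus.CounterValue, val.Field(i).Float(), labelValues...)
	}
}

func main() {
	ch := make(chan prometheus.Metric, 4)
	reflectMetrics(&response{OneXx: 1, TwoXx: 42}, respDesc, ch, "example.com")
	close(ch)
	for m := range ch {
		fmt.Println(m.Desc())
	}
}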
1 controllers/nginx/pkg/cmd/controller/metrics_test.go  Normal file
@@ -0,0 +1 @@
+package main
@@ -19,6 +19,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/golang/glog"
 	"io/ioutil"
 	"net/http"
 	"regexp"
@@ -50,45 +51,44 @@ type nginxStatus struct {
 	Waiting int
 }

+// https://github.com/vozlt/nginx-module-vts
 type Vts struct {
 	NginxVersion string `json:"nginxVersion"`
 	LoadMsec int `json:"loadMsec"`
 	NowMsec int `json:"nowMsec"`
-	Connections Connections `json:"connections"`
-	ServerZones map[string]ServerZones `json:"serverZones"`
-	FilterZones map[string]FilterZone `json:"filterZones"`
+	// Total connections and requests(same as stub_status_module in NGINX)
+	Connections Connections `json:"connections"`
+	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone
+	ServerZones map[string]ServerZone `json:"serverZones"`
+	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
+	// the vhost_traffic_status_filter_by_set_key directive
+	FilterZones map[string]map[string]FilterZone `json:"filterZones"`
+	// Traffic(in/out) and request and response counts per server in each upstream group
 	UpstreamZones map[string][]UpstreamZone `json:"upstreamZones"`
 }

-type ServerZones struct {
+type ServerZone struct {
 	RequestCounter float64 `json:"requestCounter"`
 	InBytes float64 `json:"inBytes"`
 	OutBytes float64 `json:"outBytes"`
 	Responses Response `json:"responses"`
-	OverCounts OverCounts `json:"overCounts"`
-}
-
-type OverCounts struct {
-	RequestCounter float64 `json:"requestCounter"`
-	InBytes float64 `json:"inBytes"`
-	OutBytes float64 `json:"outBytes"`
-	OneXx float64 `json:"1xx"`
-	TwoXx float64 `json:"2xx"`
-	TheeXx float64 `json:"3xx"`
-	FourXx float64 `json:"4xx"`
-	FiveXx float64 `json:"5xx"`
+	Cache Cache `json:"responses"`
 }

 type FilterZone struct {
+	RequestCounter float64 `json:"requestCounter"`
+	InBytes float64 `json:"inBytes"`
+	OutBytes float64 `json:"outBytes"`
+	Cache Cache `json:"responses"`
+	Responses Response `json:"responses"`
 }

 type UpstreamZone struct {
+	Responses Response `json:"responses"`
 	Server string `json:"server"`
 	RequestCounter float64 `json:"requestCounter"`
 	InBytes float64 `json:"inBytes"`
 	OutBytes float64 `json:"outBytes"`
-	Responses Response `json:"responses"`
-	OverCounts OverCounts `json:"overcounts"`
 	ResponseMsec float64 `json:"responseMsec"`
 	Weight float64 `json:"weight"`
 	MaxFails float64 `json:"maxFails"`
@@ -97,20 +97,23 @@ type UpstreamZone struct {
 	Down BoolToFloat64 `json:"down"`
 }

+type Cache struct {
+	Miss float64 `json:"miss"`
+	Bypass float64 `json:"bypass"`
+	Expired float64 `json:"expired"`
+	Stale float64 `json:"stale"`
+	Updating float64 `json:"updating"`
+	Revalidated float64 `json:"revalidated"`
+	Hit float64 `json:"hit"`
+	Scarce float64 `json:"scarce"`
+}
+
 type Response struct {
 	OneXx float64 `json:"1xx"`
 	TwoXx float64 `json:"2xx"`
 	TheeXx float64 `json:"3xx"`
 	FourXx float64 `json:"4xx"`
 	FiveXx float64 `json:"5xx"`
-	CacheMiss float64 `json:"miss"`
-	CacheBypass float64 `json:"bypass"`
-	CacheExpired float64 `json:"expired"`
-	CacheStale float64 `json:"stale"`
-	CacheUpdating float64 `json:"updating"`
-	CacheRevalidated float64 `json:"revalidated"`
-	CacheHit float64 `json:"hit"`
-	CacheScarce float64 `json:"scarce"`
 }

 type Connections struct {
@@ -138,7 +141,11 @@ func (bit BoolToFloat64) UnmarshalJSON(data []byte) error {
 }

 func getNginxStatus() (*nginxStatus, error) {
-	data, err := httpBody(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath))
+	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
+	glog.V(3).Infof("scrapping url: %v", url)
+
+	data, err := httpBody(url)

 	if err != nil {
 		return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
@@ -166,9 +173,12 @@ func httpBody(url string) ([]byte, error) {

 }

 func getNginxVtsMetrics() (*Vts, error) {
-	data, err := httpBody(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath))
+	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
+	glog.V(3).Infof("scrapping url: %v", url)

-	if err {
+	data, err := httpBody(url)
+
+	if err != nil {
 		return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
 	}

@@ -68,3 +68,21 @@ func TestToint(t *testing.T) {
 		}
 	}
 }
+//
+//func TestUnmarshalJSON (t *testing.T){
+//	tests := []struct{
+//		in []byte
+//		exp float64
+//		error error
+//	}{
+//		{in: "false",exp: 0},
+//		{"0", 0},
+//		{"true", 1},
+//		{"1", 1},
+//		{" errr", error},
+//	}
+//
+//	for _,test := range tests
+//
+//
+//}
@@ -289,6 +289,7 @@ func NewDefault() Configuration {
 		UseProxyProtocol: false,
 		UseGzip: true,
 		WorkerProcesses: runtime.NumCPU(),
+		EnableVtsStatus: false,
 		VtsStatusZoneSize: "10m",
 		UseHTTP2: true,
 		Backend: defaults.Backend{