Cleanup collection of prometheus metrics
This commit is contained in: parent 1d38e3a384, commit 7ba389c1d0
11 changed files with 643 additions and 480 deletions
@@ -17,446 +17,37 @@ limitations under the License.

package main

import (
	"path/filepath"

	"github.com/golang/glog"

	common "github.com/ncabatoff/process-exporter"
	"github.com/ncabatoff/process-exporter/proc"
	"github.com/prometheus/client_golang/prometheus"
	"reflect"

	"k8s.io/ingress/controllers/nginx/pkg/metric/collector"
)

// TODO add current namespace
// TODO add ingress class
var (
	// descriptions borrow from https://github.com/vozlt/nginx-module-vts

	cpuSecsDesc = prometheus.NewDesc(
		"nginx_cpu_seconds_total",
		"Cpu usage in seconds",
		nil, nil)

	numprocsDesc = prometheus.NewDesc(
		"nginx_num_procs",
		"number of processes",
		nil, nil)

	memResidentbytesDesc = prometheus.NewDesc(
		"nginx_resident_memory_bytes",
		"number of bytes of memory in use",
		nil, nil)

	memVirtualbytesDesc = prometheus.NewDesc(
		"nginx_virtual_memory_bytes",
		"number of bytes of memory in use",
		nil, nil)

	readBytesDesc = prometheus.NewDesc(
		"nginx_read_bytes_total",
		"number of bytes read",
		nil, nil)

	startTimeDesc = prometheus.NewDesc(
		"nginx_oldest_start_time_seconds",
		"start time in seconds since 1970/01/01",
		nil, nil)

	writeBytesDesc = prometheus.NewDesc(
		"nginx_write_bytes_total",
		"number of bytes written",
		nil, nil)

	//vts metrics
	vtsBytesDesc = prometheus.NewDesc(
		"nginx_vts_bytes_total",
		"Nginx bytes count",
		[]string{"server_zone", "direction"}, nil)

	vtsCacheDesc = prometheus.NewDesc(
		"nginx_vts_cache_total",
		"Nginx cache count",
		[]string{"server_zone", "type"}, nil)

	vtsConnectionsDesc = prometheus.NewDesc(
		"nginx_vts_connections_total",
		"Nginx connections count",
		[]string{"type"}, nil)

	vtsResponseDesc = prometheus.NewDesc(
		"nginx_vts_responses_total",
		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"server_zone", "status_code"}, nil)

	vtsRequestDesc = prometheus.NewDesc(
		"nginx_vts_requests_total",
		"The total number of requested client connections.",
		[]string{"server_zone"}, nil)

	vtsFilterZoneBytesDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_bytes_total",
		"Nginx bytes count",
		[]string{"server_zone", "country", "direction"}, nil)

	vtsFilterZoneResponseDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_responses_total",
		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"server_zone", "country", "status_code"}, nil)

	vtsFilterZoneCacheDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_cache_total",
		"Nginx cache count",
		[]string{"server_zone", "country", "type"}, nil)

	vtsUpstreamBackupDesc = prometheus.NewDesc(
		"nginx_vts_upstream_backup",
		"Current backup setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamBytesDesc = prometheus.NewDesc(
		"nginx_vts_upstream_bytes_total",
		"The total number of bytes sent to this server.",
		[]string{"upstream", "server", "direction"}, nil)

	vtsUpstreamDownDesc = prometheus.NewDesc(
		"nginx_vts_upstream_down_total",
		"Current down setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamFailTimeoutDesc = prometheus.NewDesc(
		"nginx_vts_upstream_fail_timeout",
		"Current fail_timeout setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamMaxFailsDesc = prometheus.NewDesc(
		"nginx_vts_upstream_maxfails",
		"Current max_fails setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamResponsesDesc = prometheus.NewDesc(
		"nginx_vts_upstream_responses_total",
		"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"upstream", "server", "status_code"}, nil)

	vtsUpstreamRequestDesc = prometheus.NewDesc(
		"nginx_vts_upstream_requests_total",
		"The total number of client connections forwarded to this server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamResponseMsecDesc = prometheus.NewDesc(
		"nginx_vts_upstream_response_msecs_avg",
		"The average of only upstream response processing times in milliseconds.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamWeightDesc = prometheus.NewDesc(
		"nginx_vts_upstream_weight",
		"Current upstream weight setting of the server.",
		[]string{"upstream", "server"}, nil)

	activeDesc = prometheus.NewDesc(
		"nginx_active_connections",
		"total number of active connections",
		nil, nil)

	acceptedDesc = prometheus.NewDesc(
		"nginx_accepted_connections",
		"total number of accepted client connections",
		nil, nil)

	handledDesc = prometheus.NewDesc(
		"nginx_handled_connections",
		"total number of handled connections",
		nil, nil)

	requestsDesc = prometheus.NewDesc(
		"nginx_total_requests",
		"total number of client requests",
		nil, nil)

	readingDesc = prometheus.NewDesc(
		"nginx_current_reading_connections",
		"current number of connections where nginx is reading the request header",
		nil, nil)

	writingDesc = prometheus.NewDesc(
		"nginx_current_writing_connections",
		"current number of connections where nginx is writing the response back to the client",
		nil, nil)

	waitingDesc = prometheus.NewDesc(
		"nginx_current_waiting_connections",
		"current number of idle client connections waiting for a request",
		nil, nil)
)

type exeMatcher struct {
	name string
	args []string
func (n *NGINXController) setupMonitor(sm statusModule) {
	csm := n.statusModule
	if csm != sm {
		prometheus
		n.statusModule = sm
	}
}

func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
	if len(nacl.Cmdline) == 0 {
		return false, ""
	}
	cmd := filepath.Base(nacl.Cmdline[0])
	return em.name == cmd, ""
type statsCollector struct {
	process prometheus.Collector
	basic   prometheus.Collector
	vts     prometheus.Collector
}

func (n *NGINXController) setupMonitor(args []string, vtsCollector bool) {

	pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, vtsCollector)

func newStatsCollector() (*statsCollector, error) {
	pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{"nginx", n.cmdArgs})
	if err != nil {
		return nil, err
	}
	err = prometheus.Register(pc)
	if err != nil {
		glog.Fatalf("unexpected error registering nginx collector: %v", err)
	}

	err = prometheus.Register(pc)

	if err != nil {
		if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
			glog.Warningf("unexpected error registering nginx collector: %v", err)
			return nil, &statsCollector{
				process: pc,
			}
		}

	}

type (
	scrapeRequest struct {
		results chan<- prometheus.Metric
		done    chan struct{}
	}

	namedProcessCollector struct {
		scrapeChan chan scrapeRequest
		*proc.Grouper
		fs                 *proc.FS
		enableVtsCollector bool
	}
)

func newProcessCollector(
	children bool,
	n common.MatchNamer,
	enableVtsCollector bool) (*namedProcessCollector, error) {

	fs, err := proc.NewFS("/proc")
	if err != nil {
		return nil, err
	}
	p := &namedProcessCollector{
		scrapeChan:         make(chan scrapeRequest),
		Grouper:            proc.NewGrouper(children, n),
		fs:                 fs,
		enableVtsCollector: enableVtsCollector,
	}
	_, err = p.Update(p.fs.AllProcs())
	if err != nil {
		return nil, err
	}

	go p.start()

	return p, nil
}

// Describe implements prometheus.Collector.
func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- cpuSecsDesc
	ch <- numprocsDesc
	ch <- readBytesDesc
	ch <- writeBytesDesc
	ch <- memResidentbytesDesc
	ch <- memVirtualbytesDesc
	ch <- startTimeDesc

	//vts metrics
	ch <- vtsBytesDesc
	ch <- vtsCacheDesc
	ch <- vtsConnectionsDesc
	ch <- vtsRequestDesc
	ch <- vtsResponseDesc
	ch <- vtsUpstreamBackupDesc
	ch <- vtsUpstreamBytesDesc
	ch <- vtsUpstreamDownDesc
	ch <- vtsUpstreamFailTimeoutDesc
	ch <- vtsUpstreamMaxFailsDesc
	ch <- vtsUpstreamRequestDesc
	ch <- vtsUpstreamResponseMsecDesc
	ch <- vtsUpstreamResponsesDesc
	ch <- vtsUpstreamWeightDesc
	ch <- vtsFilterZoneBytesDesc
	ch <- vtsFilterZoneCacheDesc
	ch <- vtsFilterZoneResponseDesc
}

// Collect implements prometheus.Collector.
func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {
	req := scrapeRequest{results: ch, done: make(chan struct{})}
	p.scrapeChan <- req
	<-req.done
}

func (p *namedProcessCollector) start() {
	for req := range p.scrapeChan {
		ch := req.results
		p.scrapeNginxStatus(ch)
		p.scrapeProcs(ch)
		p.scrapeVts(ch)

		req.done <- struct{}{}
	}
}

// scrapeNginxStatus scrap the nginx status
func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {
	s, err := getNginxStatus()
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
		return
	}

	ch <- prometheus.MustNewConstMetric(activeDesc,
		prometheus.GaugeValue, float64(s.Active))
	ch <- prometheus.MustNewConstMetric(acceptedDesc,
		prometheus.GaugeValue, float64(s.Accepted))
	ch <- prometheus.MustNewConstMetric(handledDesc,
		prometheus.GaugeValue, float64(s.Handled))
	ch <- prometheus.MustNewConstMetric(requestsDesc,
		prometheus.GaugeValue, float64(s.Requests))
	ch <- prometheus.MustNewConstMetric(readingDesc,
		prometheus.GaugeValue, float64(s.Reading))
	ch <- prometheus.MustNewConstMetric(writingDesc,
		prometheus.GaugeValue, float64(s.Writing))
	ch <- prometheus.MustNewConstMetric(waitingDesc,
		prometheus.GaugeValue, float64(s.Waiting))
}

// scrapeVts scrape nginx vts metrics
func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {
	nginxMetrics, err := getNginxVtsMetrics()
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
		return
	}

	reflectMetrics(&nginxMetrics.Connections, vtsConnectionsDesc, ch)

	for name, zones := range nginxMetrics.UpstreamZones {
		for pos, value := range zones {
			reflectMetrics(&zones[pos].Responses, vtsUpstreamResponsesDesc, ch, name, value.Server)

			ch <- prometheus.MustNewConstMetric(vtsUpstreamRequestDesc,
				prometheus.CounterValue, float64(zones[pos].RequestCounter), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamDownDesc,
				prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamWeightDesc,
				prometheus.CounterValue, float64(zones[pos].Weight), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamResponseMsecDesc,
				prometheus.CounterValue, float64(zones[pos].ResponseMsec), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBackupDesc,
				prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamFailTimeoutDesc,
				prometheus.CounterValue, float64(zones[pos].FailTimeout), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamMaxFailsDesc,
				prometheus.CounterValue, float64(zones[pos].MaxFails), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
				prometheus.CounterValue, float64(zones[pos].InBytes), name, value.Server, "in")
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
				prometheus.CounterValue, float64(zones[pos].OutBytes), name, value.Server, "out")
		}
	}

	for name, zone := range nginxMetrics.ServerZones {
		reflectMetrics(&zone.Responses, vtsResponseDesc, ch, name)
		reflectMetrics(&zone.Cache, vtsCacheDesc, ch, name)

		ch <- prometheus.MustNewConstMetric(vtsRequestDesc,
			prometheus.CounterValue, float64(zone.RequestCounter), name)
		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
			prometheus.CounterValue, float64(zone.InBytes), name, "in")
		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
			prometheus.CounterValue, float64(zone.OutBytes), name, "out")
	}

	for serverZone, countries := range nginxMetrics.FilterZones {
		for country, zone := range countries {
			reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
			reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)

			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
				prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
				prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
		}
	}
}

func (p *namedProcessCollector) scrapeProcs(ch chan<- prometheus.Metric) {
	_, err := p.Update(p.fs.AllProcs())
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx process info: %v", err)
		return
	}

	for gname, gcounts := range p.Groups() {
		glog.Infof("%v", gname)
		glog.Infof("%v", gcounts)
		ch <- prometheus.MustNewConstMetric(numprocsDesc,
			prometheus.GaugeValue, float64(gcounts.Procs))
		ch <- prometheus.MustNewConstMetric(memResidentbytesDesc,
			prometheus.GaugeValue, float64(gcounts.Memresident))
		ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc,
			prometheus.GaugeValue, float64(gcounts.Memvirtual))
		ch <- prometheus.MustNewConstMetric(startTimeDesc,
			prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
		ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
			prometheus.CounterValue, gcounts.Cpu)
		ch <- prometheus.MustNewConstMetric(readBytesDesc,
			prometheus.CounterValue, float64(gcounts.ReadBytes))
		ch <- prometheus.MustNewConstMetric(writeBytesDesc,
			prometheus.CounterValue, float64(gcounts.WriteBytes))
	}
}

func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
	val := reflect.ValueOf(value).Elem()

	for i := 0; i < val.NumField(); i++ {
		tag := val.Type().Field(i).Tag

		labels := append(labels, tag.Get("json"))
		ch <- prometheus.MustNewConstMetric(desc,
			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
			labels...)
	}
}
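For context, here is a minimal sketch (not part of this commit) of how one of the new collector package constructors could be registered while tolerating duplicate registration, which is the pattern the partially shown newStatsCollector above follows; the function name registerStatusCollector is hypothetical.

package main

import (
	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"

	"k8s.io/ingress/controllers/nginx/pkg/metric/collector"
)

// registerStatusCollector builds the basic nginx status collector and registers it,
// treating prometheus.AlreadyRegisteredError as non-fatal.
func registerStatusCollector() (prometheus.Collector, error) {
	pc, err := collector.NewNginxStatus()
	if err != nil {
		return nil, err
	}
	if err := prometheus.Register(pc); err != nil {
		if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
			glog.Warningf("unexpected error registering nginx collector: %v", err)
			return nil, err
		}
	}
	return pc, nil
}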
@@ -33,20 +33,26 @@ import (
	"k8s.io/kubernetes/pkg/api"

	"strings"

	"k8s.io/ingress/controllers/nginx/pkg/config"
	ngx_template "k8s.io/ingress/controllers/nginx/pkg/template"
	"k8s.io/ingress/controllers/nginx/pkg/version"
	"k8s.io/ingress/core/pkg/ingress"
	"k8s.io/ingress/core/pkg/ingress/defaults"
	"k8s.io/ingress/core/pkg/net/ssl"
	"strings"
)

type statusModule string

const (
	ngxHealthPort = 18080
	ngxHealthPath = "/healthz"
	ngxStatusPath = "/internal_nginx_status"
	ngxVtsPath    = "/nginx_status/format/json"

	defaultStatusModule statusModule = "default"
	vtsStatusModule     statusModule = "vts"
)

var (

@@ -108,6 +114,10 @@ type NGINXController struct {
	storeLister ingress.StoreLister

	binary string

	cmdArgs []string

	statusModule statusModule
}

// Start start a new NGINX master process running in foreground.

@@ -157,8 +167,17 @@ func (n *NGINXController) start(cmd *exec.Cmd, done chan error) {
		done <- err
		return
	}

	n.cmdArgs = cmd.Args

	cfg := ngx_template.ReadConfig(n.configmap.Data)
	n.setupMonitor(cmd.Args, cfg.EnableVtsStatus)
	n.statusModule = defaultStatusModule
	if cfg.EnableVtsStatus {
		n.statusModule = vtsStatusModule
		n.setupMonitor(vtsStatusModule)
	} else {
		n.setupMonitor(defaultStatusModule)
	}

	go func() {
		done <- cmd.Wait()

@@ -315,7 +334,9 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
	}

	cfg := ngx_template.ReadConfig(n.configmap.Data)
	n.setupMonitor([]string{""}, cfg.EnableVtsStatus)

	// we need to check if the status module configuration changed
	n.setupMonitor()

	// NGINX cannot resize the has tables used to store server names.
	// For this reason we check if the defined size defined is correct
@@ -259,8 +259,6 @@ type Configuration struct {
func NewDefault() Configuration {
	cfg := Configuration{
		ClientHeaderBufferSize:  "1k",
		DisableAccessLog:        false,
		DisableIpv6:             false,
		EnableDynamicTLSRecords: true,
		ErrorLogLevel:           errorLevel,
		HTTP2MaxFieldSize:       "4k",

@@ -286,10 +284,8 @@ func NewDefault() Configuration {
		SSLSessionCacheSize: sslSessionCacheSize,
		SSLSessionTickets:   true,
		SSLSessionTimeout:   sslSessionTimeout,
		UseProxyProtocol:    false,
		UseGzip:             true,
		WorkerProcesses:     runtime.NumCPU(),
		EnableVtsStatus:     false,
		VtsStatusZoneSize:   "10m",
		UseHTTP2:            true,
		Backend: defaults.Backend{

@@ -301,11 +297,9 @@ func NewDefault() Configuration {
			ProxyCookieDomain:    "off",
			ProxyCookiePath:      "off",
			SSLRedirect:          true,
			ForceSSLRedirect:     false,
			CustomHTTPErrors:     []int{},
			WhitelistSourceRange: []string{},
			SkipAccessLogURLs:    []string{},
			UsePortInRedirects:   false,
		},
	}
controllers/nginx/pkg/metric/collector/nginx.go (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	activeDesc = prometheus.NewDesc(
		"nginx_active_connections",
		"total number of active connections",
		nil, nil)

	acceptedDesc = prometheus.NewDesc(
		"nginx_accepted_connections",
		"total number of accepted client connections",
		nil, nil)

	handledDesc = prometheus.NewDesc(
		"nginx_handled_connections",
		"total number of handled connections",
		nil, nil)

	requestsDesc = prometheus.NewDesc(
		"nginx_total_requests",
		"total number of client requests",
		nil, nil)

	readingDesc = prometheus.NewDesc(
		"nginx_current_reading_connections",
		"current number of connections where nginx is reading the request header",
		nil, nil)

	writingDesc = prometheus.NewDesc(
		"nginx_current_writing_connections",
		"current number of connections where nginx is writing the response back to the client",
		nil, nil)

	waitingDesc = prometheus.NewDesc(
		"nginx_current_waiting_connections",
		"current number of idle client connections waiting for a request",
		nil, nil)
)

type (
	nginxStatusCollector struct {
		scrapeChan chan scrapeRequest
	}
)

func NewNginxStatus() (prometheus.Collector, error) {
	p := nginxStatusCollector{
		scrapeChan: make(chan scrapeRequest),
	}

	go p.start()

	return p, nil
}

// Describe implements prometheus.Collector.
func (p nginxStatusCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- activeDesc
	ch <- acceptedDesc
	ch <- handledDesc
	ch <- requestsDesc
	ch <- readingDesc
	ch <- writingDesc
	ch <- waitingDesc
}

// Collect implements prometheus.Collector.
func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) {
	req := scrapeRequest{results: ch, done: make(chan struct{})}
	p.scrapeChan <- req
	<-req.done
}

func (p nginxStatusCollector) start() {
	for req := range p.scrapeChan {
		ch := req.results
		p.scrape(ch)
		req.done <- struct{}{}
	}
}

func (p nginxStatusCollector) Stop() {
	close(p.scrapeChan)
}

// nginxStatusCollector scrap the nginx status
func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
	s, err := getNginxStatus()
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
		return
	}

	ch <- prometheus.MustNewConstMetric(activeDesc,
		prometheus.GaugeValue, float64(s.Active))
	ch <- prometheus.MustNewConstMetric(acceptedDesc,
		prometheus.GaugeValue, float64(s.Accepted))
	ch <- prometheus.MustNewConstMetric(handledDesc,
		prometheus.GaugeValue, float64(s.Handled))
	ch <- prometheus.MustNewConstMetric(requestsDesc,
		prometheus.GaugeValue, float64(s.Requests))
	ch <- prometheus.MustNewConstMetric(readingDesc,
		prometheus.GaugeValue, float64(s.Reading))
	ch <- prometheus.MustNewConstMetric(writingDesc,
		prometheus.GaugeValue, float64(s.Writing))
	ch <- prometheus.MustNewConstMetric(waitingDesc,
		prometheus.GaugeValue, float64(s.Waiting))
}
controllers/nginx/pkg/metric/collector/process.go (new file, 157 lines)
@@ -0,0 +1,157 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
	"path/filepath"

	"github.com/golang/glog"
	common "github.com/ncabatoff/process-exporter"
	"github.com/ncabatoff/process-exporter/proc"
	"github.com/prometheus/client_golang/prometheus"
)

type BinaryNameMatcher struct {
	name string
	args []string
}

func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
	if len(nacl.Cmdline) == 0 {
		return false, ""
	}
	cmd := filepath.Base(nacl.Cmdline[0])
	return em.name == cmd, ""
}

var (
	numprocsDesc = prometheus.NewDesc(
		"nginx_num_procs",
		"number of processes",
		nil, nil)

	cpuSecsDesc = prometheus.NewDesc(
		"nginx_cpu_seconds_total",
		"Cpu usage in seconds",
		nil, nil)

	readBytesDesc = prometheus.NewDesc(
		"nginx_read_bytes_total",
		"number of bytes read",
		nil, nil)

	writeBytesDesc = prometheus.NewDesc(
		"nginx_write_bytes_total",
		"number of bytes written",
		nil, nil)

	memResidentbytesDesc = prometheus.NewDesc(
		"nginx_resident_memory_bytes",
		"number of bytes of memory in use",
		nil, nil)

	memVirtualbytesDesc = prometheus.NewDesc(
		"nginx_virtual_memory_bytes",
		"number of bytes of memory in use",
		nil, nil)

	startTimeDesc = prometheus.NewDesc(
		"nginx_oldest_start_time_seconds",
		"start time in seconds since 1970/01/01",
		nil, nil)
)

type namedProcess struct {
	scrapeChan chan scrapeRequest
	*proc.Grouper
	fs *proc.FS
}

func NewNamedProcessCollector(children bool, mn common.MatchNamer) (prometheus.Collector, error) {
	fs, err := proc.NewFS("/proc")
	if err != nil {
		return nil, err
	}
	p := namedProcess{
		scrapeChan: make(chan scrapeRequest),
		Grouper:    proc.NewGrouper(children, mn),
		fs:         fs,
	}
	_, err = p.Update(p.fs.AllProcs())
	if err != nil {
		return nil, err
	}

	go p.start()

	return p, nil
}

// Describe implements prometheus.Collector.
func (p namedProcess) Describe(ch chan<- *prometheus.Desc) {
	ch <- cpuSecsDesc
	ch <- numprocsDesc
	ch <- readBytesDesc
	ch <- writeBytesDesc
	ch <- memResidentbytesDesc
	ch <- memVirtualbytesDesc
	ch <- startTimeDesc
}

// Collect implements prometheus.Collector.
func (p namedProcess) Collect(ch chan<- prometheus.Metric) {
	req := scrapeRequest{results: ch, done: make(chan struct{})}
	p.scrapeChan <- req
	<-req.done
}

func (p namedProcess) start() {
	for req := range p.scrapeChan {
		ch := req.results
		p.scrape(ch)
		req.done <- struct{}{}
	}
}

func (p namedProcess) Stop() {
	close(p.scrapeChan)
}

func (p namedProcess) scrape(ch chan<- prometheus.Metric) {
	_, err := p.Update(p.fs.AllProcs())
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx process info: %v", err)
		return
	}

	for gname, gcounts := range p.Groups() {
		ch <- prometheus.MustNewConstMetric(numprocsDesc,
			prometheus.GaugeValue, float64(gcounts.Procs))
		ch <- prometheus.MustNewConstMetric(memResidentbytesDesc,
			prometheus.GaugeValue, float64(gcounts.Memresident))
		ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc,
			prometheus.GaugeValue, float64(gcounts.Memvirtual))
		ch <- prometheus.MustNewConstMetric(startTimeDesc,
			prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
		ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
			prometheus.CounterValue, gcounts.Cpu)
		ch <- prometheus.MustNewConstMetric(readBytesDesc,
			prometheus.CounterValue, float64(gcounts.ReadBytes))
		ch <- prometheus.MustNewConstMetric(writeBytesDesc,
			prometheus.CounterValue, float64(gcounts.WriteBytes))
	}
}
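A small usage sketch of BinaryNameMatcher, written as if it sat inside the collector package (its fields are unexported); the helper name and sample command line are illustrative only:

package collector

import (
	"fmt"

	common "github.com/ncabatoff/process-exporter"
)

// exampleMatch shows that only processes whose executable base name equals the
// configured name are matched; the returned group name is left empty.
func exampleMatch() {
	m := BinaryNameMatcher{name: "nginx"}
	ok, _ := m.MatchAndName(common.NameAndCmdline{
		Cmdline: []string{"/usr/sbin/nginx", "-c", "/etc/nginx/nginx.conf"},
	})
	fmt.Println(ok) // true, because filepath.Base("/usr/sbin/nginx") == "nginx"
}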
controllers/nginx/pkg/metric/collector/scrape.go (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import "github.com/prometheus/client_golang/prometheus"

type scrapeRequest struct {
	results chan<- prometheus.Metric
	done    chan struct{}
}
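The scrapeRequest type above is the hand-off used by every collector in the package: Collect sends a request over the collector's channel and blocks on done, while the goroutine started by the constructor performs the scrape. A stripped-down sketch, with demoCollector as a purely illustrative type:

package collector

import "github.com/prometheus/client_golang/prometheus"

type demoCollector struct {
	scrapeChan chan scrapeRequest
}

// Collect hands the Prometheus result channel to the scraping goroutine and waits
// until that goroutine signals completion.
func (d demoCollector) Collect(ch chan<- prometheus.Metric) {
	req := scrapeRequest{results: ch, done: make(chan struct{})}
	d.scrapeChan <- req
	<-req.done
}

// start runs in its own goroutine and serializes all scrapes for this collector.
func (d demoCollector) start() {
	for req := range d.scrapeChan {
		// ... gather metrics and send them on req.results ...
		req.done <- struct{}{}
	}
}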
@@ -14,16 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package main
package collector

import (
	"encoding/json"
	"fmt"
	"github.com/golang/glog"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"

	"github.com/golang/glog"
)

var (

@@ -34,7 +35,7 @@ var (
	waiting = regexp.MustCompile(`Waiting: (\d+)`)
)

type nginxStatus struct {
type basicStatus struct {
	// Active total number of active connections
	Active int
	// Accepted total number of accepted client connections

@@ -52,39 +53,39 @@ type nginxStatus struct {
}

// https://github.com/vozlt/nginx-module-vts
type Vts struct {
type vts struct {
	NginxVersion string `json:"nginxVersion"`
	LoadMsec     int    `json:"loadMsec"`
	NowMsec      int    `json:"nowMsec"`
	// Total connections and requests(same as stub_status_module in NGINX)
	Connections Connections `json:"connections"`
	Connections connections `json:"connections"`
	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone
	ServerZones map[string]ServerZone `json:"serverZones"`
	ServerZones map[string]serverZone `json:"serverZones"`
	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
	// the vhost_traffic_status_filter_by_set_key directive
	FilterZones map[string]map[string]FilterZone `json:"filterZones"`
	FilterZones map[string]map[string]filterZone `json:"filterZones"`
	// Traffic(in/out) and request and response counts per server in each upstream group
	UpstreamZones map[string][]UpstreamZone `json:"upstreamZones"`
	UpstreamZones map[string][]upstreamZone `json:"upstreamZones"`
}

type ServerZone struct {
type serverZone struct {
	RequestCounter float64 `json:"requestCounter"`
	InBytes        float64 `json:"inBytes"`
	OutBytes       float64 `json:"outBytes"`
	Responses Response `json:"responses"`
	Cache     Cache    `json:"responses"`
	Responses response `json:"responses"`
	Cache     cache    `json:"responses"`
}

type FilterZone struct {
type filterZone struct {
	RequestCounter float64 `json:"requestCounter"`
	InBytes        float64 `json:"inBytes"`
	OutBytes       float64 `json:"outBytes"`
	Cache     Cache    `json:"responses"`
	Responses Response `json:"responses"`
	Cache     cache    `json:"responses"`
	Responses response `json:"responses"`
}

type UpstreamZone struct {
	Responses Response `json:"responses"`
type upstreamZone struct {
	Responses response `json:"responses"`
	Server         string  `json:"server"`
	RequestCounter float64 `json:"requestCounter"`
	InBytes        float64 `json:"inBytes"`

@@ -97,7 +98,7 @@ type UpstreamZone struct {
	Down BoolToFloat64 `json:"down"`
}

type Cache struct {
type cache struct {
	Miss    float64 `json:"miss"`
	Bypass  float64 `json:"bypass"`
	Expired float64 `json:"expired"`

@@ -108,7 +109,7 @@ type Cache struct {
	Scarce float64 `json:"scarce"`
}

type Response struct {
type response struct {
	OneXx  float64 `json:"1xx"`
	TwoXx  float64 `json:"2xx"`
	TheeXx float64 `json:"3xx"`

@@ -116,7 +117,7 @@ type Response struct {
	FiveXx float64 `json:"5xx"`
}

type Connections struct {
type connections struct {
	Active  float64 `json:"active"`
	Reading float64 `json:"reading"`
	Writing float64 `json:"writing"`

@@ -140,8 +141,7 @@ func (bit BoolToFloat64) UnmarshalJSON(data []byte) error {
	return nil
}

func getNginxStatus() (*nginxStatus, error) {

func getNginxStatus() (*basicStatus, error) {
	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
	glog.V(3).Infof("start scrapping url: %v", url)

@@ -170,10 +170,9 @@ func httpBody(url string) ([]byte, error) {
	}

	return data, nil

}

func getNginxVtsMetrics() (*Vts, error) {
func getNginxVtsMetrics() (*vts, error) {
	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
	glog.V(3).Infof("start scrapping url: %v", url)

@@ -183,25 +182,23 @@ func getNginxVtsMetrics() (*Vts, error) {
		return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
	}

	var vts Vts
	var vts *vts
	err = json.Unmarshal(data, &vts)
	if err != nil {
		return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
	}

	glog.V(3).Infof("scrap returned : %v", vts)

	return &vts, nil
	return vts, nil
}

func parse(data string) *nginxStatus {
func parse(data string) *basicStatus {
	acr := ac.FindStringSubmatch(data)
	sahrr := sahr.FindStringSubmatch(data)
	readingr := reading.FindStringSubmatch(data)
	writingr := writing.FindStringSubmatch(data)
	waitingr := waiting.FindStringSubmatch(data)

	return &nginxStatus{
	return &basicStatus{
		toInt(acr, 1),
		toInt(sahrr, 1),
		toInt(sahrr, 2),

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package main
package collector

import (
	"reflect"
controllers/nginx/pkg/metric/collector/vts.go (new file, 237 lines)
@@ -0,0 +1,237 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
	"reflect"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	vtsBytesDesc = prometheus.NewDesc(
		"nginx_vts_bytes_total",
		"Nginx bytes count",
		[]string{"server_zone", "direction"}, nil)

	vtsCacheDesc = prometheus.NewDesc(
		"nginx_vts_cache_total",
		"Nginx cache count",
		[]string{"server_zone", "type"}, nil)

	vtsConnectionsDesc = prometheus.NewDesc(
		"nginx_vts_connections_total",
		"Nginx connections count",
		[]string{"type"}, nil)

	vtsResponseDesc = prometheus.NewDesc(
		"nginx_vts_responses_total",
		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"server_zone", "status_code"}, nil)

	vtsRequestDesc = prometheus.NewDesc(
		"nginx_vts_requests_total",
		"The total number of requested client connections.",
		[]string{"server_zone"}, nil)

	vtsFilterZoneBytesDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_bytes_total",
		"Nginx bytes count",
		[]string{"server_zone", "country", "direction"}, nil)

	vtsFilterZoneResponseDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_responses_total",
		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"server_zone", "country", "status_code"}, nil)

	vtsFilterZoneCacheDesc = prometheus.NewDesc(
		"nginx_vts_filterzone_cache_total",
		"Nginx cache count",
		[]string{"server_zone", "country", "type"}, nil)

	vtsUpstreamBackupDesc = prometheus.NewDesc(
		"nginx_vts_upstream_backup",
		"Current backup setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamBytesDesc = prometheus.NewDesc(
		"nginx_vts_upstream_bytes_total",
		"The total number of bytes sent to this server.",
		[]string{"upstream", "server", "direction"}, nil)

	vtsUpstreamDownDesc = prometheus.NewDesc(
		"nginx_vts_upstream_down_total",
		"Current down setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamFailTimeoutDesc = prometheus.NewDesc(
		"nginx_vts_upstream_fail_timeout",
		"Current fail_timeout setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamMaxFailsDesc = prometheus.NewDesc(
		"nginx_vts_upstream_maxfails",
		"Current max_fails setting of the server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamResponsesDesc = prometheus.NewDesc(
		"nginx_vts_upstream_responses_total",
		"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
		[]string{"upstream", "server", "status_code"}, nil)

	vtsUpstreamRequestDesc = prometheus.NewDesc(
		"nginx_vts_upstream_requests_total",
		"The total number of client connections forwarded to this server.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamResponseMsecDesc = prometheus.NewDesc(
		"nginx_vts_upstream_response_msecs_avg",
		"The average of only upstream response processing times in milliseconds.",
		[]string{"upstream", "server"}, nil)

	vtsUpstreamWeightDesc = prometheus.NewDesc(
		"nginx_vts_upstream_weight",
		"Current upstream weight setting of the server.",
		[]string{"upstream", "server"}, nil)
)

type vtsCollector struct {
	scrapeChan chan scrapeRequest
}

func NewNGINXVTSCollector() (prometheus.Collector, error) {
	p := vtsCollector{
		scrapeChan: make(chan scrapeRequest),
	}

	go p.start()

	return p, nil
}

// Describe implements prometheus.Collector.
func (p *vtsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- vtsBytesDesc
	ch <- vtsCacheDesc
	ch <- vtsConnectionsDesc
	ch <- vtsRequestDesc
	ch <- vtsResponseDesc
	ch <- vtsUpstreamBackupDesc
	ch <- vtsUpstreamBytesDesc
	ch <- vtsUpstreamDownDesc
	ch <- vtsUpstreamFailTimeoutDesc
	ch <- vtsUpstreamMaxFailsDesc
	ch <- vtsUpstreamRequestDesc
	ch <- vtsUpstreamResponseMsecDesc
	ch <- vtsUpstreamResponsesDesc
	ch <- vtsUpstreamWeightDesc
	ch <- vtsFilterZoneBytesDesc
	ch <- vtsFilterZoneCacheDesc
	ch <- vtsFilterZoneResponseDesc
}

// Collect implements prometheus.Collector.
func (p *vtsCollector) Collect(ch chan<- prometheus.Metric) {
	req := scrapeRequest{results: ch, done: make(chan struct{})}
	p.scrapeChan <- req
	<-req.done
}

func (p *vtsCollector) start() {
	for req := range p.scrapeChan {
		ch := req.results
		p.scrapeVts(ch)
		req.done <- struct{}{}
	}
}

func (p *vtsCollector) Stop() {
	close(p.scrapeChan)
}

// scrapeVts scrape nginx vts metrics
func (p *vtsCollector) scrapeVts(ch chan<- prometheus.Metric) {
	nginxMetrics, err := getNginxVtsMetrics()
	if err != nil {
		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
		return
	}

	reflectMetrics(&nginxMetrics.Connections, vtsConnectionsDesc, ch)

	for name, zones := range nginxMetrics.UpstreamZones {
		for pos, value := range zones {
			reflectMetrics(&zones[pos].Responses, vtsUpstreamResponsesDesc, ch, name, value.Server)

			ch <- prometheus.MustNewConstMetric(vtsUpstreamRequestDesc,
				prometheus.CounterValue, float64(zones[pos].RequestCounter), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamDownDesc,
				prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamWeightDesc,
				prometheus.CounterValue, float64(zones[pos].Weight), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamResponseMsecDesc,
				prometheus.CounterValue, float64(zones[pos].ResponseMsec), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBackupDesc,
				prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamFailTimeoutDesc,
				prometheus.CounterValue, float64(zones[pos].FailTimeout), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamMaxFailsDesc,
				prometheus.CounterValue, float64(zones[pos].MaxFails), name, value.Server)
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
				prometheus.CounterValue, float64(zones[pos].InBytes), name, value.Server, "in")
			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
				prometheus.CounterValue, float64(zones[pos].OutBytes), name, value.Server, "out")
		}
	}

	for name, zone := range nginxMetrics.ServerZones {
		reflectMetrics(&zone.Responses, vtsResponseDesc, ch, name)
		reflectMetrics(&zone.Cache, vtsCacheDesc, ch, name)

		ch <- prometheus.MustNewConstMetric(vtsRequestDesc,
			prometheus.CounterValue, float64(zone.RequestCounter), name)
		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
			prometheus.CounterValue, float64(zone.InBytes), name, "in")
		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
			prometheus.CounterValue, float64(zone.OutBytes), name, "out")
	}

	for serverZone, countries := range nginxMetrics.FilterZones {
		for country, zone := range countries {
			reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
			reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)

			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
				prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
				prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
		}
	}
}

func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
	val := reflect.ValueOf(value).Elem()

	for i := 0; i < val.NumField(); i++ {
		tag := val.Type().Field(i).Tag
		labels := append(labels, tag.Get("json"))
		ch <- prometheus.MustNewConstMetric(desc,
			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
			labels...)
	}
}
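The reflectMetrics helper at the end of vts.go walks a struct's fields and uses each field's json tag as the trailing label value. A self-contained illustration of that reflection pattern, using a hypothetical two-field struct and plain printing in place of Prometheus metrics:

package main

import (
	"fmt"
	"reflect"
)

type responses struct {
	OneXx float64 `json:"1xx"`
	TwoXx float64 `json:"2xx"`
}

func main() {
	r := responses{OneXx: 3, TwoXx: 40}
	val := reflect.ValueOf(&r).Elem()
	for i := 0; i < val.NumField(); i++ {
		// The json tag ("1xx", "2xx", ...) is what becomes the status_code label in vts.go.
		tag := val.Type().Field(i).Tag.Get("json")
		fmt.Printf("status_code=%s value=%v\n", tag, val.Field(i).Interface())
	}
}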
@@ -61,6 +61,9 @@ http {
    client_header_buffer_size       {{ $cfg.ClientHeaderBufferSize }};
    large_client_header_buffers     {{ $cfg.LargeClientHeaderBuffers }};

    http2_max_field_size            {{ $cfg.HTTP2MaxFieldSize }};
    http2_max_header_size           {{ $cfg.HTTP2MaxHeaderSize }};

    types_hash_max_size             2048;
    server_names_hash_max_size      {{ $cfg.ServerNameHashMaxSize }};
    server_names_hash_bucket_size   {{ $cfg.ServerNameHashBucketSize }};

@@ -79,7 +82,7 @@ http {

    server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }};

    log_format upstreaminfo {{ buildLogFormatUpstream $cfg }};
    log_format upstreaminfo '{{ buildLogFormatUpstream $cfg }}';

    {{/* map urls that should not appear in access.log */}}
    {{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}}

@@ -207,10 +210,10 @@ http {
    {{ range $index, $server := .Servers }}
    server {
        server_name {{ $server.Hostname }};
        listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $index 0 }} ipv6only=off{{end}}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}};
        listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}};
        {{/* Listen on 442 because port 443 is used in the stream section */}}
        {{/* This listen cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
        {{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}[::]:443 {{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
        {{/* This listen on port 442 cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
        {{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}{{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
        {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
        # PEM sha: {{ $server.SSLPemChecksum }}
        ssl_certificate {{ $server.SSLCertificate }};

@@ -243,6 +246,8 @@ http {
            {{ end }}
            {{ if not (empty $location.ExternalAuth.Method) }}
            proxy_method {{ $location.ExternalAuth.Method }};
            proxy_set_header X-Original-URI $request_uri;
            proxy_set_header X-Scheme $pass_access_scheme;
            {{ end }}
            proxy_set_header Host $host;
            proxy_pass_request_headers on;

@@ -268,9 +273,13 @@ http {
            auth_request {{ $authPath }};
            {{ end }}

            {{ if (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect) }}
            {{ if not (empty $location.ExternalAuth.SigninURL) }}
            error_page 401 = {{ $location.ExternalAuth.SigninURL }};
            {{ end }}

            {{ if (or $location.Redirect.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect)) }}
            # enforce ssl on server side
            if ($scheme = http) {
            if ($pass_access_scheme = http) {
                return 301 https://$host$request_uri;
            }
            {{ end }}

@@ -314,6 +323,8 @@ http {
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Port $pass_port;
            proxy_set_header X-Forwarded-Proto $pass_access_scheme;
            proxy_set_header X-Original-URI $request_uri;
            proxy_set_header X-Scheme $pass_access_scheme;

            # mitigate HTTPoxy Vulnerability
            # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/

@@ -331,6 +342,7 @@ http {
            proxy_redirect off;
            proxy_buffering off;
            proxy_buffer_size "{{ $location.Proxy.BufferSize }}";
            proxy_buffers 4 "{{ $location.Proxy.BufferSize }}";

            proxy_http_version 1.1;

@@ -364,7 +376,7 @@ http {
        # with an external software (like sysdig)
        location /nginx_status {
            allow 127.0.0.1;
            allow ::1;
            {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
            deny all;

            access_log off;

@@ -382,7 +394,7 @@ http {
        # Use the port 18080 (random value just to avoid known ports) as default port for nginx.
        # Changing this value requires a change in:
        # https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104
        listen [::]:18080 ipv6only=off default_server reuseport backlog={{ .BacklogSize }};
        listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}18080 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} default_server reuseport backlog={{ .BacklogSize }};

        location {{ $healthzURI }} {
            access_log off;

@@ -404,7 +416,7 @@ http {
        # TODO: enable extraction for vts module.
        location /internal_nginx_status {
            allow 127.0.0.1;
            allow ::1;
            {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
            deny all;

            access_log off;

@@ -464,7 +476,7 @@ stream {
    {{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }}

    server {
        listen [::]:443 ipv6only=off{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
        listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{ end }}{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
        proxy_pass $stream_upstream;
        ssl_preread on;
    }
@@ -19,7 +19,7 @@ set -e

export NGINX_VERSION=1.11.10
export NDK_VERSION=0.3.0
export VTS_VERSION=0.1.12
export VTS_VERSION=0.1.11
export SETMISC_VERSION=0.31
export LUA_VERSION=0.10.7
export STICKY_SESSIONS_VERSION=08a395c66e42