Merge pull request #423 from aledbf/ngx-vts-prometheus
Scrape JSON metrics from the nginx VTS module when enabled
commit 0cb8f59f70
12 changed files with 974 additions and 321 deletions
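When cfg.EnableVtsStatus is on, the controller switches its Prometheus collector to the JSON endpoint exposed by the nginx VTS module. The following is a minimal, illustrative sketch (not part of the change) of reading that endpoint, assuming the defaults wired in below: health port 18080 and path /nginx_status/format/json.

package main

import (
  "encoding/json"
  "fmt"
  "net/http"
)

func main() {
  // Defaults taken from this diff: ngxHealthPort = 18080,
  // ngxVtsPath = "/nginx_status/format/json".
  resp, err := http.Get("http://localhost:18080/nginx_status/format/json")
  if err != nil {
    panic(err)
  }
  defer resp.Body.Close()

  // Decode only the "connections" block; the full payload also carries
  // serverZones, filterZones and upstreamZones (see status.go below).
  var payload struct {
    Connections struct {
      Active   float64 `json:"active"`
      Handled  float64 `json:"handled"`
      Requests float64 `json:"requests"`
    } `json:"connections"`
  }
  if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
    panic(err)
  }
  fmt.Printf("active=%v handled=%v requests=%v\n",
    payload.Connections.Active, payload.Connections.Handled, payload.Connections.Requests)
}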
@@ -21,7 +21,7 @@ build: clean
  -ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${RELEASE} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \
  -o rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller

container: build
container:
  $(DOCKER) build --pull -t $(PREFIX):$(RELEASE) rootfs

push: container
@@ -17,217 +17,79 @@ limitations under the License.

package main

import (
  "path/filepath"

  "github.com/golang/glog"

  common "github.com/ncabatoff/process-exporter"
  "github.com/ncabatoff/process-exporter/proc"
  "github.com/prometheus/client_golang/prometheus"

  "k8s.io/ingress/controllers/nginx/pkg/metric/collector"
)

type exeMatcher struct {
  name string
  args []string
}
const (
  ngxStatusPath = "/internal_nginx_status"
  ngxVtsPath    = "/nginx_status/format/json"
)

func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
  if len(nacl.Cmdline) == 0 {
    return false, ""
func (n *NGINXController) setupMonitor(sm statusModule) {
  csm := n.statusModule
  if csm != sm {
    glog.Infof("changing prometheus collector from %v to %v", csm, sm)
    n.stats.stop(csm)
    n.stats.start(sm)
    n.statusModule = sm
  }
  cmd := filepath.Base(nacl.Cmdline[0])
  return em.name == cmd, ""
}

func (n *NGINXController) setupMonitor(args []string) {
  pc, err := newProcessCollector(true, exeMatcher{"nginx", args})
type statsCollector struct {
  process prometheus.Collector
  basic   collector.Stopable
  vts     collector.Stopable

  namespace  string
  watchClass string
}

func (s *statsCollector) stop(sm statusModule) {
  switch sm {
  case defaultStatusModule:
    s.basic.Stop()
    prometheus.Unregister(s.basic)
    break
  case vtsStatusModule:
    s.vts.Stop()
    prometheus.Unregister(s.vts)
    break
  }
}

func (s *statsCollector) start(sm statusModule) {
  switch sm {
  case defaultStatusModule:
    s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, ngxHealthPort, ngxStatusPath)
    prometheus.Register(s.basic)
    break
  case vtsStatusModule:
    s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, ngxHealthPort, ngxVtsPath)
    prometheus.Register(s.vts)
    break
  }
}

func newStatsCollector(ns, class, binary string) *statsCollector {
  glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class)
  pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{
    Name:   "nginx",
    Binary: binary,
  })
  if err != nil {
    glog.Fatalf("unexpected error registering nginx collector: %v", err)
  }
  err = prometheus.Register(pc)
  if err != nil {
    glog.Warningf("unexpected error registering nginx collector: %v", err)
  }
}

var (
  numprocsDesc = prometheus.NewDesc(
    "nginx_num_procs",
    "number of processes",
    nil, nil)

  cpuSecsDesc = prometheus.NewDesc(
    "nginx_cpu_seconds_total",
    "Cpu usage in seconds",
    nil, nil)

  readBytesDesc = prometheus.NewDesc(
    "nginx_read_bytes_total",
    "number of bytes read",
    nil, nil)

  writeBytesDesc = prometheus.NewDesc(
    "nginx_write_bytes_total",
    "number of bytes written",
    nil, nil)

  memResidentbytesDesc = prometheus.NewDesc(
    "nginx_resident_memory_bytes",
    "number of bytes of memory in use",
    nil, nil)

  memVirtualbytesDesc = prometheus.NewDesc(
    "nginx_virtual_memory_bytes",
    "number of bytes of memory in use",
    nil, nil)

  startTimeDesc = prometheus.NewDesc(
    "nginx_oldest_start_time_seconds",
    "start time in seconds since 1970/01/01",
    nil, nil)

  activeDesc = prometheus.NewDesc(
    "nginx_active_connections",
    "total number of active connections",
    nil, nil)

  acceptedDesc = prometheus.NewDesc(
    "nginx_accepted_connections",
    "total number of accepted client connections",
    nil, nil)

  handledDesc = prometheus.NewDesc(
    "nginx_handled_connections",
    "total number of handled connections",
    nil, nil)

  requestsDesc = prometheus.NewDesc(
    "nginx_total_requests",
    "total number of client requests",
    nil, nil)

  readingDesc = prometheus.NewDesc(
    "nginx_current_reading_connections",
    "current number of connections where nginx is reading the request header",
    nil, nil)

  writingDesc = prometheus.NewDesc(
    "nginx_current_writing_connections",
    "current number of connections where nginx is writing the response back to the client",
    nil, nil)

  waitingDesc = prometheus.NewDesc(
    "nginx_current_waiting_connections",
    "current number of idle client connections waiting for a request",
    nil, nil)
)

type (
  scrapeRequest struct {
    results chan<- prometheus.Metric
    done    chan struct{}
  }

  namedProcessCollector struct {
    scrapeChan chan scrapeRequest
    *proc.Grouper
    fs *proc.FS
  }
)

func newProcessCollector(
  children bool,
  n common.MatchNamer) (*namedProcessCollector, error) {

  fs, err := proc.NewFS("/proc")
  if err != nil {
    return nil, err
  }
  p := &namedProcessCollector{
    scrapeChan: make(chan scrapeRequest),
    Grouper:    proc.NewGrouper(children, n),
    fs:         fs,
  }
  _, err = p.Update(p.fs.AllProcs())
  if err != nil {
    return nil, err
  }

  go p.start()

  return p, nil
}

// Describe implements prometheus.Collector.
func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
  ch <- cpuSecsDesc
  ch <- numprocsDesc
  ch <- readBytesDesc
  ch <- writeBytesDesc
  ch <- memResidentbytesDesc
  ch <- memVirtualbytesDesc
  ch <- startTimeDesc
}

// Collect implements prometheus.Collector.
func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {
  req := scrapeRequest{results: ch, done: make(chan struct{})}
  p.scrapeChan <- req
  <-req.done
}

func (p *namedProcessCollector) start() {
  for req := range p.scrapeChan {
    ch := req.results
    p.scrape(ch)
    req.done <- struct{}{}
  }
}

func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
  s, err := getNginxStatus()
  if err != nil {
    glog.Warningf("unexpected error obtaining nginx status info: %v", err)
    return
  }

  ch <- prometheus.MustNewConstMetric(activeDesc,
    prometheus.GaugeValue, float64(s.Active))
  ch <- prometheus.MustNewConstMetric(acceptedDesc,
    prometheus.GaugeValue, float64(s.Accepted))
  ch <- prometheus.MustNewConstMetric(handledDesc,
    prometheus.GaugeValue, float64(s.Handled))
  ch <- prometheus.MustNewConstMetric(requestsDesc,
    prometheus.GaugeValue, float64(s.Requests))
  ch <- prometheus.MustNewConstMetric(readingDesc,
    prometheus.GaugeValue, float64(s.Reading))
  ch <- prometheus.MustNewConstMetric(writingDesc,
    prometheus.GaugeValue, float64(s.Writing))
  ch <- prometheus.MustNewConstMetric(waitingDesc,
    prometheus.GaugeValue, float64(s.Waiting))

  _, err = p.Update(p.fs.AllProcs())
  if err != nil {
    glog.Warningf("unexpected error obtaining nginx process info: %v", err)
    return
  }

  for gname, gcounts := range p.Groups() {
    glog.Infof("%v", gname)
    glog.Infof("%v", gcounts)
    ch <- prometheus.MustNewConstMetric(numprocsDesc,
      prometheus.GaugeValue, float64(gcounts.Procs))
    ch <- prometheus.MustNewConstMetric(memResidentbytesDesc,
      prometheus.GaugeValue, float64(gcounts.Memresident))
    ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc,
      prometheus.GaugeValue, float64(gcounts.Memvirtual))
    ch <- prometheus.MustNewConstMetric(startTimeDesc,
      prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
    ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
      prometheus.CounterValue, gcounts.Cpu)
    ch <- prometheus.MustNewConstMetric(readBytesDesc,
      prometheus.CounterValue, float64(gcounts.ReadBytes))
    ch <- prometheus.MustNewConstMetric(writeBytesDesc,
      prometheus.CounterValue, float64(gcounts.WriteBytes))
    glog.Fatalf("unexpected error registering nginx collector: %v", err)
  }

  return &statsCollector{
    namespace:  ns,
    watchClass: class,
    process:    pc,
  }
}
@@ -25,7 +25,6 @@ import (
  "net/http"
  "os"
  "os/exec"
  "strings"
  "syscall"
  "time"

@@ -34,6 +33,8 @@ import (

  "k8s.io/kubernetes/pkg/api"

  "strings"

  "k8s.io/ingress/controllers/nginx/pkg/config"
  ngx_template "k8s.io/ingress/controllers/nginx/pkg/template"
  "k8s.io/ingress/controllers/nginx/pkg/version"

@@ -42,10 +43,14 @@ import (
  "k8s.io/ingress/core/pkg/net/ssl"
)

type statusModule string

const (
  ngxHealthPort = 18080
  ngxHealthPath = "/healthz"
  ngxStatusPath = "/internal_nginx_status"

  defaultStatusModule statusModule = "default"
  vtsStatusModule     statusModule = "vts"
)

var (

@@ -63,7 +68,7 @@ func newNGINXController() ingress.Controller {
  if ngx == "" {
    ngx = binary
  }
  n := NGINXController{
  n := &NGINXController{
    binary:    ngx,
    configmap: &api.ConfigMap{},
  }

@@ -95,7 +100,7 @@ Error loading new template : %v

  go n.Start()

  return ingress.Controller(&n)
  return ingress.Controller(n)
}

// NGINXController ...

@@ -107,10 +112,18 @@ type NGINXController struct {
  storeLister ingress.StoreLister

  binary string

  cmdArgs []string

  watchClass string
  namespace  string

  stats        *statsCollector
  statusModule statusModule
}

// Start starts a new NGINX master process running in the foreground.
func (n NGINXController) Start() {
func (n *NGINXController) Start() {
  glog.Info("starting NGINX process...")

  done := make(chan error, 1)

@@ -157,7 +170,7 @@ func (n *NGINXController) start(cmd *exec.Cmd, done chan error) {
    return
  }

  n.setupMonitor(cmd.Args)
  n.cmdArgs = cmd.Args

  go func() {
    done <- cmd.Wait()

@@ -177,6 +190,7 @@ func (n NGINXController) Reload(data []byte) ([]byte, bool, error) {
  }

  o, e := exec.Command(n.binary, "-s", "reload").CombinedOutput()

  return o, true, e
}

@@ -204,6 +218,7 @@ func (n NGINXController) isReloadRequired(data []byte) bool {
  }

  if !bytes.Equal(src, data) {

    tmpfile, err := ioutil.TempFile("", "nginx-cfg-diff")
    if err != nil {
      glog.Errorf("error creating temporary file: %s", err)

@@ -242,12 +257,20 @@ func (n NGINXController) Info() *ingress.BackendInfo {
}

// OverrideFlags customizes NGINX controller flags
func (n NGINXController) OverrideFlags(flags *pflag.FlagSet) {
  ig, err := flags.GetString("ingress-class")
  if err == nil && ig != "" && ig != defIngressClass {
    glog.Warningf("only Ingress with class %v will be processed by this ingress controller", ig)
func (n *NGINXController) OverrideFlags(flags *pflag.FlagSet) {
  ic, _ := flags.GetString("ingress-class")
  wc, _ := flags.GetString("watch-namespace")

  if ic == "" {
    ic = defIngressClass
  }
  flags.Set("ingress-class", defIngressClass)

  if ic != defIngressClass {
    glog.Warningf("only Ingress with class %v will be processed by this ingress controller", ic)
  }

  flags.Set("ingress-class", ic)
  n.stats = newStatsCollector(ic, wc, n.binary)
}

// DefaultIngressClass just returns the default ingress class

@@ -313,6 +336,13 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er

  cfg := ngx_template.ReadConfig(n.configmap.Data)

  // we need to check if the status module configuration changed
  if cfg.EnableVtsStatus {
    n.setupMonitor(vtsStatusModule)
  } else {
    n.setupMonitor(defaultStatusModule)
  }

  // NGINX cannot resize the hash tables used to store server names.
  // For this reason we check if the defined size is correct
  // for the FQDN defined in the ingress rules adjusting the value
@@ -1,99 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
  "fmt"
  "io/ioutil"
  "net/http"
  "regexp"
  "strconv"
)

var (
  ac      = regexp.MustCompile(`Active connections: (\d+)`)
  sahr    = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`)
  reading = regexp.MustCompile(`Reading: (\d+)`)
  writing = regexp.MustCompile(`Writing: (\d+)`)
  waiting = regexp.MustCompile(`Waiting: (\d+)`)
)

type nginxStatus struct {
  // Active total number of active connections
  Active int
  // Accepted total number of accepted client connections
  Accepted int
  // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).
  Handled int
  // Requests total number of client requests.
  Requests int
  // Reading current number of connections where nginx is reading the request header.
  Reading int
  // Writing current number of connections where nginx is writing the response back to the client.
  Writing int
  // Waiting current number of idle client connections waiting for a request.
  Waiting int
}

func getNginxStatus() (*nginxStatus, error) {
  resp, err := http.DefaultClient.Get(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath))
  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
  }

  data, err := ioutil.ReadAll(resp.Body)
  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx status page (%v)", err)
  }
  defer resp.Body.Close()
  if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    return nil, fmt.Errorf("unexpected error scraping nginx status page (status %v)", resp.StatusCode)
  }

  return parse(string(data)), nil
}

func parse(data string) *nginxStatus {
  acr := ac.FindStringSubmatch(data)
  sahrr := sahr.FindStringSubmatch(data)
  readingr := reading.FindStringSubmatch(data)
  writingr := writing.FindStringSubmatch(data)
  waitingr := waiting.FindStringSubmatch(data)

  return &nginxStatus{
    toInt(acr, 1),
    toInt(sahrr, 1),
    toInt(sahrr, 2),
    toInt(sahrr, 3),
    toInt(readingr, 1),
    toInt(writingr, 1),
    toInt(waitingr, 1),
  }
}

func toInt(data []string, pos int) int {
  if len(data) == 0 {
    return 0
  }
  if pos > len(data) {
    return 0
  }
  if v, err := strconv.Atoi(data[pos]); err == nil {
    return v
  }
  return 0
}
@@ -259,8 +259,6 @@ type Configuration struct {
func NewDefault() Configuration {
  cfg := Configuration{
    ClientHeaderBufferSize:  "1k",
    DisableAccessLog:        false,
    DisableIpv6:             false,
    EnableDynamicTLSRecords: true,
    ErrorLogLevel:           errorLevel,
    HTTP2MaxFieldSize:       "4k",

@@ -286,7 +284,6 @@ func NewDefault() Configuration {
    SSLSessionCacheSize: sslSessionCacheSize,
    SSLSessionTickets:   true,
    SSLSessionTimeout:   sslSessionTimeout,
    UseProxyProtocol:    false,
    UseGzip:             true,
    WorkerProcesses:     runtime.NumCPU(),
    VtsStatusZoneSize:   "10m",

@@ -300,11 +297,9 @@ func NewDefault() Configuration {
      ProxyCookieDomain:    "off",
      ProxyCookiePath:      "off",
      SSLRedirect:          true,
      ForceSSLRedirect:     false,
      CustomHTTPErrors:     []int{},
      WhitelistSourceRange: []string{},
      SkipAccessLogURLs:    []string{},
      UsePortInRedirects:   false,
    },
  }
160  controllers/nginx/pkg/metric/collector/nginx.go  Normal file

@@ -0,0 +1,160 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
  "fmt"

  "github.com/golang/glog"
  "github.com/prometheus/client_golang/prometheus"
)

type (
  nginxStatusCollector struct {
    scrapeChan    chan scrapeRequest
    ngxHealthPort int
    ngxVtsPath    string
    data          *nginxStatusData
  }

  nginxStatusData struct {
    active   *prometheus.Desc
    accepted *prometheus.Desc
    handled  *prometheus.Desc
    requests *prometheus.Desc
    reading  *prometheus.Desc
    writing  *prometheus.Desc
    waiting  *prometheus.Desc
  }
)

func buildNS(namespace, class string) string {
  if namespace == "" {
    namespace = "all"
  }
  if class == "" {
    class = "all"
  }

  return fmt.Sprintf("%v_%v", namespace, class)
}
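buildNS only builds the metric subsystem string; the final metric names come from prometheus.BuildFQName with the "nginx" system prefix defined in vts.go below. A small illustration (not part of the change) of the names this produces:

package main

import (
  "fmt"

  "github.com/prometheus/client_golang/prometheus"
)

func main() {
  // buildNS("", "") falls back to "all_all"; with a namespace and class it
  // would be e.g. "default_nginx".
  ns := fmt.Sprintf("%v_%v", "all", "all")
  fmt.Println(prometheus.BuildFQName("nginx", ns, "active_connections"))
  // Prints: nginx_all_all_active_connections
}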
// NewNginxStatus returns a new prometheus collector for the default nginx status module
func NewNginxStatus(namespace, class string, ngxHealthPort int, ngxVtsPath string) Stopable {
  p := nginxStatusCollector{
    scrapeChan:    make(chan scrapeRequest),
    ngxHealthPort: ngxHealthPort,
    ngxVtsPath:    ngxVtsPath,
  }

  ns := buildNS(namespace, class)

  p.data = &nginxStatusData{
    active: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "active_connections"),
      "total number of active connections",
      nil, nil),

    accepted: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "accepted_connections"),
      "total number of accepted client connections",
      nil, nil),

    handled: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "handled_connections"),
      "total number of handled connections",
      nil, nil),

    requests: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "total_requests"),
      "total number of client requests",
      nil, nil),

    reading: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "current_reading_connections"),
      "current number of connections where nginx is reading the request header",
      nil, nil),

    writing: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "current_writing_connections"),
      "current number of connections where nginx is writing the response back to the client",
      nil, nil),

    waiting: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "current_waiting_connections"),
      "current number of idle client connections waiting for a request",
      nil, nil),
  }

  go p.start()

  return p
}

// Describe implements prometheus.Collector.
func (p nginxStatusCollector) Describe(ch chan<- *prometheus.Desc) {
  ch <- p.data.active
  ch <- p.data.accepted
  ch <- p.data.handled
  ch <- p.data.requests
  ch <- p.data.reading
  ch <- p.data.writing
  ch <- p.data.waiting
}

// Collect implements prometheus.Collector.
func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) {
  req := scrapeRequest{results: ch, done: make(chan struct{})}
  p.scrapeChan <- req
  <-req.done
}

func (p nginxStatusCollector) start() {
  for req := range p.scrapeChan {
    ch := req.results
    p.scrape(ch)
    req.done <- struct{}{}
  }
}

func (p nginxStatusCollector) Stop() {
  close(p.scrapeChan)
}

// scrape scrapes the basic nginx status page
func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
  s, err := getNginxStatus(p.ngxHealthPort, p.ngxVtsPath)
  if err != nil {
    glog.Warningf("unexpected error obtaining nginx status info: %v", err)
    return
  }

  ch <- prometheus.MustNewConstMetric(p.data.active,
    prometheus.GaugeValue, float64(s.Active))
  ch <- prometheus.MustNewConstMetric(p.data.accepted,
    prometheus.GaugeValue, float64(s.Accepted))
  ch <- prometheus.MustNewConstMetric(p.data.handled,
    prometheus.GaugeValue, float64(s.Handled))
  ch <- prometheus.MustNewConstMetric(p.data.requests,
    prometheus.GaugeValue, float64(s.Requests))
  ch <- prometheus.MustNewConstMetric(p.data.reading,
    prometheus.GaugeValue, float64(s.Reading))
  ch <- prometheus.MustNewConstMetric(p.data.writing,
    prometheus.GaugeValue, float64(s.Writing))
  ch <- prometheus.MustNewConstMetric(p.data.waiting,
    prometheus.GaugeValue, float64(s.Waiting))
}
173  controllers/nginx/pkg/metric/collector/process.go  Normal file

@@ -0,0 +1,173 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
  "path/filepath"

  "github.com/golang/glog"
  common "github.com/ncabatoff/process-exporter"
  "github.com/ncabatoff/process-exporter/proc"
  "github.com/prometheus/client_golang/prometheus"
)

// BinaryNameMatcher ...
type BinaryNameMatcher struct {
  Name   string
  Binary string
}

// MatchAndName returns false if the match failed, otherwise
// true and the resulting name.
func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
  if len(nacl.Cmdline) == 0 {
    return false, ""
  }
  cmd := filepath.Base(em.Binary)
  return em.Name == cmd, ""
}

type namedProcessData struct {
  numProcs         *prometheus.Desc
  cpuSecs          *prometheus.Desc
  readBytes        *prometheus.Desc
  writeBytes       *prometheus.Desc
  memResidentbytes *prometheus.Desc
  memVirtualbytes  *prometheus.Desc
  startTime        *prometheus.Desc
}

type namedProcess struct {
  *proc.Grouper

  scrapeChan chan scrapeRequest
  fs         *proc.FS
  data       namedProcessData
}

// NewNamedProcess returns a new prometheus collector for the nginx process
func NewNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) {
  fs, err := proc.NewFS("/proc")
  if err != nil {
    return nil, err
  }
  p := namedProcess{
    scrapeChan: make(chan scrapeRequest),
    Grouper:    proc.NewGrouper(children, mn),
    fs:         fs,
  }
  _, err = p.Update(p.fs.AllProcs())
  if err != nil {
    return nil, err
  }

  p.data = namedProcessData{
    numProcs: prometheus.NewDesc(
      "num_procs",
      "number of processes",
      nil, nil),

    cpuSecs: prometheus.NewDesc(
      "cpu_seconds_total",
      "Cpu usage in seconds",
      nil, nil),

    readBytes: prometheus.NewDesc(
      "read_bytes_total",
      "number of bytes read",
      nil, nil),

    writeBytes: prometheus.NewDesc(
      "write_bytes_total",
      "number of bytes written",
      nil, nil),

    memResidentbytes: prometheus.NewDesc(
      "resident_memory_bytes",
      "number of bytes of memory in use",
      nil, nil),

    memVirtualbytes: prometheus.NewDesc(
      "virtual_memory_bytes",
      "number of bytes of memory in use",
      nil, nil),

    startTime: prometheus.NewDesc(
      "oldest_start_time_seconds",
      "start time in seconds since 1970/01/01",
      nil, nil),
  }

  go p.start()

  return p, nil
}

// Describe implements prometheus.Collector.
func (p namedProcess) Describe(ch chan<- *prometheus.Desc) {
  ch <- p.data.cpuSecs
  ch <- p.data.numProcs
  ch <- p.data.readBytes
  ch <- p.data.writeBytes
  ch <- p.data.memResidentbytes
  ch <- p.data.memVirtualbytes
  ch <- p.data.startTime
}

// Collect implements prometheus.Collector.
func (p namedProcess) Collect(ch chan<- prometheus.Metric) {
  req := scrapeRequest{results: ch, done: make(chan struct{})}
  p.scrapeChan <- req
  <-req.done
}

func (p namedProcess) start() {
  for req := range p.scrapeChan {
    ch := req.results
    p.scrape(ch)
    req.done <- struct{}{}
  }
}

func (p namedProcess) Stop() {
  close(p.scrapeChan)
}

func (p namedProcess) scrape(ch chan<- prometheus.Metric) {
  _, err := p.Update(p.fs.AllProcs())
  if err != nil {
    glog.Warningf("unexpected error obtaining nginx process info: %v", err)
    return
  }

  for _, gcounts := range p.Groups() {
    ch <- prometheus.MustNewConstMetric(p.data.numProcs,
      prometheus.GaugeValue, float64(gcounts.Procs))
    ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes,
      prometheus.GaugeValue, float64(gcounts.Memresident))
    ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes,
      prometheus.GaugeValue, float64(gcounts.Memvirtual))
    ch <- prometheus.MustNewConstMetric(p.data.startTime,
      prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
    ch <- prometheus.MustNewConstMetric(p.data.cpuSecs,
      prometheus.CounterValue, gcounts.Cpu)
    ch <- prometheus.MustNewConstMetric(p.data.readBytes,
      prometheus.CounterValue, float64(gcounts.ReadBytes))
    ch <- prometheus.MustNewConstMetric(p.data.writeBytes,
      prometheus.CounterValue, float64(gcounts.WriteBytes))
  }
}
30  controllers/nginx/pkg/metric/collector/scrape.go  Normal file

@@ -0,0 +1,30 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import "github.com/prometheus/client_golang/prometheus"

// Stopable defines a prometheus collector that can be stopped
type Stopable interface {
  prometheus.Collector
  Stop()
}

type scrapeRequest struct {
  results chan<- prometheus.Metric
  done    chan struct{}
}
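All three collectors in this change share the same pattern: Collect sends a scrapeRequest over scrapeChan and blocks on done, a single long-running goroutine services the channel so scrapes never run concurrently, and Stop closes the channel to end that goroutine. The following is a hypothetical, minimal sketch of the pattern (demoCollector and newDemoCollector are invented for illustration and are not part of this change):

package collector

import "github.com/prometheus/client_golang/prometheus"

// demoCollector shows how the scrapeRequest/Stopable pattern above is used.
type demoCollector struct {
  scrapeChan chan scrapeRequest
  desc       *prometheus.Desc
}

func newDemoCollector() Stopable {
  c := demoCollector{
    scrapeChan: make(chan scrapeRequest),
    desc:       prometheus.NewDesc("demo_up", "always 1", nil, nil),
  }
  go c.run() // a single goroutine serializes all scrapes
  return c
}

func (c demoCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

// Collect hands the work to the run() goroutine and waits for completion.
func (c demoCollector) Collect(ch chan<- prometheus.Metric) {
  req := scrapeRequest{results: ch, done: make(chan struct{})}
  c.scrapeChan <- req
  <-req.done
}

func (c demoCollector) run() {
  for req := range c.scrapeChan {
    req.results <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1)
    req.done <- struct{}{}
  }
}

// Stop ends the run() loop; a registered collector should be unregistered first.
func (c demoCollector) Stop() { close(c.scrapeChan) }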
225  controllers/nginx/pkg/metric/collector/status.go  Normal file

@@ -0,0 +1,225 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
  "encoding/json"
  "fmt"
  "io/ioutil"
  "net/http"
  "regexp"
  "strconv"

  "github.com/golang/glog"
)

var (
  ac      = regexp.MustCompile(`Active connections: (\d+)`)
  sahr    = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`)
  reading = regexp.MustCompile(`Reading: (\d+)`)
  writing = regexp.MustCompile(`Writing: (\d+)`)
  waiting = regexp.MustCompile(`Waiting: (\d+)`)
)

type basicStatus struct {
  // Active total number of active connections
  Active int
  // Accepted total number of accepted client connections
  Accepted int
  // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).
  Handled int
  // Requests total number of client requests.
  Requests int
  // Reading current number of connections where nginx is reading the request header.
  Reading int
  // Writing current number of connections where nginx is writing the response back to the client.
  Writing int
  // Waiting current number of idle client connections waiting for a request.
  Waiting int
}

// https://github.com/vozlt/nginx-module-vts
type vts struct {
  NginxVersion string `json:"nginxVersion"`
  LoadMsec     int    `json:"loadMsec"`
  NowMsec      int    `json:"nowMsec"`
  // Total connections and requests(same as stub_status_module in NGINX)
  Connections connections `json:"connections"`
  // Traffic(in/out) and request and response counts and cache hit ratio per each server zone
  ServerZones map[string]serverZone `json:"serverZones"`
  // Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
  // the vhost_traffic_status_filter_by_set_key directive
  FilterZones map[string]map[string]filterZone `json:"filterZones"`
  // Traffic(in/out) and request and response counts per server in each upstream group
  UpstreamZones map[string][]upstreamZone `json:"upstreamZones"`
}

type serverZone struct {
  RequestCounter float64  `json:"requestCounter"`
  InBytes        float64  `json:"inBytes"`
  OutBytes       float64  `json:"outBytes"`
  Responses      response `json:"responses"`
  Cache          cache    `json:"cache"`
}

type filterZone struct {
  RequestCounter float64  `json:"requestCounter"`
  InBytes        float64  `json:"inBytes"`
  OutBytes       float64  `json:"outBytes"`
  Cache          cache    `json:"cache"`
  Responses      response `json:"responses"`
}

type upstreamZone struct {
  Responses      response      `json:"responses"`
  Server         string        `json:"server"`
  RequestCounter float64       `json:"requestCounter"`
  InBytes        float64       `json:"inBytes"`
  OutBytes       float64       `json:"outBytes"`
  ResponseMsec   float64       `json:"responseMsec"`
  Weight         float64       `json:"weight"`
  MaxFails       float64       `json:"maxFails"`
  FailTimeout    float64       `json:"failTimeout"`
  Backup         BoolToFloat64 `json:"backup"`
  Down           BoolToFloat64 `json:"down"`
}

type cache struct {
  Miss        float64 `json:"miss"`
  Bypass      float64 `json:"bypass"`
  Expired     float64 `json:"expired"`
  Stale       float64 `json:"stale"`
  Updating    float64 `json:"updating"`
  Revalidated float64 `json:"revalidated"`
  Hit         float64 `json:"hit"`
  Scarce      float64 `json:"scarce"`
}

type response struct {
  OneXx  float64 `json:"1xx"`
  TwoXx  float64 `json:"2xx"`
  TheeXx float64 `json:"3xx"`
  FourXx float64 `json:"4xx"`
  FiveXx float64 `json:"5xx"`
}

type connections struct {
  Active   float64 `json:"active"`
  Reading  float64 `json:"reading"`
  Writing  float64 `json:"writing"`
  Waiting  float64 `json:"waiting"`
  Accepted float64 `json:"accepted"`
  Handled  float64 `json:"handled"`
  Requests float64 `json:"requests"`
}

// BoolToFloat64 ...
type BoolToFloat64 float64

// UnmarshalJSON decodes the VTS boolean fields ("backup", "down") into 0 or 1.
// The receiver must be a pointer so the decoded value is actually stored.
func (bit *BoolToFloat64) UnmarshalJSON(data []byte) error {
  asString := string(data)
  if asString == "1" || asString == "true" {
    *bit = 1
  } else if asString == "0" || asString == "false" {
    *bit = 0
  } else {
    return fmt.Errorf("boolean unmarshal error: invalid input %s", asString)
  }
  return nil
}
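A small, hypothetical check of the BoolToFloat64 decoding (it assumes the pointer-receiver form above and would live in a _test.go file of this package; it is not part of the change):

package collector

import (
  "encoding/json"
  "fmt"
)

// ExampleBoolToFloat64 shows the VTS "backup"/"down" booleans decoding to 0/1
// so they can be exported as gauge values.
func ExampleBoolToFloat64() {
  var z upstreamZone
  _ = json.Unmarshal([]byte(`{"server":"10.0.0.1:80","backup":true,"down":false}`), &z)
  fmt.Println(z.Backup, z.Down)
  // Output: 1 0
}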
func getNginxStatus(ngxHealthPort int, ngxStatusPath string) (*basicStatus, error) {
  url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
  glog.V(3).Infof("start scraping url: %v", url)

  data, err := httpBody(url)

  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
  }

  return parse(string(data)), nil
}

func httpBody(url string) ([]byte, error) {
  resp, err := http.DefaultClient.Get(url)
  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx : %v", err)
  }

  data, err := ioutil.ReadAll(resp.Body)
  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err)
  }
  defer resp.Body.Close()
  if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode)
  }

  return data, nil
}

func getNginxVtsMetrics(ngxHealthPort int, ngxVtsPath string) (*vts, error) {
  url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
  glog.V(3).Infof("start scraping url: %v", url)

  data, err := httpBody(url)

  if err != nil {
    return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
  }

  var vts *vts
  err = json.Unmarshal(data, &vts)
  if err != nil {
    return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
  }
  glog.V(3).Infof("scrape returned : %v", vts)
  return vts, nil
}

func parse(data string) *basicStatus {
  acr := ac.FindStringSubmatch(data)
  sahrr := sahr.FindStringSubmatch(data)
  readingr := reading.FindStringSubmatch(data)
  writingr := writing.FindStringSubmatch(data)
  waitingr := waiting.FindStringSubmatch(data)

  return &basicStatus{
    toInt(acr, 1),
    toInt(sahrr, 1),
    toInt(sahrr, 2),
    toInt(sahrr, 3),
    toInt(readingr, 1),
    toInt(writingr, 1),
    toInt(waitingr, 1),
  }
}

func toInt(data []string, pos int) int {
  if len(data) == 0 {
    return 0
  }
  if pos > len(data) {
    return 0
  }
  if v, err := strconv.Atoi(data[pos]); err == nil {
    return v
  }
  return 0
}
@@ -14,35 +14,37 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package main
package collector

import (
  "reflect"
  "testing"

  "github.com/kylelemons/godebug/pretty"
)

func TestParseStatus(t *testing.T) {
  tests := []struct {
    in  string
    out *nginxStatus
    out *basicStatus
  }{
    {`Active connections: 43
server accepts handled requests
7368 7368 10993
Reading: 0 Writing: 5 Waiting: 38`,
      &nginxStatus{43, 7368, 7368, 10993, 0, 5, 38},
      &basicStatus{43, 7368, 7368, 10993, 0, 5, 38},
    },
    {`Active connections: 0
server accepts handled requests
1 7 0
Reading: A Writing: B Waiting: 38`,
      &nginxStatus{0, 1, 7, 0, 0, 0, 38},
      &basicStatus{0, 1, 7, 0, 0, 0, 38},
    },
  }

  for _, test := range tests {
    r := parse(test.in)
    if !reflect.DeepEqual(r, test.out) {
    if diff := pretty.Compare(r, test.out); diff != "" {
      t.Logf("%v", diff)
      t.Fatalf("expected %v but returned %v", test.out, r)
    }
  }
269  controllers/nginx/pkg/metric/collector/vts.go  Normal file

@@ -0,0 +1,269 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
  "reflect"

  "github.com/golang/glog"
  "github.com/prometheus/client_golang/prometheus"
)

const system = "nginx"

type (
  vtsCollector struct {
    scrapeChan    chan scrapeRequest
    ngxHealthPort int
    ngxVtsPath    string
    data          *vtsData
  }

  vtsData struct {
    bytes                *prometheus.Desc
    cache                *prometheus.Desc
    connections          *prometheus.Desc
    response             *prometheus.Desc
    request              *prometheus.Desc
    filterZoneBytes      *prometheus.Desc
    filterZoneResponse   *prometheus.Desc
    filterZoneCache      *prometheus.Desc
    upstreamBackup       *prometheus.Desc
    upstreamBytes        *prometheus.Desc
    upstreamDown         *prometheus.Desc
    upstreamFailTimeout  *prometheus.Desc
    upstreamMaxFails     *prometheus.Desc
    upstreamResponses    *prometheus.Desc
    upstreamRequest      *prometheus.Desc
    upstreamResponseMsec *prometheus.Desc
    upstreamWeight       *prometheus.Desc
  }
)

// NewNGINXVTSCollector returns a new prometheus collector for the VTS module
func NewNGINXVTSCollector(namespace, class string, ngxHealthPort int, ngxVtsPath string) Stopable {
  p := vtsCollector{
    scrapeChan:    make(chan scrapeRequest),
    ngxHealthPort: ngxHealthPort,
    ngxVtsPath:    ngxVtsPath,
  }

  ns := buildNS(namespace, class)

  p.data = &vtsData{
    bytes: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "bytes_total"),
      "Nginx bytes count",
      []string{"server_zone", "direction"}, nil),

    cache: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "cache_total"),
      "Nginx cache count",
      []string{"server_zone", "type"}, nil),

    connections: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "connections_total"),
      "Nginx connections count",
      []string{"type"}, nil),

    response: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "responses_total"),
      "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
      []string{"server_zone", "status_code"}, nil),

    request: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "requests_total"),
      "The total number of requested client connections.",
      []string{"server_zone"}, nil),

    filterZoneBytes: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "filterzone_bytes_total"),
      "Nginx bytes count",
      []string{"server_zone", "country", "direction"}, nil),

    filterZoneResponse: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "filterzone_responses_total"),
      "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
      []string{"server_zone", "country", "status_code"}, nil),

    filterZoneCache: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "filterzone_cache_total"),
      "Nginx cache count",
      []string{"server_zone", "country", "type"}, nil),

    upstreamBackup: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_backup"),
      "Current backup setting of the server.",
      []string{"upstream", "server"}, nil),

    upstreamBytes: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_bytes_total"),
      "The total number of bytes sent to this server.",
      []string{"upstream", "server", "direction"}, nil),

    upstreamDown: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "vts_upstream_down_total"),
      "Current down setting of the server.",
      []string{"upstream", "server"}, nil),

    upstreamFailTimeout: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_fail_timeout"),
      "Current fail_timeout setting of the server.",
      []string{"upstream", "server"}, nil),

    upstreamMaxFails: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_maxfails"),
      "Current max_fails setting of the server.",
      []string{"upstream", "server"}, nil),

    upstreamResponses: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_responses_total"),
      "The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
      []string{"upstream", "server", "status_code"}, nil),

    upstreamRequest: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_requests_total"),
      "The total number of client connections forwarded to this server.",
      []string{"upstream", "server"}, nil),

    upstreamResponseMsec: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_response_msecs_avg"),
      "The average of only upstream response processing times in milliseconds.",
      []string{"upstream", "server"}, nil),

    upstreamWeight: prometheus.NewDesc(
      prometheus.BuildFQName(system, ns, "upstream_weight"),
      "Current upstream weight setting of the server.",
      []string{"upstream", "server"}, nil),
  }

  go p.start()

  return p
}

// Describe implements prometheus.Collector.
func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) {
  ch <- p.data.bytes
  ch <- p.data.cache
  ch <- p.data.connections
  ch <- p.data.request
  ch <- p.data.response
  ch <- p.data.upstreamBackup
  ch <- p.data.upstreamBytes
  ch <- p.data.upstreamDown
  ch <- p.data.upstreamFailTimeout
  ch <- p.data.upstreamMaxFails
  ch <- p.data.upstreamRequest
  ch <- p.data.upstreamResponseMsec
  ch <- p.data.upstreamResponses
  ch <- p.data.upstreamWeight
  ch <- p.data.filterZoneBytes
  ch <- p.data.filterZoneCache
  ch <- p.data.filterZoneResponse
}

// Collect implements prometheus.Collector.
func (p vtsCollector) Collect(ch chan<- prometheus.Metric) {
  req := scrapeRequest{results: ch, done: make(chan struct{})}
  p.scrapeChan <- req
  <-req.done
}

func (p vtsCollector) start() {
  for req := range p.scrapeChan {
    ch := req.results
    p.scrapeVts(ch)
    req.done <- struct{}{}
  }
}

func (p vtsCollector) Stop() {
  close(p.scrapeChan)
}

// scrapeVts scrapes nginx vts metrics
func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) {
  nginxMetrics, err := getNginxVtsMetrics(p.ngxHealthPort, p.ngxVtsPath)
  if err != nil {
    glog.Warningf("unexpected error obtaining nginx status info: %v", err)
    return
  }

  reflectMetrics(&nginxMetrics.Connections, p.data.connections, ch)

  for name, zones := range nginxMetrics.UpstreamZones {
    for pos, value := range zones {
      reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, name, value.Server)

      ch <- prometheus.MustNewConstMetric(p.data.upstreamRequest,
        prometheus.CounterValue, zones[pos].RequestCounter, name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamDown,
        prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamWeight,
        prometheus.CounterValue, zones[pos].Weight, name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamResponseMsec,
        prometheus.CounterValue, zones[pos].ResponseMsec, name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamBackup,
        prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamFailTimeout,
        prometheus.CounterValue, zones[pos].FailTimeout, name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamMaxFails,
        prometheus.CounterValue, zones[pos].MaxFails, name, value.Server)
      ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes,
        prometheus.CounterValue, zones[pos].InBytes, name, value.Server, "in")
      ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes,
        prometheus.CounterValue, zones[pos].OutBytes, name, value.Server, "out")
    }
  }

  for name, zone := range nginxMetrics.ServerZones {
    reflectMetrics(&zone.Responses, p.data.response, ch, name)
    reflectMetrics(&zone.Cache, p.data.cache, ch, name)

    ch <- prometheus.MustNewConstMetric(p.data.request,
      prometheus.CounterValue, zone.RequestCounter, name)
    ch <- prometheus.MustNewConstMetric(p.data.bytes,
      prometheus.CounterValue, zone.InBytes, name, "in")
    ch <- prometheus.MustNewConstMetric(p.data.bytes,
      prometheus.CounterValue, zone.OutBytes, name, "out")
  }

  for serverZone, countries := range nginxMetrics.FilterZones {
    for country, zone := range countries {
      reflectMetrics(&zone.Responses, p.data.filterZoneResponse, ch, serverZone, country)
      reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, serverZone, country)

      ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes,
        prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
      ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes,
        prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
    }
  }
}

func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
  val := reflect.ValueOf(value).Elem()

  for i := 0; i < val.NumField(); i++ {
    tag := val.Type().Field(i).Tag
    l := append(labels, tag.Get("json"))
    ch <- prometheus.MustNewConstMetric(desc,
      prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
      l...)
  }
}
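Inside the controller this collector is registered by statsCollector.start (see the first hunk above). As a standalone illustration only, not part of the change, a minimal sketch of registering the VTS collector and serving the resulting metrics with the standard client_golang handler; the port 9913 and the namespace/class values are arbitrary assumptions:

package main

import (
  "log"
  "net/http"

  "github.com/prometheus/client_golang/prometheus"
  "github.com/prometheus/client_golang/prometheus/promhttp"

  "k8s.io/ingress/controllers/nginx/pkg/metric/collector"
)

func main() {
  // Same defaults the controller wires up in this change:
  // health port 18080 and the VTS JSON status path.
  c := collector.NewNGINXVTSCollector("default", "nginx", 18080, "/nginx_status/format/json")
  prometheus.MustRegister(c)
  defer c.Stop()

  http.Handle("/metrics", promhttp.Handler())
  log.Fatal(http.ListenAndServe(":9913", nil))
}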
@@ -240,6 +240,8 @@ http {
    {{ if not (empty $authPath) }}
    location = {{ $authPath }} {
      internal;
      set $proxy_upstream_name "internal";

      {{ if not $location.ExternalAuth.SendBody }}
      proxy_pass_request_body off;
      proxy_set_header Content-Length "";

@@ -402,6 +404,8 @@ http {
    }

    location /nginx_status {
      set $proxy_upstream_name "internal";

      {{ if $cfg.EnableVtsStatus }}
      vhost_traffic_status_display;
      vhost_traffic_status_display_format html;

@@ -415,6 +419,8 @@ http {
    # using prometheus.
    # TODO: enable extraction for vts module.
    location /internal_nginx_status {
      set $proxy_upstream_name "internal";

      allow 127.0.0.1;
      {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
      deny all;