From f79257db36da0f70164be4cd6ef6180b4a8d1286 Mon Sep 17 00:00:00 2001
From: Giancarlo Rubio
Date: Sun, 26 Feb 2017 22:42:39 +0100
Subject: [PATCH] rollback nginx metrics

---
 .../nginx/pkg/cmd/controller/metrics.go      | 343 +++++++++++-------
 .../nginx/pkg/cmd/controller/metrics_test.go |   1 +
 .../nginx/pkg/cmd/controller/status.go       |  92 ++---
 .../nginx/pkg/cmd/controller/status_test.go  |  18 +
 controllers/nginx/pkg/config/config.go       |   1 +
 5 files changed, 292 insertions(+), 163 deletions(-)
 create mode 100644 controllers/nginx/pkg/cmd/controller/metrics_test.go

diff --git a/controllers/nginx/pkg/cmd/controller/metrics.go b/controllers/nginx/pkg/cmd/controller/metrics.go
index b114d1152..86fa3ce2e 100644
--- a/controllers/nginx/pkg/cmd/controller/metrics.go
+++ b/controllers/nginx/pkg/cmd/controller/metrics.go
@@ -25,6 +25,7 @@ import (
 	"github.com/ncabatoff/process-exporter/proc"
 	"github.com/prometheus/client_golang/prometheus"
 	"reflect"
+	"strings"
 )
 
 type exeMatcher struct {
@@ -41,9 +42,12 @@ func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
 }
 
 func (n *NGINXController) setupMonitor(args []string) {
-	pc, err := newProcessCollector(true, exeMatcher{"nginx", args})
+	var enableVts = true
+
+	// TODO fix true
+	pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, enableVts)
 	if err != nil {
-		glog.Warningf("unexpected error registering nginx collector: %v", err)
+		glog.Fatalf("unexpected error registering nginx collector: %v", err)
 	}
 	err = prometheus.Register(pc)
 	if err != nil {
@@ -79,22 +83,6 @@ var (
 		"number of bytes read",
 		nil, nil)
 
-	//vts metrics
-	bytesDesc = prometheus.NewDesc(
-		"nginx_bytes_total",
-		"Nginx bytes count",
-		[]string{"server_zones", "direction"}, nil)
-
-	cacheDesc = prometheus.NewDesc(
-		"nginx_cache_total",
-		"Nginx cache count",
-		[]string{"server_zones", "type"}, nil)
-
-	connectionsDesc = prometheus.NewDesc(
-		"nginx_connections_total",
-		"Nginx connections count",
-		[]string{"type"}, nil)
-
 	startTimeDesc = prometheus.NewDesc(
 		"nginx_oldest_start_time_seconds",
 		"start time in seconds since 1970/01/01",
@@ -105,60 +93,126 @@ var (
 		"number of bytes written",
 		nil, nil)
 
-	responseDesc = prometheus.NewDesc(
-		"nginx_responses_total",
+	//vts metrics
+	vtsBytesDesc = prometheus.NewDesc(
+		"nginx_vts_bytes_total",
+		"Nginx bytes count",
+		[]string{"server_zone", "direction"}, nil)
+
+	vtsCacheDesc = prometheus.NewDesc(
+		"nginx_vts_cache_total",
+		"Nginx cache count",
+		[]string{"server_zone", "type"}, nil)
+
+	vtsConnectionsDesc = prometheus.NewDesc(
+		"nginx_vts_connections_total",
+		"Nginx connections count",
+		[]string{"type"}, nil)
+
+	vtsResponseDesc = prometheus.NewDesc(
+		"nginx_vts_responses_total",
 		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
-		[]string{"server_zones", "status_code"}, nil)
+		[]string{"server_zone", "status_code"}, nil)
 
-	requestDesc = prometheus.NewDesc(
-		"nginx_requests_total",
+	vtsRequestDesc = prometheus.NewDesc(
+		"nginx_vts_requests_total",
 		"The total number of requested client connections.",
-		[]string{"server_zones"}, nil)
+		[]string{"server_zone"}, nil)
 
-	upstreamBackupDesc = prometheus.NewDesc(
-		"nginx_upstream_backup",
+	vtsFilterZoneBytesDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_bytes_total",
+		"Nginx bytes count",
+		[]string{"server_zone", "country", "direction"}, nil)
+
+	vtsFilterZoneResponseDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_responses_total",
+		"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
+		[]string{"server_zone", "country", "status_code"}, nil)
+
+	vtsFilterZoneCacheDesc = prometheus.NewDesc(
+		"nginx_vts_filterzone_cache_total",
+		"Nginx cache count",
+		[]string{"server_zone", "country", "type"}, nil)
+
+	vtsUpstreamBackupDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_backup",
 		"Current backup setting of the server.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamBytesDesc = prometheus.NewDesc(
-		"nginx_upstream_bytes_total",
+	vtsUpstreamBytesDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_bytes_total",
 		"The total number of bytes sent to this server.",
 		[]string{"upstream", "server", "direction"}, nil)
 
-	upstreamDownDesc = prometheus.NewDesc(
-		"nginx_upstream_down_total",
+	vtsUpstreamDownDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_down_total",
 		"Current down setting of the server.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamFailTimeoutDesc = prometheus.NewDesc(
-		"nginx_upstream_fail_timeout",
+	vtsUpstreamFailTimeoutDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_fail_timeout",
 		"Current fail_timeout setting of the server.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamMaxFailsDesc = prometheus.NewDesc(
-		"nginx_upstream_maxfails",
+	vtsUpstreamMaxFailsDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_maxfails",
 		"Current max_fails setting of the server.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamResponsesDesc = prometheus.NewDesc(
-		"nginx_upstream_responses_total",
+	vtsUpstreamResponsesDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_responses_total",
 		"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
 		[]string{"upstream", "server", "status_code"}, nil)
 
-	upstreamRequestDesc = prometheus.NewDesc(
-		"nginx_upstream_requests_total",
+	vtsUpstreamRequestDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_requests_total",
 		"The total number of client connections forwarded to this server.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamResponseMsecDesc = prometheus.NewDesc(
-		"nginx_upstream_response_msecs_avg",
+	vtsUpstreamResponseMsecDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_response_msecs_avg",
 		"The average of only upstream response processing times in milliseconds.",
 		[]string{"upstream", "server"}, nil)
 
-	upstreamWeightDesc = prometheus.NewDesc(
-		"nginx_upstream_weight",
+	vtsUpstreamWeightDesc = prometheus.NewDesc(
+		"nginx_vts_upstream_weight",
 		"Current upstream weight setting of the server.",
 		[]string{"upstream", "server"}, nil)
+
+	activeDesc = prometheus.NewDesc(
+		"nginx_active_connections",
+		"total number of active connections",
+		nil, nil)
+
+	acceptedDesc = prometheus.NewDesc(
+		"nginx_accepted_connections",
+		"total number of accepted client connections",
+		nil, nil)
+
+	handledDesc = prometheus.NewDesc(
+		"nginx_handled_connections",
+		"total number of handled connections",
+		nil, nil)
+
+	requestsDesc = prometheus.NewDesc(
+		"nginx_total_requests",
+		"total number of client requests",
+		nil, nil)
+
+	readingDesc = prometheus.NewDesc(
+		"nginx_current_reading_connections",
+		"current number of connections where nginx is reading the request header",
+		nil, nil)
+
+	writingDesc = prometheus.NewDesc(
+		"nginx_current_writing_connections",
+		"current number of connections where nginx is writing the response back to the client",
+		nil, nil)
+
+	waitingDesc = prometheus.NewDesc(
+		"nginx_current_waiting_connections",
+		"current number of idle client connections waiting for a request",
+		nil, nil)
 )
 
 type (
@@ -170,23 +224,27 @@ type (
 	namedProcessCollector struct {
 		scrapeChan chan scrapeRequest
 		*proc.Grouper
-		fs *proc.FS
+		fs           *proc.FS
+		vtsCollector bool
 	}
 )
 
 func newProcessCollector(
 	children bool,
-	n common.MatchNamer) (*namedProcessCollector, error) {
+	n common.MatchNamer,
+	vtsCollector bool) (*namedProcessCollector, error) {
 
 	fs, err := proc.NewFS("/proc")
 	if err != nil {
 		return nil, err
 	}
 	p := &namedProcessCollector{
-		scrapeChan: make(chan scrapeRequest),
-		Grouper:    proc.NewGrouper(children, n),
-		fs:         fs,
+		scrapeChan:   make(chan scrapeRequest),
+		Grouper:      proc.NewGrouper(children, n),
+		fs:           fs,
+		vtsCollector: vtsCollector,
 	}
+
 	_, err = p.Update(p.fs.AllProcs())
 	if err != nil {
 		return nil, err
@@ -199,30 +257,37 @@ func newProcessCollector(
 
 // Describe implements prometheus.Collector.
 func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- cpuSecsDesc
+	ch <- numprocsDesc
+	ch <- readBytesDesc
+	ch <- writeBytesDesc
 	ch <- memResidentbytesDesc
 	ch <- memVirtualbytesDesc
 	ch <- startTimeDesc
-	ch <- bytesDesc
-	ch <- cacheDesc
-	ch <- connectionsDesc
-	ch <- readBytesDesc
-	ch <- requestDesc
-	ch <- responseDesc
-	ch <- writeBytesDesc
-	ch <- upstreamBackupDesc
-	ch <- upstreamBytesDesc
-	ch <- upstreamDownDesc
-	ch <- upstreamFailTimeoutDesc
-	ch <- upstreamMaxFailsDesc
-	ch <- upstreamRequestDesc
-	ch <- upstreamResponseMsecDesc
-	ch <- upstreamResponsesDesc
-	ch <- upstreamWeightDesc
-
-	ch <- numprocsDesc
+
+	if p.vtsCollector {
+		ch <- vtsBytesDesc
+		ch <- vtsCacheDesc
+		ch <- vtsConnectionsDesc
+		ch <- readBytesDesc
+		ch <- vtsRequestDesc
+		ch <- vtsResponseDesc
+		ch <- writeBytesDesc
+		ch <- vtsUpstreamBackupDesc
+		ch <- vtsUpstreamBytesDesc
+		ch <- vtsUpstreamDownDesc
+		ch <- vtsUpstreamFailTimeoutDesc
+		ch <- vtsUpstreamMaxFailsDesc
+		ch <- vtsUpstreamRequestDesc
+		ch <- vtsUpstreamResponseMsecDesc
+		ch <- vtsUpstreamResponsesDesc
+		ch <- vtsUpstreamWeightDesc
+		ch <- vtsFilterZoneBytesDesc
+		ch <- vtsFilterZoneCacheDesc
+		ch <- vtsFilterZoneResponseDesc
+	}
 
 }
 
@@ -234,71 +299,88 @@ func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {
 }
 
 func (p *namedProcessCollector) start() {
+
+	//glog.Warningf("OOO %v", p.configmap.Data)
+
 	for req := range p.scrapeChan {
 		ch := req.results
-		p.scrape(ch)
+		p.scrapeNginxStatus(ch)
+
+		if p.vtsCollector {
+			p.scrapeVts(ch)
+		}
+
 		req.done <- struct{}{}
 	}
 }
 
-func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
-
-	val := reflect.ValueOf(value).Elem()
-
-	for i := 0; i < val.NumField(); i++ {
-		tag := val.Type().Field(i).Tag
-
-		labels := append(labels, tag.Get("json"))
-		ch <- prometheus.MustNewConstMetric(desc,
-			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
-			labels...)
-	}
-
-}
-
-func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
-
-	nginxMetrics, err := getNginxMetrics()
+func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {
+	s, err := getNginxStatus()
 	if err != nil {
 		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
 		return
 	}
 
+	p.scrapeProcs(ch)
+	ch <- prometheus.MustNewConstMetric(activeDesc,
+		prometheus.GaugeValue, float64(s.Active))
+	ch <- prometheus.MustNewConstMetric(acceptedDesc,
+		prometheus.GaugeValue, float64(s.Accepted))
+	ch <- prometheus.MustNewConstMetric(handledDesc,
+		prometheus.GaugeValue, float64(s.Handled))
+	ch <- prometheus.MustNewConstMetric(requestsDesc,
+		prometheus.GaugeValue, float64(s.Requests))
+	ch <- prometheus.MustNewConstMetric(readingDesc,
+		prometheus.GaugeValue, float64(s.Reading))
+	ch <- prometheus.MustNewConstMetric(writingDesc,
+		prometheus.GaugeValue, float64(s.Writing))
+	ch <- prometheus.MustNewConstMetric(waitingDesc,
+		prometheus.GaugeValue, float64(s.Waiting))
 
-	reflectMetrics(&nginxMetrics.Connections, connectionsDesc, ch)
+}
+
+func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {
+
+	nginxMetrics, err := getNginxVtsMetrics()
+	if err != nil {
+		glog.Warningf("unexpected error obtaining nginx status info: %v", err)
+		return
+	}
+
+	reflectMetrics(&nginxMetrics.Connections, vtsConnectionsDesc, ch)
 
 	for name, zones := range nginxMetrics.UpstreamZones {
 		for pos, value := range zones {
 
-			reflectMetrics(&zones[pos].Responses, upstreamResponsesDesc, ch, name, value.Server)
+			reflectMetrics(&zones[pos].Responses, vtsUpstreamResponsesDesc, ch, name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamRequestDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamRequestDesc,
 				prometheus.CounterValue, float64(zones[pos].RequestCounter), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamDownDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamDownDesc,
 				prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamWeightDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamWeightDesc,
 				prometheus.CounterValue, float64(zones[pos].Weight), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamResponseMsecDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamResponseMsecDesc,
 				prometheus.CounterValue, float64(zones[pos].ResponseMsec), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamBackupDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBackupDesc,
 				prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamFailTimeoutDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamFailTimeoutDesc,
 				prometheus.CounterValue, float64(zones[pos].FailTimeout), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamMaxFailsDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamMaxFailsDesc,
 				prometheus.CounterValue, float64(zones[pos].MaxFails), name, value.Server)
 
-			ch <- prometheus.MustNewConstMetric(upstreamBytesDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
 				prometheus.CounterValue, float64(zones[pos].InBytes), name, value.Server, "in")
 
-			ch <- prometheus.MustNewConstMetric(upstreamBytesDesc,
+			ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
 				prometheus.CounterValue, float64(zones[pos].OutBytes), name, value.Server, "out")
 		}
 	}
@@ -306,42 +388,44 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
 	for name, zone := range nginxMetrics.ServerZones {
 
-		reflectMetrics(&zone.Responses, responseDesc, ch, name)
+		reflectMetrics(&zone.Responses, vtsResponseDesc, ch, name)
+		reflectMetrics(&zone.Cache, vtsCacheDesc, ch, name)
 
-		ch <- prometheus.MustNewConstMetric(requestDesc,
+		ch <- prometheus.MustNewConstMetric(vtsRequestDesc,
 			prometheus.CounterValue, float64(zone.RequestCounter), name)
 
-		ch <- prometheus.MustNewConstMetric(bytesDesc,
+		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
 			prometheus.CounterValue, float64(zone.InBytes), name, "in")
 
-		ch <- prometheus.MustNewConstMetric(bytesDesc,
+		ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
 			prometheus.CounterValue, float64(zone.OutBytes), name, "out")
 
-		//cache
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheBypass), name, "bypass")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheExpired), name, "expired")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheHit), name, "hit")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheRevalidated), name, "revalidated")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheUpdating), name, "updating")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheStale), name, "stale")
-
-		ch <- prometheus.MustNewConstMetric(cacheDesc,
-			prometheus.CounterValue, float64(zone.Responses.CacheScarce), name, "scarce")
-
 	}
 
-	_, err = p.Update(p.fs.AllProcs())
+	for serverZone, countries := range nginxMetrics.FilterZones {
+
+		for country, zone := range countries {
+
+			serverZone = strings.Replace(serverZone, "country::", "", 1)
+
+			reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
+			reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)
+
+			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
+				prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
+
+			ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
+				prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
+
+		}
+
+	}
+
+}
+
+func (p *namedProcessCollector) scrapeProcs(ch chan<- prometheus.Metric) {
+
+	_, err := p.Update(p.fs.AllProcs())
 	if err != nil {
 		glog.Warningf("unexpected error obtaining nginx process info: %v", err)
 		return
@@ -366,3 +450,18 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
 			prometheus.CounterValue, float64(gcounts.WriteBytes))
 	}
 }
+
+func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
+
+	val := reflect.ValueOf(value).Elem()
+
+	for i := 0; i < val.NumField(); i++ {
+		tag := val.Type().Field(i).Tag
+
+		labels := append(labels, tag.Get("json"))
+		ch <- prometheus.MustNewConstMetric(desc,
+			prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
+			labels...)
+	}
+
+}
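
The reflectMetrics helper kept at the bottom of metrics.go is the core trick behind the vts counters: it walks every float64 field of a tagged struct and appends each field's json tag as the final label value. A standalone sketch of that technique follows (illustrative only; the response type and label names are stand-ins, not the patch's own types):

    package main

    import (
        "fmt"
        "reflect"
    )

    type response struct {
        OneXx float64 `json:"1xx"`
        TwoXx float64 `json:"2xx"`
    }

    func main() {
        r := &response{OneXx: 3, TwoXx: 40}
        val := reflect.ValueOf(r).Elem()
        for i := 0; i < val.NumField(); i++ {
            // the json tag ("1xx", "2xx", ...) becomes the status_code label value
            tag := val.Type().Field(i).Tag.Get("json")
            fmt.Printf("server_zone=%q status_code=%q value=%v\n", "example.com", tag, val.Field(i).Float())
        }
    }
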
diff --git a/controllers/nginx/pkg/cmd/controller/metrics_test.go b/controllers/nginx/pkg/cmd/controller/metrics_test.go
new file mode 100644
index 000000000..06ab7d0f9
--- /dev/null
+++ b/controllers/nginx/pkg/cmd/controller/metrics_test.go
@@ -0,0 +1 @@
+package main
diff --git a/controllers/nginx/pkg/cmd/controller/status.go b/controllers/nginx/pkg/cmd/controller/status.go
index 78e53df51..b3bb324d1 100644
--- a/controllers/nginx/pkg/cmd/controller/status.go
+++ b/controllers/nginx/pkg/cmd/controller/status.go
@@ -19,6 +19,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/golang/glog"
 	"io/ioutil"
 	"net/http"
 	"regexp"
@@ -50,45 +51,44 @@ type nginxStatus struct {
 	Waiting int
 }
 
+// https://github.com/vozlt/nginx-module-vts
 type Vts struct {
-	NginxVersion string                 `json:"nginxVersion"`
-	LoadMsec     int                    `json:"loadMsec"`
-	NowMsec      int                    `json:"nowMsec"`
-	Connections  Connections            `json:"connections"`
-	ServerZones  map[string]ServerZones `json:"serverZones"`
-	FilterZones  map[string]FilterZone  `json:"filterZones"`
+	NginxVersion string `json:"nginxVersion"`
+	LoadMsec     int    `json:"loadMsec"`
+	NowMsec      int    `json:"nowMsec"`
+	// Total connections and requests(same as stub_status_module in NGINX)
+	Connections Connections `json:"connections"`
+	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone
+	ServerZones map[string]ServerZone `json:"serverZones"`
+	// Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
+	// the vhost_traffic_status_filter_by_set_key directive
+	FilterZones map[string]map[string]FilterZone `json:"filterZones"`
+	// Traffic(in/out) and request and response counts per server in each upstream group
 	UpstreamZones map[string][]UpstreamZone `json:"upstreamZones"`
 }
 
-type ServerZones struct {
-	RequestCounter float64    `json:"requestCounter"`
-	InBytes        float64    `json:"inBytes"`
-	OutBytes       float64    `json:"outBytes"`
-	Responses      Response   `json:"responses"`
-	OverCounts     OverCounts `json:"overCounts"`
-}
-
-type OverCounts struct {
-	RequestCounter float64 `json:"requestCounter"`
-	InBytes        float64 `json:"inBytes"`
-	OutBytes       float64 `json:"outBytes"`
-	OneXx          float64 `json:"1xx"`
-	TwoXx          float64 `json:"2xx"`
-	TheeXx         float64 `json:"3xx"`
-	FourXx         float64 `json:"4xx"`
-	FiveXx         float64 `json:"5xx"`
+type ServerZone struct {
+	RequestCounter float64  `json:"requestCounter"`
+	InBytes        float64  `json:"inBytes"`
+	OutBytes       float64  `json:"outBytes"`
+	Responses      Response `json:"responses"`
+	Cache          Cache    `json:"responses"`
 }
 
 type FilterZone struct {
+	RequestCounter float64  `json:"requestCounter"`
+	InBytes        float64  `json:"inBytes"`
+	OutBytes       float64  `json:"outBytes"`
+	Cache          Cache    `json:"responses"`
+	Responses      Response `json:"responses"`
 }
 
 type UpstreamZone struct {
+	Responses      Response `json:"responses"`
 	Server         string   `json:"server"`
 	RequestCounter float64  `json:"requestCounter"`
 	InBytes        float64  `json:"inBytes"`
 	OutBytes       float64  `json:"outBytes"`
-	Responses      Response   `json:"responses"`
-	OverCounts     OverCounts `json:"overcounts"`
 	ResponseMsec   float64  `json:"responseMsec"`
 	Weight         float64  `json:"weight"`
 	MaxFails       float64  `json:"maxFails"`
@@ -97,20 +97,23 @@ type UpstreamZone struct {
 	Down           BoolToFloat64 `json:"down"`
 }
 
+type Cache struct {
+	Miss        float64 `json:"miss"`
+	Bypass      float64 `json:"bypass"`
+	Expired     float64 `json:"expired"`
+	Stale       float64 `json:"stale"`
+	Updating    float64 `json:"updating"`
+	Revalidated float64 `json:"revalidated"`
+	Hit         float64 `json:"hit"`
+	Scarce      float64 `json:"scarce"`
+}
+
 type Response struct {
-	OneXx            float64 `json:"1xx"`
-	TwoXx            float64 `json:"2xx"`
-	TheeXx           float64 `json:"3xx"`
-	FourXx           float64 `json:"4xx"`
-	FiveXx           float64 `json:"5xx"`
-	CacheMiss        float64 `json:"miss"`
-	CacheBypass      float64 `json:"bypass"`
-	CacheExpired     float64 `json:"expired"`
-	CacheStale       float64 `json:"stale"`
-	CacheUpdating    float64 `json:"updating"`
-	CacheRevalidated float64 `json:"revalidated"`
-	CacheHit         float64 `json:"hit"`
-	CacheScarce      float64 `json:"scarce"`
+	OneXx  float64 `json:"1xx"`
+	TwoXx  float64 `json:"2xx"`
+	TheeXx float64 `json:"3xx"`
+	FourXx float64 `json:"4xx"`
+	FiveXx float64 `json:"5xx"`
 }
 
 type Connections struct {
@@ -138,7 +141,11 @@ func (bit BoolToFloat64) UnmarshalJSON(data []byte) error {
 }
 
 func getNginxStatus() (*nginxStatus, error) {
-	data, err := httpBody(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath))
+
+	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
+	glog.V(3).Infof("scraping url: %v", url)
+
+	data, err := httpBody(url)
 
 	if err != nil {
 		return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
@@ -166,9 +173,12 @@ func httpBody(url string) ([]byte, error) {
 }
 
 func getNginxVtsMetrics() (*Vts, error) {
-	data, err := httpBody(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath))
+	url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
+	glog.V(3).Infof("scraping url: %v", url)
 
-	if err {
+	data, err := httpBody(url)
+
+	if err != nil {
 		return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
 	}
 
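
The Vts, ServerZone, FilterZone and UpstreamZone types above mirror the JSON emitted by the nginx-module-vts status endpoint; FilterZones in particular decodes as a map of filter key to a map of filter value to zone. A minimal decoding sketch with local stand-in types (field subset and sample values are made up for illustration, not taken from a real vts payload):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type upstreamZone struct {
        Server         string  `json:"server"`
        RequestCounter float64 `json:"requestCounter"`
    }

    type vts struct {
        UpstreamZones map[string][]upstreamZone          `json:"upstreamZones"`
        FilterZones   map[string]map[string]upstreamZone `json:"filterZones"`
    }

    func main() {
        payload := []byte(`{
            "upstreamZones": {"default-echo-80": [{"server": "10.2.0.5:8080", "requestCounter": 982}]},
            "filterZones": {"country::example.com": {"US": {"server": "", "requestCounter": 12}}}
        }`)

        var v vts
        if err := json.Unmarshal(payload, &v); err != nil {
            panic(err)
        }
        // scrapeVts strips the "country::" prefix from the key before using it as the server_zone label
        fmt.Println(v.UpstreamZones["default-echo-80"][0].RequestCounter) // 982
        fmt.Println(v.FilterZones["country::example.com"]["US"].RequestCounter) // 12
    }
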
diff --git a/controllers/nginx/pkg/cmd/controller/status_test.go b/controllers/nginx/pkg/cmd/controller/status_test.go
index 1dda3a01e..9a4fd4c45 100644
--- a/controllers/nginx/pkg/cmd/controller/status_test.go
+++ b/controllers/nginx/pkg/cmd/controller/status_test.go
@@ -68,3 +68,21 @@ func TestToint(t *testing.T) {
 		}
 	}
 }
+// TODO: table-driven test for BoolToFloat64.UnmarshalJSON, e.g.:
+//func TestUnmarshalJSON(t *testing.T) {
+//	tests := []struct {
+//		in      string
+//		exp     float64
+//		wantErr bool
+//	}{
+//		{in: `false`, exp: 0},
+//		{in: `0`, exp: 0},
+//		{in: `true`, exp: 1},
+//		{in: `1`, exp: 1},
+//		{in: `"err"`, wantErr: true},
+//	}
+//
+//	for _, test := range tests {
+//		// unmarshal test.in into a BoolToFloat64 and compare against test.exp / test.wantErr
+//	}
+//}
\ No newline at end of file
diff --git a/controllers/nginx/pkg/config/config.go b/controllers/nginx/pkg/config/config.go
index 0001d9502..2e59a74a9 100644
--- a/controllers/nginx/pkg/config/config.go
+++ b/controllers/nginx/pkg/config/config.go
@@ -289,6 +289,7 @@ func NewDefault() Configuration {
 		UseProxyProtocol: false,
 		UseGzip:          true,
 		WorkerProcesses:  runtime.NumCPU(),
+		EnableVtsStatus:  false,
 		VtsStatusZoneSize: "10m",
 		UseHTTP2:         true,
 		Backend: defaults.Backend{
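
The hard-coded enableVts flag marked "// TODO fix true" in setupMonitor is presumably meant to be driven by the EnableVtsStatus default added to config.go above once the configuration is plumbed through to the controller. A hypothetical follow-up shape (the amended signature and the way the configuration reaches setupMonitor are assumptions, not part of this patch):

    // Hypothetical follow-up, not part of this patch.
    func (n *NGINXController) setupMonitor(args []string, cfg config.Configuration) {
        // cfg.EnableVtsStatus would replace the literal true passed today
        pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, cfg.EnableVtsStatus)
        if err != nil {
            glog.Fatalf("unexpected error registering nginx collector: %v", err)
        }
        if err := prometheus.Register(pc); err != nil {
            glog.Warningf("unexpected error registering nginx collector: %v", err)
        }
    }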