From be9bf4e4d383e40d7ac1fda4cfd14ed26153e378 Mon Sep 17 00:00:00 2001 From: Marco Cadetg Date: Fri, 17 Mar 2023 15:13:21 +0100 Subject: [PATCH] exclude creation and exporting of socket metrics via flag --- cmd/dataplane/main.go | 2 +- cmd/nginx/main.go | 2 +- internal/ingress/controller/controller.go | 9 +- internal/ingress/metric/collectors/socket.go | 189 +++++++++++------- .../ingress/metric/collectors/socket_test.go | 116 ++++++++++- internal/ingress/metric/main.go | 4 +- pkg/flags/flags.go | 10 +- 7 files changed, 251 insertions(+), 81 deletions(-) diff --git a/cmd/dataplane/main.go b/cmd/dataplane/main.go index 0ab978429..6fd559e4d 100644 --- a/cmd/dataplane/main.go +++ b/cmd/dataplane/main.go @@ -70,7 +70,7 @@ func main() { mc := metric.NewDummyCollector() if conf.EnableMetrics { // TODO: Ingress class is not a part of dataplane anymore - mc, err = metric.NewCollector(conf.MetricsPerHost, conf.ReportStatusClasses, reg, conf.IngressClassConfiguration.Controller, *conf.MetricsBuckets) + mc, err = metric.NewCollector(conf.MetricsPerHost, conf.ReportStatusClasses, reg, conf.IngressClassConfiguration.Controller, *conf.MetricsBuckets, conf.ExcludeSocketMetrics) if err != nil { klog.Fatalf("Error creating prometheus collector: %v", err) } diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 9f0973ec9..48dd933dc 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -133,7 +133,7 @@ func main() { mc := metric.NewDummyCollector() if conf.EnableMetrics { - mc, err = metric.NewCollector(conf.MetricsPerHost, conf.ReportStatusClasses, reg, conf.IngressClassConfiguration.Controller, *conf.MetricsBuckets) + mc, err = metric.NewCollector(conf.MetricsPerHost, conf.ReportStatusClasses, reg, conf.IngressClassConfiguration.Controller, *conf.MetricsBuckets, conf.ExcludeSocketMetrics) if err != nil { klog.Fatalf("Error creating prometheus collector: %v", err) } diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 1914765d8..d33f427f2 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -101,10 +101,11 @@ type Configuration struct { EnableProfiling bool - EnableMetrics bool - MetricsPerHost bool - MetricsBuckets *collectors.HistogramBuckets - ReportStatusClasses bool + EnableMetrics bool + MetricsPerHost bool + MetricsBuckets *collectors.HistogramBuckets + ReportStatusClasses bool + ExcludeSocketMetrics []string FakeCertificate *ingress.SSLCert diff --git a/internal/ingress/metric/collectors/socket.go b/internal/ingress/metric/collectors/socket.go index 23048d5d6..381bdaf29 100644 --- a/internal/ingress/metric/collectors/socket.go +++ b/internal/ingress/metric/collectors/socket.go @@ -21,6 +21,7 @@ import ( "io" "net" "os" + "strings" "syscall" jsoniter "github.com/json-iterator/go" @@ -78,7 +79,7 @@ type SocketCollector struct { listener net.Listener - metricMapping map[string]interface{} + metricMapping map[string]prometheus.Collector hosts sets.Set[string] @@ -106,7 +107,7 @@ var defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} // NewSocketCollector creates a new SocketCollector instance using // the ingress watch namespace and class used by the controller -func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStatusClasses bool, buckets HistogramBuckets) (*SocketCollector, error) { +func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStatusClasses bool, buckets HistogramBuckets, excludedMetrics []string) (*SocketCollector, 
error) { socket := "/tmp/nginx/prometheus-nginx.socket" // unix sockets must be unlink()ed before being used _ = syscall.Unlink(socket) @@ -132,13 +133,20 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat requestTags = append(requestTags, "host") } + em := make(map[string]struct{}, len(excludedMetrics)) + for _, m := range excludedMetrics { + // remove potential nginx_ingress_controller prefix from the metric name + // TBD: how to handle fully qualified histogram metrics e.g. _buckets and _sum. Should we just remove the suffix and remove the histogram metric or ignore it? + em[strings.TrimPrefix(m, "nginx_ingress_controller_")] = struct{}{} + } + sc := &SocketCollector{ listener: listener, metricsPerHost: metricsPerHost, reportStatusClasses: reportStatusClasses, - connectTime: prometheus.NewHistogramVec( + connectTime: histogramMetric( prometheus.HistogramOpts{ Name: "connect_duration_seconds", Help: "The time spent on establishing a connection with the upstream server", @@ -147,8 +155,10 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Buckets: buckets.TimeBuckets, }, requestTags, + em, ), - headerTime: prometheus.NewHistogramVec( + + headerTime: histogramMetric( prometheus.HistogramOpts{ Name: "header_duration_seconds", Help: "The time spent on receiving first header from the upstream server", @@ -157,8 +167,9 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Buckets: buckets.TimeBuckets, }, requestTags, + em, ), - responseTime: prometheus.NewHistogramVec( + responseTime: histogramMetric( prometheus.HistogramOpts{ Name: "response_duration_seconds", Help: "The time spent on receiving the response from the upstream server", @@ -167,8 +178,10 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Buckets: buckets.TimeBuckets, }, requestTags, + em, ), - requestTime: prometheus.NewHistogramVec( + + requestTime: histogramMetric( prometheus.HistogramOpts{ Name: "request_duration_seconds", Help: "The request processing time in milliseconds", @@ -177,9 +190,10 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Buckets: buckets.TimeBuckets, }, requestTags, + em, ), - responseLength: prometheus.NewHistogramVec( + responseLength: histogramMetric( prometheus.HistogramOpts{ Name: "response_size", Help: "The response length (including request line, header, and request body)", @@ -188,19 +202,22 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Buckets: buckets.LengthBuckets, }, requestTags, + em, ), - requestLength: prometheus.NewHistogramVec( + + requestLength: histogramMetric( prometheus.HistogramOpts{ Name: "request_size", Help: "The request length (including request line, header, and request body)", Namespace: PrometheusNamespace, - Buckets: buckets.LengthBuckets, ConstLabels: constLabels, + Buckets: buckets.LengthBuckets, }, requestTags, + em, ), - requests: prometheus.NewCounterVec( + requests: counterMetric( prometheus.CounterOpts{ Name: "requests", Help: "The total number of client requests", @@ -208,9 +225,10 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat ConstLabels: constLabels, }, requestTags, + em, ), - bytesSent: prometheus.NewHistogramVec( + bytesSent: histogramMetric( prometheus.HistogramOpts{ Name: "bytes_sent", Help: "DEPRECATED The number of bytes sent to a client", @@ -219,9 +237,10 @@ func NewSocketCollector(pod, namespace, class string, 
metricsPerHost, reportStat ConstLabels: constLabels, }, requestTags, + em, ), - upstreamLatency: prometheus.NewSummaryVec( + upstreamLatency: summaryMetric( prometheus.SummaryOpts{ Name: "ingress_upstream_latency_seconds", Help: "DEPRECATED Upstream service latency per Ingress", @@ -230,10 +249,11 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat Objectives: defObjectives, }, []string{"ingress", "namespace", "service", "canary"}, + em, ), } - sc.metricMapping = map[string]interface{}{ + sc.metricMapping = map[string]prometheus.Collector{ prometheus.BuildFQName(PrometheusNamespace, "", "requests"): sc.requests, prometheus.BuildFQName(PrometheusNamespace, "", "connect_duration_seconds"): sc.connectTime, @@ -249,9 +269,52 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat prometheus.BuildFQName(PrometheusNamespace, "", "ingress_upstream_latency_seconds"): sc.upstreamLatency, } + for m := range em { + // remove excluded metrics from the metricMapping + delete(sc.metricMapping, prometheus.BuildFQName(PrometheusNamespace, "", m)) + } + return sc, nil } +func containsMetric(excludedMetrics map[string]struct{}, name string) bool { + if _, ok := excludedMetrics[name]; ok { + klog.V(3).InfoS("Skipping metric", "metric", name) + return true + } + return false +} + +func summaryMetric(opts prometheus.SummaryOpts, requestTags []string, excludedMetrics map[string]struct{}) *prometheus.SummaryVec { + if containsMetric(excludedMetrics, opts.Name) { + return nil + } + return prometheus.NewSummaryVec( + opts, + requestTags, + ) +} + +func counterMetric(opts prometheus.CounterOpts, requestTags []string, excludedMetrics map[string]struct{}) *prometheus.CounterVec { + if containsMetric(excludedMetrics, opts.Name) { + return nil + } + return prometheus.NewCounterVec( + opts, + requestTags, + ) +} + +func histogramMetric(opts prometheus.HistogramOpts, requestTags []string, excludedMetrics map[string]struct{}) *prometheus.HistogramVec { + if containsMetric(excludedMetrics, opts.Name) { + return nil + } + return prometheus.NewHistogramVec( + opts, + requestTags, + ) +} + func (sc *SocketCollector) handleMessage(msg []byte) { klog.V(5).InfoS("Metric", "message", string(msg)) @@ -305,30 +368,36 @@ func (sc *SocketCollector) handleMessage(msg []byte) { "canary": stats.Canary, } - requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) - if err != nil { - klog.ErrorS(err, "Error fetching requests metric") - } else { - requestsMetric.Inc() + if sc.requests != nil { + requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) + if err != nil { + klog.ErrorS(err, "Error fetching requests metric") + } else { + requestsMetric.Inc() + } } if stats.Latency != -1 { - connectTimeMetric, err := sc.connectTime.GetMetricWith(requestLabels) - if err != nil { - klog.ErrorS(err, "Error fetching connect time metric") - } else { - connectTimeMetric.Observe(stats.Latency) + if sc.connectTime != nil { + connectTimeMetric, err := sc.connectTime.GetMetricWith(requestLabels) + if err != nil { + klog.ErrorS(err, "Error fetching connect time metric") + } else { + connectTimeMetric.Observe(stats.Latency) + } } - latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels) - if err != nil { - klog.ErrorS(err, "Error fetching latency metric") - } else { - latencyMetric.Observe(stats.Latency) + if sc.upstreamLatency != nil { + latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels) + if err != nil { + klog.ErrorS(err, "Error fetching 
latency metric") + } else { + latencyMetric.Observe(stats.Latency) + } } } - if stats.HeaderTime != -1 { + if stats.HeaderTime != -1 && sc.headerTime != nil { headerTimeMetric, err := sc.headerTime.GetMetricWith(requestLabels) if err != nil { klog.ErrorS(err, "Error fetching header time metric") @@ -337,7 +406,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { } } - if stats.RequestTime != -1 { + if stats.RequestTime != -1 && sc.requestTime != nil { requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels) if err != nil { klog.ErrorS(err, "Error fetching request duration metric") @@ -346,7 +415,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { } } - if stats.RequestLength != -1 { + if stats.RequestLength != -1 && sc.requestLength != nil { requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels) if err != nil { klog.ErrorS(err, "Error fetching request length metric") @@ -355,7 +424,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { } } - if stats.ResponseTime != -1 { + if stats.ResponseTime != -1 && sc.responseTime != nil { responseTimeMetric, err := sc.responseTime.GetMetricWith(requestLabels) if err != nil { klog.ErrorS(err, "Error fetching upstream response time metric") @@ -365,18 +434,22 @@ func (sc *SocketCollector) handleMessage(msg []byte) { } if stats.ResponseLength != -1 { - bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) - if err != nil { - klog.ErrorS(err, "Error fetching bytes sent metric") - } else { - bytesSentMetric.Observe(stats.ResponseLength) + if sc.bytesSent != nil { + bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) + if err != nil { + klog.ErrorS(err, "Error fetching bytes sent metric") + } else { + bytesSentMetric.Observe(stats.ResponseLength) + } } - responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels) - if err != nil { - klog.ErrorS(err, "Error fetching bytes sent metric") - } else { - responseSizeMetric.Observe(stats.ResponseLength) + if sc.responseLength != nil { + responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels) + if err != nil { + klog.ErrorS(err, "Error fetching bytes sent metric") + } else { + responseSizeMetric.Observe(stats.ResponseLength) + } } } } @@ -471,36 +544,16 @@ func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus // Describe implements prometheus.Collector func (sc SocketCollector) Describe(ch chan<- *prometheus.Desc) { - sc.connectTime.Describe(ch) - sc.headerTime.Describe(ch) - sc.responseTime.Describe(ch) - sc.requestTime.Describe(ch) - - sc.requestLength.Describe(ch) - sc.responseLength.Describe(ch) - - sc.requests.Describe(ch) - - sc.upstreamLatency.Describe(ch) - - sc.bytesSent.Describe(ch) + for _, metric := range sc.metricMapping { + metric.Describe(ch) + } } // Collect implements the prometheus.Collector interface. 
func (sc SocketCollector) Collect(ch chan<- prometheus.Metric) { - sc.connectTime.Collect(ch) - sc.headerTime.Collect(ch) - sc.responseTime.Collect(ch) - sc.requestTime.Collect(ch) - - sc.requestLength.Collect(ch) - sc.responseLength.Collect(ch) - - sc.requests.Collect(ch) - - sc.upstreamLatency.Collect(ch) - - sc.bytesSent.Collect(ch) + for _, metric := range sc.metricMapping { + metric.Collect(ch) + } } // SetHosts sets the hostnames that are being served by the ingress controller diff --git a/internal/ingress/metric/collectors/socket_test.go b/internal/ingress/metric/collectors/socket_test.go index 4bebc7600..fe442aba0 100644 --- a/internal/ingress/metric/collectors/socket_test.go +++ b/internal/ingress/metric/collectors/socket_test.go @@ -84,6 +84,7 @@ func TestCollector(t *testing.T) { data []string metrics []string useStatusClasses bool + excludeMetrics []string wantBefore string removeIngresses []string wantAfter string @@ -470,13 +471,126 @@ func TestCollector(t *testing.T) { wantAfter: ` `, }, + { + name: "basic exclude metrics test", + data: []string{`[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamLatency":1.0, + "upstreamHeaderTime":5.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app", + "canary":"" + }]`}, + excludeMetrics: []string{"nginx_ingress_controller_connect_duration_seconds"}, + metrics: []string{"nginx_ingress_controller_connect_duration_seconds", "nginx_ingress_controller_response_duration_seconds"}, + useStatusClasses: true, + wantBefore: ` + # HELP nginx_ingress_controller_response_duration_seconds The time spent on receiving the response from the upstream server + # TYPE nginx_ingress_controller_response_duration_seconds histogram + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.025"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.1"} 0 + 
nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.25"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="2.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="+Inf"} 1 + nginx_ingress_controller_response_duration_seconds_sum{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx"} 200 + nginx_ingress_controller_response_duration_seconds_count{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx"} 1 + `, + }, + { + name: "remove metrics with the short metric name", + data: []string{`[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + "upstreamLatency":1.0, + "upstreamHeaderTime":5.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app", + "canary":"" + }]`}, + excludeMetrics: []string{"response_duration_seconds"}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + useStatusClasses: true, + wantBefore: ` + `, + }, + { + name: "exclude metrics make sure to only remove exactly matched metrics", + data: []string{`[{ + "host":"testshop.com", + "status":"200", + "bytesSent":150.0, + "method":"GET", + "path":"/admin", + "requestLength":300.0, + "requestTime":60.0, + 
"upstreamLatency":1.0, + "upstreamHeaderTime":5.0, + "upstreamName":"test-upstream", + "upstreamIP":"1.1.1.1:8080", + "upstreamResponseTime":200, + "upstreamStatus":"220", + "namespace":"test-app-production", + "ingress":"web-yml", + "service":"test-app", + "canary":"" + }]`}, + excludeMetrics: []string{"response_duration_seconds2", "test.*", "nginx_ingress_.*", "response_duration_secon"}, + metrics: []string{"nginx_ingress_controller_response_duration_seconds"}, + useStatusClasses: true, + wantBefore: ` + # HELP nginx_ingress_controller_response_duration_seconds The time spent on receiving the response from the upstream server + # TYPE nginx_ingress_controller_response_duration_seconds histogram + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.005"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.01"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.025"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.05"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.25"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="0.5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="1"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="2.5"} 0 + 
nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="5"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="10"} 0 + nginx_ingress_controller_response_duration_seconds_bucket{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx",le="+Inf"} 1 + nginx_ingress_controller_response_duration_seconds_sum{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx"} 200 + nginx_ingress_controller_response_duration_seconds_count{canary="",controller_class="ingress",controller_namespace="default",controller_pod="pod",host="testshop.com",ingress="web-yml",method="GET",namespace="test-app-production",path="/admin",service="test-app",status="2xx"} 1 + `, + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { registry := prometheus.NewPedanticRegistry() - sc, err := NewSocketCollector("pod", "default", "ingress", true, c.useStatusClasses, buckets) + sc, err := NewSocketCollector("pod", "default", "ingress", true, c.useStatusClasses, buckets, c.excludeMetrics) if err != nil { t.Errorf("%v: unexpected error creating new SocketCollector: %v", c.name, err) } diff --git a/internal/ingress/metric/main.go b/internal/ingress/metric/main.go index cac86e889..b2f721f62 100644 --- a/internal/ingress/metric/main.go +++ b/internal/ingress/metric/main.go @@ -71,7 +71,7 @@ type collector struct { } // NewCollector creates a new metric collector the for ingress controller -func NewCollector(metricsPerHost, reportStatusClasses bool, registry *prometheus.Registry, ingressclass string, buckets collectors.HistogramBuckets) (Collector, error) { +func NewCollector(metricsPerHost, reportStatusClasses bool, registry *prometheus.Registry, ingressclass string, buckets collectors.HistogramBuckets, excludedSocketMetrics []string) (Collector, error) { podNamespace := os.Getenv("POD_NAMESPACE") if podNamespace == "" { podNamespace = "default" @@ -89,7 +89,7 @@ func NewCollector(metricsPerHost, reportStatusClasses bool, registry *prometheus return nil, err } - s, err := collectors.NewSocketCollector(podName, podNamespace, ingressclass, metricsPerHost, reportStatusClasses, buckets) + s, err := collectors.NewSocketCollector(podName, podNamespace, ingressclass, metricsPerHost, reportStatusClasses, buckets, excludedSocketMetrics) if err != nil { return nil, err } diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 911ab775c..f75c1b845 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -171,10 +171,11 @@ Requires the update-status parameter.`) reportStatusClasses = flags.Bool("report-status-classes", false, `Use status classes (2xx, 3xx, 4xx and 5xx) instead of status codes in metrics.`) - timeBuckets = flags.Float64Slice("time-buckets", prometheus.DefBuckets, "Set of buckets which will be used for prometheus histogram metrics such as 
RequestTime, ResponseTime.") - lengthBuckets = flags.Float64Slice("length-buckets", prometheus.LinearBuckets(10, 10, 10), "Set of buckets which will be used for prometheus histogram metrics such as RequestLength, ResponseLength.") - sizeBuckets = flags.Float64Slice("size-buckets", prometheus.ExponentialBuckets(10, 10, 7), "Set of buckets which will be used for prometheus histogram metrics such as BytesSent.") - monitorMaxBatchSize = flags.Int("monitor-max-batch-size", 10000, "Max batch size of NGINX metrics.") + timeBuckets = flags.Float64Slice("time-buckets", prometheus.DefBuckets, "Set of buckets which will be used for prometheus histogram metrics such as RequestTime, ResponseTime.") + lengthBuckets = flags.Float64Slice("length-buckets", prometheus.LinearBuckets(10, 10, 10), "Set of buckets which will be used for prometheus histogram metrics such as RequestLength, ResponseLength.") + sizeBuckets = flags.Float64Slice("size-buckets", prometheus.ExponentialBuckets(10, 10, 7), "Set of buckets which will be used for prometheus histogram metrics such as BytesSent.") + excludeSocketMetrics = flags.StringSlice("exclude-socket-metrics", []string{}, "Set of socket metrics to exclude which will neither be exported nor calculated. E.g. 'nginx_ingress_controller_bytes_sent,nginx_ingress_controller_header_duration_seconds'.") + monitorMaxBatchSize = flags.Int("monitor-max-batch-size", 10000, "Max batch size of NGINX metrics.") httpPort = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`) httpsPort = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`) @@ -328,6 +329,7 @@ https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-g MetricsPerHost: *metricsPerHost, MetricsBuckets: histogramBuckets, ReportStatusClasses: *reportStatusClasses, + ExcludeSocketMetrics: *excludeSocketMetrics, MonitorMaxBatchSize: *monitorMaxBatchSize, DisableServiceExternalName: *disableServiceExternalName, EnableSSLPassthrough: *enableSSLPassthrough,
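
Reviewer note (illustrative, not part of the patch): the sketch below shows how the new excludedMetrics argument is expected to behave, written in the style of socket_test.go. The test name and the explicit os.MkdirAll call are hypothetical additions, and the HistogramBuckets field names are assumed to mirror the time-buckets/length-buckets/size-buckets flags; everything else follows the signatures introduced above.

package collectors

import (
	"os"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

func TestExcludeSocketMetricsSketch(t *testing.T) {
	// The collector listens on /tmp/nginx/prometheus-nginx.socket, so make
	// sure the directory exists before constructing it.
	if err := os.MkdirAll("/tmp/nginx", 0o755); err != nil {
		t.Fatal(err)
	}

	buckets := HistogramBuckets{
		TimeBuckets:   prometheus.DefBuckets,
		LengthBuckets: prometheus.LinearBuckets(10, 10, 10),
		SizeBuckets:   prometheus.ExponentialBuckets(10, 10, 7),
	}

	// Both spellings are accepted: the nginx_ingress_controller_ prefix is
	// trimmed before the name is matched, so the two entries below exclude
	// the connect_duration_seconds and bytes_sent collectors respectively.
	excluded := []string{
		"nginx_ingress_controller_connect_duration_seconds",
		"bytes_sent",
	}

	sc, err := NewSocketCollector("pod", "default", "ingress", true, false, buckets, excluded)
	if err != nil {
		t.Fatalf("unexpected error creating SocketCollector: %v", err)
	}

	// Excluded metrics are removed from metricMapping, so Describe/Collect
	// never export them and handleMessage skips their nil *Vec fields.
	reg := prometheus.NewPedanticRegistry()
	if err := reg.Register(sc); err != nil {
		t.Fatalf("registering collector: %v", err)
	}
}

On a running controller the same list is passed on the command line, e.g. --exclude-socket-metrics=nginx_ingress_controller_connect_duration_seconds,bytes_sent; the short and fully qualified forms are equivalent because of the prefix trimming shown above.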