ingress-nginx-helm/internal/ingress/metric/collectors/controller.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collectors

import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/ingress-nginx/pkg/apis/ingress"
"k8s.io/ingress-nginx/version"
"k8s.io/klog/v2"
)
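// Label sets shared by the metrics below: the controller_* labels identify the
// controller instance, while the remaining labels identify the ingress, host
// or certificate a given sample refers to.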
var (
operation = []string{"controller_namespace", "controller_class", "controller_pod"}
ingressOperation = []string{"controller_namespace", "controller_class", "controller_pod", "namespace", "ingress"}
sslLabelHost = []string{"namespace", "class", "host", "secret_name", "identifier"}
sslInfoLabels = []string{"namespace", "class", "host", "secret_name", "identifier", "issuer_organization", "issuer_common_name", "serial_number", "public_key_algorithm"}
orphanityLabels = []string{"controller_namespace", "controller_class", "controller_pod", "namespace", "ingress", "type"}
)
// Controller defines base metrics about the ingress controller
type Controller struct {
prometheus.Collector
configHash prometheus.Gauge
configSuccess prometheus.Gauge
configSuccessTime prometheus.Gauge
reloadOperation *prometheus.CounterVec
reloadOperationErrors *prometheus.CounterVec
checkIngressOperation *prometheus.CounterVec
checkIngressOperationErrors *prometheus.CounterVec
sslExpireTime *prometheus.GaugeVec
sslInfo *prometheus.GaugeVec
OrphanIngress *prometheus.GaugeVec
constLabels prometheus.Labels
labels prometheus.Labels
leaderElection *prometheus.GaugeVec
buildInfo prometheus.Collector
}
// NewController creates a new prometheus collector for the
// Ingress controller operations
func NewController(pod, namespace, class string) *Controller {
constLabels := prometheus.Labels{
"controller_namespace": namespace,
"controller_class": class,
"controller_pod": pod,
}
cm := &Controller{
constLabels: constLabels,
labels: prometheus.Labels{
"namespace": namespace,
"class": class,
},
buildInfo: prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "build_info",
Help: "A metric with a constant '1' labeled with information about the build.",
ConstLabels: prometheus.Labels{
"controller_namespace": namespace,
"controller_class": class,
"controller_pod": pod,
"release": version.RELEASE,
"build": version.COMMIT,
"repository": version.REPO,
},
},
func() float64 { return 1 },
),
configHash: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "config_hash",
Help: "Running configuration hash actually running",
ConstLabels: constLabels,
},
),
configSuccess: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "config_last_reload_successful",
Help: "Whether the last configuration reload attempt was successful",
ConstLabels: constLabels,
}),
configSuccessTime: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "config_last_reload_successful_timestamp_seconds",
Help: "Timestamp of the last successful configuration reload.",
ConstLabels: constLabels,
}),
reloadOperation: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: PrometheusNamespace,
Name: "success",
Help: `Cumulative number of Ingress controller reload operations`,
},
operation,
),
reloadOperationErrors: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: PrometheusNamespace,
Name: "errors",
Help: `Cumulative number of Ingress controller errors during reload operations`,
},
operation,
),
checkIngressOperationErrors: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: PrometheusNamespace,
Name: "check_errors",
Help: `Cumulative number of Ingress controller errors during syntax check operations`,
},
ingressOperation,
),
checkIngressOperation: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: PrometheusNamespace,
Name: "check_success",
Help: `Cumulative number of Ingress controller syntax check operations`,
},
ingressOperation,
),
sslExpireTime: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "ssl_expire_time_seconds",
Help: `Number of seconds since 1970 until the SSL certificate expires.
An example to check whether a certificate will expire within 10 days is: "nginx_ingress_controller_ssl_expire_time_seconds < (time() + (10 * 24 * 3600))"`,
},
sslLabelHost,
),
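// ssl_certificate_info exposes one series per loaded certificate, which makes
// it possible to detect certificate discrepancies across controller pods,
// e.g. with a query such as:
//
//	sum(nginx_ingress_controller_ssl_certificate_info{host="name.tld"}) by (serial_number)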
sslInfo: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "ssl_certificate_info",
Help: `Holds all labels associated with a certificate`,
},
sslInfoLabels,
),
leaderElection: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "leader_election_status",
Help: "Gauge reporting status of the leader election, 0 indicates follower, 1 indicates leader. 'name' is the string used to identify the lease",
ConstLabels: constLabels,
},
[]string{"name"},
),
OrphanIngress: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Name: "orphan_ingress",
Help: `Gauge reporting the orphan status of an ingress; 1 indicates an orphaned ingress.
'namespace' is the namespace of the ingress, 'ingress' is the ingress name and 'type' is either 'no-service' or 'no-endpoint'`,
},
orphanityLabels,
),
}
return cm
}
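// A minimal registration sketch (illustrative only; the pod, namespace and
// class values are placeholders): the *Controller returned here implements
// prometheus.Collector and is typically registered with a registry that is
// then exposed over HTTP.
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(NewController("ingress-nginx-pod", "ingress-nginx", "nginx"))
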
// IncReloadCount increments the reload counter
func (cm *Controller) IncReloadCount() {
cm.reloadOperation.With(cm.constLabels).Inc()
}
// IncReloadErrorCount increments the reload error counter
func (cm *Controller) IncReloadErrorCount() {
cm.reloadOperationErrors.With(cm.constLabels).Inc()
}
// OnStartedLeading indicates the pod was elected as the leader
func (cm *Controller) OnStartedLeading(electionID string) {
cm.leaderElection.WithLabelValues(electionID).Set(1.0)
}
// OnStoppedLeading indicates the pod stopped being the leader
func (cm *Controller) OnStoppedLeading(electionID string) {
cm.leaderElection.WithLabelValues(electionID).Set(0)
}
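// The leader election gauge is exported per lease name; assuming the standard
// "nginx_ingress_controller" prefix, a value of 1 on
// nginx_ingress_controller_leader_election_status marks the current leader and
// 0 marks followers, which makes it easy to alert when no pod reports 1.
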
// IncCheckCount increments the check counter
func (cm *Controller) IncCheckCount(namespace, name string) {
labels := prometheus.Labels{
"namespace": namespace,
"ingress": name,
}
cm.checkIngressOperation.MustCurryWith(cm.constLabels).With(labels).Inc()
}
// IncCheckErrorCount increments the check error counter
func (cm *Controller) IncCheckErrorCount(namespace, name string) {
labels := prometheus.Labels{
"namespace": namespace,
"ingress": name,
}
cm.checkIngressOperationErrors.MustCurryWith(cm.constLabels).With(labels).Inc()
}
// IncOrphanIngress sets the orphaned ingress gauge to one
func (cm *Controller) IncOrphanIngress(namespace, name, orphanityType string) {
labels := prometheus.Labels{
"namespace": namespace,
"ingress": name,
"type": orphanityType,
}
cm.OrphanIngress.MustCurryWith(cm.constLabels).With(labels).Set(1.0)
}
// DecOrphanIngress sets the orphaned ingress gauge to zero (all services have their endpoints)
func (cm *Controller) DecOrphanIngress(namespace, name, orphanityType string) {
labels := prometheus.Labels{
"namespace": namespace,
"ingress": name,
"type": orphanityType,
}
cm.OrphanIngress.MustCurryWith(cm.constLabels).With(labels).Set(0.0)
}
// ConfigSuccess sets a boolean flag according to the result of the controller configuration reload
func (cm *Controller) ConfigSuccess(hash uint64, success bool) {
if success {
cm.configSuccessTime.Set(float64(time.Now().Unix()))
cm.configSuccess.Set(1)
cm.configHash.Set(float64(hash))
return
}
cm.configSuccess.Set(0)
cm.configHash.Set(0)
}
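// Reload failures are observable through these gauges; for example (assuming
// the standard "nginx_ingress_controller" prefix), an alert can fire while
// nginx_ingress_controller_config_last_reload_successful == 0 holds.
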
// Describe implements prometheus.Collector
func (cm *Controller) Describe(ch chan<- *prometheus.Desc) {
cm.configHash.Describe(ch)
cm.configSuccess.Describe(ch)
cm.configSuccessTime.Describe(ch)
cm.reloadOperation.Describe(ch)
cm.reloadOperationErrors.Describe(ch)
cm.checkIngressOperation.Describe(ch)
cm.checkIngressOperationErrors.Describe(ch)
cm.sslExpireTime.Describe(ch)
cm.sslInfo.Describe(ch)
cm.leaderElection.Describe(ch)
cm.buildInfo.Describe(ch)
cm.OrphanIngress.Describe(ch)
}
// Collect implements the prometheus.Collector interface.
func (cm *Controller) Collect(ch chan<- prometheus.Metric) {
cm.configHash.Collect(ch)
cm.configSuccess.Collect(ch)
cm.configSuccessTime.Collect(ch)
cm.reloadOperation.Collect(ch)
cm.reloadOperationErrors.Collect(ch)
cm.checkIngressOperation.Collect(ch)
cm.checkIngressOperationErrors.Collect(ch)
cm.sslExpireTime.Collect(ch)
cm.sslInfo.Collect(ch)
cm.leaderElection.Collect(ch)
cm.buildInfo.Collect(ch)
cm.OrphanIngress.Collect(ch)
}
// SetSSLExpireTime sets the expiration time of SSL Certificates
func (cm *Controller) SetSSLExpireTime(servers []*ingress.Server) {
for _, s := range servers {
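// Only servers with a hostname and a certificate that carries a valid
// expiration time are reported.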
if s.Hostname == "" || s.SSLCert == nil || s.SSLCert.ExpireTime.Unix() <= 0 {
continue
}
labels := make(prometheus.Labels, len(cm.labels)+1)
for k, v := range cm.labels {
labels[k] = v
}
labels["host"] = s.Hostname
labels["secret_name"] = s.SSLCert.Name
labels["identifier"] = s.SSLCert.Identifier()
cm.sslExpireTime.With(labels).Set(float64(s.SSLCert.ExpireTime.Unix()))
}
}
// SetSSLInfo creates a metric with all certificate information
func (cm *Controller) SetSSLInfo(servers []*ingress.Server) {
for _, s := range servers {
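// Skip servers whose certificate has not been fully parsed; without a
// serial number there is nothing meaningful to expose.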
if s.SSLCert == nil || s.SSLCert.Certificate == nil || s.SSLCert.Certificate.SerialNumber == nil {
continue
}
labels := make(prometheus.Labels, len(cm.labels)+1)
for k, v := range cm.labels {
labels[k] = v
}
labels["identifier"] = s.SSLCert.Identifier()
labels["host"] = s.Hostname
labels["secret_name"] = s.SSLCert.Name
labels["namespace"] = s.SSLCert.Namespace
labels["issuer_common_name"] = s.SSLCert.Certificate.Issuer.CommonName
labels["issuer_organization"] = ""
if len(s.SSLCert.Certificate.Issuer.Organization) > 0 {
labels["issuer_organization"] = s.SSLCert.Certificate.Issuer.Organization[0]
}
labels["serial_number"] = s.SSLCert.Certificate.SerialNumber.String()
labels["public_key_algorithm"] = s.SSLCert.Certificate.PublicKeyAlgorithm.String()
cm.sslInfo.With(labels).Set(1)
}
}
// RemoveMetrics removes the metrics of certificates that are no longer available, matching them by identifier
func (cm *Controller) RemoveMetrics(certificates []string, registry prometheus.Gatherer) {
cm.removeSSLExpireMetrics(true, certificates, registry)
cm.removeCertificatesMetrics(true, certificates, registry)
}
// RemoveAllSSLMetrics removes all SSL certificate metrics (expiration time and certificate info)
func (cm *Controller) RemoveAllSSLMetrics(registry prometheus.Gatherer) {
cm.removeSSLExpireMetrics(false, []string{}, registry)
cm.removeCertificatesMetrics(false, []string{}, registry)
}
func (cm *Controller) removeCertificatesMetrics(onlyDefinedHosts bool, certificates []string, registry prometheus.Gatherer) {
mfs, err := registry.Gather()
if err != nil {
klog.Errorf("Error gathering metrics: %v", err)
return
}
toRemove := sets.NewString(certificates...)
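// Walk the currently exported metric families and delete the matching
// ssl_certificate_info series; when onlyDefinedHosts is set, only series
// whose identifier is in the removal set are deleted.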
for _, mf := range mfs {
metricName := mf.GetName()
if fmt.Sprintf("%v_ssl_certificate_info", PrometheusNamespace) != metricName {
continue
}
for _, m := range mf.GetMetric() {
labels := make(map[string]string, len(m.GetLabel()))
for _, labelPair := range m.GetLabel() {
labels[*labelPair.Name] = *labelPair.Value
}
// remove labels that are constant
deleteConstants(labels)
identifier, ok := labels["identifier"]
if !ok {
continue
}
if onlyDefinedHosts && !toRemove.Has(identifier) {
continue
}
klog.V(2).Infof("Removing prometheus metric from gauge %v for identifier %v", metricName, identifier)
removed := cm.sslInfo.Delete(labels)
if !removed {
klog.V(2).Infof("metric %v for identifier %v with labels not removed: %v", metricName, identifier, labels)
}
}
}
}
func (cm *Controller) removeSSLExpireMetrics(onlyDefinedCerts bool, certificates []string, registry prometheus.Gatherer) {
mfs, err := registry.Gather()
if err != nil {
klog.ErrorS(err, "Error gathering metrics")
return
}
toRemove := sets.NewString(certificates...)
for _, mf := range mfs {
metricName := mf.GetName()
if fmt.Sprintf("%v_ssl_expire_time_seconds", PrometheusNamespace) != metricName {
continue
}
for _, m := range mf.GetMetric() {
labels := make(map[string]string, len(m.GetLabel()))
for _, labelPair := range m.GetLabel() {
labels[*labelPair.Name] = *labelPair.Value
}
// remove labels that are constant
deleteConstants(labels)
identifier, ok := labels["identifier"]
if !ok {
continue
}
host, ok := labels["host"]
if !ok {
continue
}
if onlyDefinedCerts && !toRemove.Has(identifier) {
continue
}
klog.V(2).InfoS("Removing prometheus metric", "gauge", metricName, "host", host, "identifier", identifier)
removed := cm.sslExpireTime.Delete(labels)
if !removed {
klog.V(2).InfoS("metric removed", "metric", metricName, "host", host, "identifier", identifier, "labels", labels)
}
}
}
}