ingress-nginx-helm/internal/ingress/metric/collectors/controller_test.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collectors

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"k8s.io/ingress-nginx/internal/ingress"
)
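
// TestControllerCounters exercises the reload counters and the SSL
// certificate collectors through table-driven cases, comparing the gathered
// metrics against the expected Prometheus exposition text.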
func TestControllerCounters(t *testing.T) {
	const metadata = `
# HELP nginx_ingress_controller_config_last_reload_successful Whether the last configuration reload attempt was successful
# TYPE nginx_ingress_controller_config_last_reload_successful gauge
# HELP nginx_ingress_controller_success Cumulative number of Ingress controller reload operations
# TYPE nginx_ingress_controller_success counter
`

	cases := []struct {
		name    string
		test    func(*Controller)
		metrics []string
		want    string
	}{
		{
			name: "should not increment metrics if no operations are invoked",
			test: func(cm *Controller) {
			},
			want: metadata + `
nginx_ingress_controller_config_last_reload_successful{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 0
`,
			metrics: []string{"nginx_ingress_controller_config_last_reload_successful", "nginx_ingress_controller_success"},
		},
		{
			name: "single increase in reload count should return 1",
			test: func(cm *Controller) {
				cm.IncReloadCount()
				cm.ConfigSuccess(0, true)
			},
			want: metadata + `
nginx_ingress_controller_config_last_reload_successful{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
nginx_ingress_controller_success{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
`,
			metrics: []string{"nginx_ingress_controller_config_last_reload_successful", "nginx_ingress_controller_success"},
		},
		{
			name: "single increase in error reload count should return 1",
			test: func(cm *Controller) {
				cm.IncReloadErrorCount()
			},
			want: `
# HELP nginx_ingress_controller_errors Cumulative number of Ingress controller errors during reload operations
# TYPE nginx_ingress_controller_errors counter
nginx_ingress_controller_errors{controller_class="nginx",controller_namespace="default",controller_pod="pod"} 1
`,
			metrics: []string{"nginx_ingress_controller_errors"},
		},
		{
			name: "should set SSL certificates metrics",
			test: func(cm *Controller) {
				t1, _ := time.Parse(
					time.RFC3339,
					"2012-11-01T22:08:41+00:00")
				servers := []*ingress.Server{
					{
						Hostname: "demo",
						SSLCert: &ingress.SSLCert{
							ExpireTime: t1,
						},
					},
					{
						Hostname: "invalid",
						SSLCert: &ingress.SSLCert{
							ExpireTime: time.Unix(0, 0),
						},
					},
				}
				cm.SetSSLExpireTime(servers)
			},
			want: `
# HELP nginx_ingress_controller_ssl_expire_time_seconds Number of seconds since 1970 to the SSL Certificate expire.\n An example to check if this certificate will expire in 10 days is: "nginx_ingress_controller_ssl_expire_time_seconds < (time() + (10 * 24 * 3600))"
# TYPE nginx_ingress_controller_ssl_expire_time_seconds gauge
nginx_ingress_controller_ssl_expire_time_seconds{class="nginx",host="demo",namespace="default",secret_name=""} 1.351807721e+09
`,
			metrics: []string{"nginx_ingress_controller_ssl_expire_time_seconds"},
		},
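
		// The remaining cases exercise the nginx_ingress_controller_ssl_certificate_info
		// metric, which exposes details of the currently loaded certificates (for
		// example, to detect certificate discrepancies across controller pods).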
		{
			name: "should set SSL certificate info metrics",
			test: func(cm *Controller) {
				servers := []*ingress.Server{
					{
						Hostname: "demo",
						SSLCert: &ingress.SSLCert{
							Name:      "secret-name",
							Namespace: "ingress-namespace",
							Certificate: &x509.Certificate{
								PublicKeyAlgorithm: x509.ECDSA,
								Issuer: pkix.Name{
									CommonName:   "certificate issuer",
									SerialNumber: "abcd1234",
									Organization: []string{"issuer org"},
								},
								SerialNumber: big.NewInt(100),
							},
						},
					},
					{
						Hostname: "invalid",
						SSLCert: &ingress.SSLCert{
							ExpireTime: time.Unix(0, 0),
						},
					},
				}
				cm.SetSSLInfo(servers)
			},
			want: `
# HELP nginx_ingress_controller_ssl_certificate_info Hold all labels associated to a certificate
# TYPE nginx_ingress_controller_ssl_certificate_info gauge
nginx_ingress_controller_ssl_certificate_info{class="nginx",host="demo",identifier="abcd1234-100",issuer_common_name="certificate issuer",issuer_organization="issuer org",namespace="ingress-namespace",public_key_algorithm="ECDSA",secret_name="secret-name",serial_number="100"} 1
`,
			metrics: []string{"nginx_ingress_controller_ssl_certificate_info"},
		},
		{
			name: "should ignore certificates without serial number",
			test: func(cm *Controller) {
				servers := []*ingress.Server{
					{
						Hostname: "demo",
						SSLCert: &ingress.SSLCert{
							Name:      "secret-name",
							Namespace: "ingress-namespace",
							Certificate: &x509.Certificate{
								PublicKeyAlgorithm: x509.ECDSA,
								Issuer: pkix.Name{
									CommonName:   "certificate issuer",
									SerialNumber: "abcd1234",
								},
							},
						},
					},
				}
				cm.SetSSLInfo(servers)
			},
			want:    ``,
			metrics: []string{"nginx_ingress_controller_ssl_certificate_info"},
		},
		{
			name: "should ignore certificates with nil x509 pointer",
			test: func(cm *Controller) {
				servers := []*ingress.Server{
					{
						Hostname: "demo",
						SSLCert: &ingress.SSLCert{
							Name:        "secret-name",
							Namespace:   "ingress-namespace",
							Certificate: nil,
						},
					},
				}
				cm.SetSSLInfo(servers)
			},
			want:    ``,
			metrics: []string{"nginx_ingress_controller_ssl_certificate_info"},
		},
		{
			name: "should ignore servers without certificates",
			test: func(cm *Controller) {
				servers := []*ingress.Server{
					{
						Hostname: "demo",
					},
				}
				cm.SetSSLInfo(servers)
			},
			want:    ``,
			metrics: []string{"nginx_ingress_controller_ssl_certificate_info"},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			cm := NewController("pod", "default", "nginx")
			reg := prometheus.NewPedanticRegistry()
			if err := reg.Register(cm); err != nil {
				t.Errorf("registering collector failed: %s", err)
			}
			c.test(cm)
			if err := GatherAndCompare(cm, c.want, c.metrics, reg); err != nil {
				t.Errorf("unexpected collecting result:\n%s", err)
			}
			reg.Unregister(cm)
		})
	}
}
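
// TestRemoveMetrics verifies that SSL metrics for a given set of hosts and
// certificate identifiers are removed from the registry.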
func TestRemoveMetrics(t *testing.T) {
	cm := NewController("pod", "default", "nginx")
	reg := prometheus.NewPedanticRegistry()
	if err := reg.Register(cm); err != nil {
		t.Errorf("registering collector failed: %s", err)
	}

	t1, _ := time.Parse(
		time.RFC3339,
		"2012-11-01T22:08:41+00:00")

	servers := []*ingress.Server{
		{
			Hostname: "demo",
			SSLCert: &ingress.SSLCert{
				ExpireTime: t1,
				Certificate: &x509.Certificate{
					Issuer: pkix.Name{
						CommonName:   "certificate issuer",
						SerialNumber: "abcd1234",
					},
					SerialNumber: big.NewInt(100),
				},
			},
		},
		{
			Hostname: "invalid",
			SSLCert: &ingress.SSLCert{
				ExpireTime: time.Unix(0, 0),
			},
		},
	}
	cm.SetSSLExpireTime(servers)
	cm.SetSSLInfo(servers)

	cm.RemoveMetrics([]string{"demo"}, []string{"abcd1234-100"}, reg)

	if err := GatherAndCompare(cm, "", []string{"nginx_ingress_controller_ssl_expire_time_seconds"}, reg); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}

	if err := GatherAndCompare(cm, "", []string{"nginx_ingress_controller_ssl_certificate_info"}, reg); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}

	reg.Unregister(cm)
}
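
// TestRemoveAllSSLMetrics verifies that RemoveAllSSLMetrics clears every SSL
// expiry and certificate info series from the registry at once.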
func TestRemoveAllSSLMetrics(t *testing.T) {
	cm := NewController("pod", "default", "nginx")
	reg := prometheus.NewPedanticRegistry()
	if err := reg.Register(cm); err != nil {
		t.Errorf("registering collector failed: %s", err)
	}

	t1, _ := time.Parse(
		time.RFC3339,
		"2012-11-01T22:08:41+00:00")

	servers := []*ingress.Server{
		{
			Hostname: "demo",
			SSLCert: &ingress.SSLCert{
				ExpireTime: t1,
				Certificate: &x509.Certificate{
					Issuer: pkix.Name{
						CommonName:   "certificate issuer",
						SerialNumber: "abcd1234",
					},
					SerialNumber: big.NewInt(100),
				},
			},
		},
		{
			Hostname: "invalid",
			SSLCert: &ingress.SSLCert{
				ExpireTime: time.Unix(0, 0),
			},
		},
	}
	cm.SetSSLExpireTime(servers)
	cm.SetSSLInfo(servers)

	cm.RemoveAllSSLMetrics(reg)

	if err := GatherAndCompare(cm, "", []string{"nginx_ingress_controller_ssl_expire_time_seconds"}, reg); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}

	if err := GatherAndCompare(cm, "", []string{"nginx_ingress_controller_ssl_certificate_info"}, reg); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}

	reg.Unregister(cm)
}