/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/mitchellh/hashstructure/v2"

	apiv1 "k8s.io/api/core/v1"
	networking "k8s.io/api/networking/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/ingress-nginx/internal/ingress/annotations"
	"k8s.io/ingress-nginx/internal/ingress/annotations/canary"
	"k8s.io/ingress-nginx/internal/ingress/annotations/log"
	"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
	"k8s.io/ingress-nginx/internal/ingress/annotations/proxy"
	ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
	"k8s.io/ingress-nginx/internal/ingress/controller/ingressclass"
	"k8s.io/ingress-nginx/internal/ingress/controller/store"
	"k8s.io/ingress-nginx/internal/ingress/errors"
	"k8s.io/ingress-nginx/internal/ingress/inspector"
	"k8s.io/ingress-nginx/internal/ingress/metric/collectors"
	"k8s.io/ingress-nginx/internal/k8s"
	"k8s.io/ingress-nginx/internal/nginx"
	"k8s.io/ingress-nginx/pkg/apis/ingress"
	utilingress "k8s.io/ingress-nginx/pkg/util/ingress"

	"k8s.io/klog/v2"
)

const (
	defUpstreamName             = "upstream-default-backend"
	defServerName               = "_"
	rootLocation                = "/"
	emptyZone                   = ""
	orphanMetricLabelNoService  = "no-service"
	orphanMetricLabelNoEndpoint = "no-endpoint"
)
// Configuration contains all the settings required by an Ingress controller
type Configuration struct {
	APIServerHost string
	RootCAFile    string

	KubeConfigFile string

	Client clientset.Interface

	ResyncPeriod time.Duration

	ConfigMapName  string
	DefaultService string

	Namespace string

	WatchNamespaceSelector labels.Selector

	// +optional
	TCPConfigMapName string
	// +optional
	UDPConfigMapName string

	DefaultSSLCertificate string

	// +optional
	PublishService       string
	PublishStatusAddress string

	UpdateStatus           bool
	UseNodeInternalIP      bool
	ElectionID             string
	UpdateStatusOnShutdown bool

	HealthCheckHost string
	ListenPorts     *ngx_config.ListenPorts

	DisableServiceExternalName bool

	EnableSSLPassthrough bool

	EnableProfiling bool

	EnableMetrics       bool
	MetricsPerHost      bool
	MetricsBuckets      *collectors.HistogramBuckets
	ReportStatusClasses bool

	FakeCertificate *ingress.SSLCert

	SyncRateLimit float32

	DisableCatchAll bool

	IngressClassConfiguration *ingressclass.IngressClassConfiguration

	ValidationWebhook         string
	ValidationWebhookCertPath string
	ValidationWebhookKeyPath  string
	// DisableFullValidationTest, set by the --disable-full-test flag (#7514),
	// limits the admission-time template test to the single ingress under
	// validation instead of the full set of ingresses. The overlap check still
	// runs against all ingresses; only the rendered configuration passed to
	// generateTemplate/testTemplate shrinks, which substantially reduces the
	// duration, memory, and CPU cost of admission on clusters with thousands
	// of ingresses.
	DisableFullValidationTest bool

	GlobalExternalAuth  *ngx_config.GlobalExternalAuth
	MaxmindEditionFiles *[]string

	MonitorMaxBatchSize int

	PostShutdownGracePeriod int
	ShutdownGracePeriod     int

	InternalLoggerAddress string
	IsChroot              bool
	DeepInspector         bool

	DynamicConfigurationRetries int

	DisableSyncEvents bool

	EnableTopologyAwareRouting bool
}
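
// getIngressPodZone returns the topology zone of the node where the
// controller pod is running when the given Service has the topology-aware
// hints annotation set to "auto"; otherwise it returns emptyZone.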
func getIngressPodZone(svc *apiv1.Service) string {
	svcKey := k8s.MetaNamespaceKey(svc)
	if svcZoneAnnotation, ok := svc.ObjectMeta.GetAnnotations()[apiv1.AnnotationTopologyAwareHints]; ok {
		if strings.ToLower(svcZoneAnnotation) == "auto" {
			if foundZone, ok := k8s.IngressNodeDetails.GetLabels()[apiv1.LabelTopologyZone]; ok {
				klog.V(3).Infof("Svc has topology aware annotation enabled, try to use zone %q where controller pod is running for Service %q", foundZone, svcKey)
				return foundZone
			}
		}
	}
	return emptyZone
}
// GetPublishService returns the Service used to set the load-balancer status of Ingresses.
func (n NGINXController) GetPublishService() *apiv1.Service {
	s, err := n.store.GetService(n.cfg.PublishService)
	if err != nil {
		return nil
	}

	return s
}
// syncIngress collects all the pieces required to assemble the NGINX
// configuration file and passes the resulting data structures to the backend
// (OnUpdate) when a reload is deemed necessary.
func (n *NGINXController) syncIngress(interface{}) error {
	n.syncRateLimiter.Accept()

	if n.syncQueue.IsShuttingDown() {
		return nil
	}

	ings := n.store.ListIngresses()
	hosts, servers, pcfg := n.getConfiguration(ings)

	n.metricCollector.SetSSLExpireTime(servers)
	// Expose details about the currently loaded certificates (#8253) so that
	// certificate discrepancies across controller pods can be detected, e.g.
	// by alerting on nginx_ingress_controller_ssl_certificate_info grouped by
	// serial_number.
	n.metricCollector.SetSSLInfo(servers)
	if n.runningConfig.Equal(pcfg) {
		klog.V(3).Infof("No configuration change detected, skipping backend reload")
		return nil
	}

	n.metricCollector.SetHosts(hosts)

	if !utilingress.IsDynamicConfigurationEnough(pcfg, n.runningConfig) {
		klog.InfoS("Configuration changes detected, backend reload required")
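
		// Checksum the parsed configuration (hashing fields via their json
		// tags) so that the reload success/failure metrics below can be tied
		// to a specific configuration.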
		hash, _ := hashstructure.Hash(pcfg, hashstructure.FormatV1, &hashstructure.HashOptions{
			TagName: "json",
		})

		pcfg.ConfigurationChecksum = fmt.Sprintf("%v", hash)

		err := n.OnUpdate(*pcfg)
		if err != nil {
			n.metricCollector.IncReloadErrorCount()
			n.metricCollector.ConfigSuccess(hash, false)
			klog.Errorf("Unexpected failure reloading the backend:\n%v", err)
			n.recorder.Eventf(k8s.IngressPodDetails, apiv1.EventTypeWarning, "RELOAD", fmt.Sprintf("Error reloading NGINX: %v", err))
			return err
		}

		klog.InfoS("Backend successfully reloaded")
		n.metricCollector.ConfigSuccess(hash, true)
		n.metricCollector.IncReloadCount()

		n.recorder.Eventf(k8s.IngressPodDetails, apiv1.EventTypeNormal, "RELOAD", "NGINX reload triggered due to a change in configuration")
	}

	isFirstSync := n.runningConfig.Equal(&ingress.Configuration{})
	if isFirstSync {
		// For the initial sync it always takes some time for NGINX to start listening
		// For large configurations it might take a while so we loop and back off
		klog.InfoS("Initial sync, sleeping for 1 second")
		time.Sleep(1 * time.Second)
	}
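
	// Retry dynamic reconfiguration with exponential backoff: one initial
	// attempt plus DynamicConfigurationRetries retries, starting at one
	// second and growing by a factor of 1.3 with 10% jitter.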
	retry := wait.Backoff{
		Steps:    1 + n.cfg.DynamicConfigurationRetries,
		Duration: time.Second,
		Factor:   1.3,
		Jitter:   0.1,
	}

	retriesRemaining := retry.Steps
	err := wait.ExponentialBackoff(retry, func() (bool, error) {
		err := n.configureDynamically(pcfg)
		if err == nil {
			klog.V(2).Infof("Dynamic reconfiguration succeeded.")
			return true, nil
		}

		retriesRemaining--
		if retriesRemaining > 0 {
			klog.Warningf("Dynamic reconfiguration failed (retrying; %d retries left): %v", retriesRemaining, err)
			return false, nil
		}

		klog.Warningf("Dynamic reconfiguration failed: %v", err)
		return false, err
	})
	if err != nil {
		klog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err)
		return err
	}

	ri := utilingress.GetRemovedIngresses(n.runningConfig, pcfg)
	re := utilingress.GetRemovedHosts(n.runningConfig, pcfg)
	rc := utilingress.GetRemovedCertificateSerialNumbers(n.runningConfig, pcfg)
	n.metricCollector.RemoveMetrics(ri, re, rc)

	n.runningConfig = pcfg

	return nil
}
// CheckIngress returns an error in case the provided ingress, when added
// to the current configuration, generates an invalid configuration
func (n *NGINXController) CheckIngress(ing *networking.Ingress) error {
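	// Timestamps are taken in milliseconds and feed the admission metrics
	// reported at the end of this check.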
	startCheck := time.Now().UnixNano() / 1000000

	if ing == nil {
		// no ingress to add, no state change
		return nil
	}

	// Skip checks if the ingress is marked as deleted
	if !ing.DeletionTimestamp.IsZero() {
		return nil
	}

	if n.cfg.DeepInspector {
		if err := inspector.DeepInspect(ing); err != nil {
			return fmt.Errorf("invalid object: %w", err)
		}
	}

	// Do not attempt to validate an ingress that's not meant to be controlled by the current instance of the controller.
	if ingressClass, err := n.store.GetIngressClass(ing, n.cfg.IngressClassConfiguration); ingressClass == "" {
		klog.Warningf("ignoring ingress %v in %v based on annotation %v: %v", ing.Name, ing.ObjectMeta.Namespace, ingressClass, err)
		return nil
	}

	if n.cfg.Namespace != "" && ing.ObjectMeta.Namespace != n.cfg.Namespace {
		klog.Warningf("ignoring ingress %v in namespace %v different from the namespace watched %s", ing.Name, ing.ObjectMeta.Namespace, n.cfg.Namespace)
		return nil
	}

	if n.cfg.DisableCatchAll && ing.Spec.DefaultBackend != nil {
		return fmt.Errorf("This deployment is trying to create a catch-all ingress while DisableCatchAll flag is set to true. Remove '.spec.defaultBackend' or set DisableCatchAll flag to false.")
	}
	startRender := time.Now().UnixNano() / 1000000

	cfg := n.store.GetBackendConfiguration()
	cfg.Resolver = n.resolver

	var arrayBadWords []string
	if cfg.AnnotationValueWordBlocklist != "" {
		arrayBadWords = strings.Split(strings.TrimSpace(cfg.AnnotationValueWordBlocklist), ",")
	}
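
	// Illustration (values are examples only): with AnnotationValueWordBlocklist
	// set to "load_module,serviceaccount", any annotation under the controller's
	// prefix whose value contains one of those words is rejected by the loop
	// below.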
	for key, value := range ing.ObjectMeta.GetAnnotations() {
		if parser.AnnotationsPrefix != parser.DefaultAnnotationsPrefix {
			if strings.HasPrefix(key, fmt.Sprintf("%s/", parser.DefaultAnnotationsPrefix)) {
				return fmt.Errorf("This deployment has a custom annotation prefix defined. Use '%s' instead of '%s'", parser.AnnotationsPrefix, parser.DefaultAnnotationsPrefix)
			}
		}

		if strings.HasPrefix(key, fmt.Sprintf("%s/", parser.AnnotationsPrefix)) && len(arrayBadWords) != 0 {
			for _, forbiddenvalue := range arrayBadWords {
				if strings.Contains(value, strings.TrimSpace(forbiddenvalue)) {
					return fmt.Errorf("%s annotation contains invalid word %s", key, forbiddenvalue)
				}
			}
		}

		if !cfg.AllowSnippetAnnotations && strings.HasSuffix(key, "-snippet") {
			return fmt.Errorf("%s annotation cannot be used. Snippet directives are disabled by the Ingress administrator", key)
		}

		if len(cfg.GlobalRateLimitMemcachedHost) == 0 && strings.HasPrefix(key, fmt.Sprintf("%s/%s", parser.AnnotationsPrefix, "global-rate-limit")) {
			return fmt.Errorf("'global-rate-limit*' annotations require 'global-rate-limit-memcached-host' settings configured in the global configmap")
		}
	}
	k8s.SetDefaultNGINXPathType(ing)

	allIngresses := n.store.ListIngresses()

	filter := func(toCheck *ingress.Ingress) bool {
		return toCheck.ObjectMeta.Namespace == ing.ObjectMeta.Namespace &&
			toCheck.ObjectMeta.Name == ing.ObjectMeta.Name
	}
	ings := store.FilterIngresses(allIngresses, filter)
	ings = append(ings, &ingress.Ingress{
		Ingress:           *ing,
		ParsedAnnotations: annotations.NewAnnotationExtractor(n.store).Extract(ing),
	})

	startTest := time.Now().UnixNano() / 1000000
	_, servers, pcfg := n.getConfiguration(ings)

	err := checkOverlap(ing, allIngresses, servers)
	if err != nil {
		n.metricCollector.IncCheckErrorCount(ing.ObjectMeta.Namespace, ing.Name)
		return err
	}
	testedSize := len(ings)
	// When full validation is disabled (#7514), rebuild pcfg from only the
	// ingress under admission so the template render/test below covers a
	// single-ingress configuration.
	if n.cfg.DisableFullValidationTest {
		_, _, pcfg = n.getConfiguration(ings[len(ings)-1:])
		testedSize = 1
	}
	content, err := n.generateTemplate(cfg, *pcfg)
	if err != nil {
		n.metricCollector.IncCheckErrorCount(ing.ObjectMeta.Namespace, ing.Name)
		return err
	}

	err = n.testTemplate(content)
	if err != nil {
		n.metricCollector.IncCheckErrorCount(ing.ObjectMeta.Namespace, ing.Name)
		return err
	}
	n.metricCollector.IncCheckCount(ing.ObjectMeta.Namespace, ing.Name)

	endCheck := time.Now().UnixNano() / 1000000
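
	// Report admission metrics derived from the values computed above:
	// ingresses tested, testing duration, total ingress count, rendering
	// duration, rendered configuration size, and overall admission duration
	// (durations converted from milliseconds to seconds).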
	n.metricCollector.SetAdmissionMetrics(
		float64(testedSize),
		float64(endCheck-startTest)/1000,
		float64(len(ings)),
		float64(startTest-startRender)/1000,
		float64(len(content)),
		float64(endCheck-startCheck)/1000,
	)

	return nil
}
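
// getStreamServices builds the list of L4 (TCP or UDP) services exposed via
// the given ConfigMap, skipping entries that are malformed, that use ports
// reserved by the controller, or that have no active Endpoints.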
func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service {
	if configmapName == "" {
		return []ingress.L4Service{}
	}

	klog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName)

	_, _, err := k8s.ParseNameNS(configmapName)
	if err != nil {
		klog.Warningf("Error parsing ConfigMap reference %q: %v", configmapName, err)
		return []ingress.L4Service{}
	}

	configmap, err := n.store.GetConfigMap(configmapName)
	if err != nil {
		klog.Warningf("Error getting ConfigMap %q: %v", configmapName, err)
		return []ingress.L4Service{}
	}

	var svcs []ingress.L4Service
	var svcProxyProtocol ingress.ProxyProtocol

	rp := []int{
		n.cfg.ListenPorts.HTTP,
		n.cfg.ListenPorts.HTTPS,
		n.cfg.ListenPorts.SSLProxy,
		n.cfg.ListenPorts.Health,
		n.cfg.ListenPorts.Default,
		nginx.ProfilerPort,
		nginx.StatusPort,
		nginx.StreamPort,
	}

	reservedPorts := sets.NewInt(rp...)

	// svcRef format: <(str)namespace>/<(str)service>:<(intstr)port>[:<("PROXY")decode>:<("PROXY")encode>]
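	// For example (illustrative values only), the entry
	//   "9000": "default/example-service:8080:PROXY:PROXY"
	// exposes external port 9000, proxying to port 8080 of the Service
	// "default/example-service" with PROXY protocol enabled for both decode
	// and encode.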
	for port, svcRef := range configmap.Data {
		externalPort, err := strconv.Atoi(port) // #nosec
		if err != nil {
			klog.Warningf("%q is not a valid %v port number", port, proto)
			continue
		}

		if reservedPorts.Has(externalPort) {
			klog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto)
			continue
		}

		nsSvcPort := strings.Split(svcRef, ":")
		if len(nsSvcPort) < 2 {
			klog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort)
			continue
		}

		nsName := nsSvcPort[0]
		svcPort := nsSvcPort[1]
		svcProxyProtocol.Decode = false
		svcProxyProtocol.Encode = false

		// Proxy Protocol is only compatible with TCP Services
		if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP {
			if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" {
				svcProxyProtocol.Decode = true
			}
			if len(nsSvcPort) == 4 && strings.ToUpper(nsSvcPort[3]) == "PROXY" {
				svcProxyProtocol.Encode = true
			}
		}

		svcNs, svcName, err := k8s.ParseNameNS(nsName)
		if err != nil {
			klog.Warningf("%v", err)
			continue
		}

		svc, err := n.store.GetService(nsName)
		if err != nil {
			klog.Warningf("Error getting Service %q: %v", nsName, err)
			continue
		}

		var endps []ingress.Endpoint
		/* #nosec */
		targetPort, err := strconv.Atoi(svcPort) // #nosec

		var zone string
		if n.cfg.EnableTopologyAwareRouting {
			zone = getIngressPodZone(svc)
		} else {
			zone = emptyZone
		}

		if err != nil {
			// not a port number, fall back to using port name
			klog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName)
			for i := range svc.Spec.Ports {
				sp := svc.Spec.Ports[i]
				if sp.Name == svcPort {
					if sp.Protocol == proto {
						endps = getEndpointsFromSlices(svc, &sp, proto, zone, n.store.GetServiceEndpointsSlices)
						break
					}
				}
			}
		} else {
			klog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName)
			for i := range svc.Spec.Ports {
				sp := svc.Spec.Ports[i]
				if sp.Port == int32(targetPort) {
					if sp.Protocol == proto {
						endps = getEndpointsFromSlices(svc, &sp, proto, zone, n.store.GetServiceEndpointsSlices)
						break
					}
				}
			}
		}

		// stream services cannot contain empty upstreams and there is
		// no default backend equivalent
		if len(endps) == 0 {
			klog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort)
			continue
		}

		svcs = append(svcs, ingress.L4Service{
			Port: externalPort,
			Backend: ingress.L4Backend{
				Name:          svcName,
				Namespace:     svcNs,
				Port:          intstr.FromString(svcPort),
				Protocol:      proto,
				ProxyProtocol: svcProxyProtocol,
			},
			Endpoints: endps,
			Service:   svc,
		})
	}

	// Keep upstream order sorted to reduce unnecessary nginx config reloads.
	sort.SliceStable(svcs, func(i, j int) bool {
		return svcs[i].Port < svcs[j].Port
	})

	return svcs
}
// getDefaultUpstream returns the upstream associated with the default backend.
// Configures the upstream to return HTTP code 503 in case of error.
func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
	upstream := &ingress.Backend{
		Name: defUpstreamName,
	}
	svcKey := n.cfg.DefaultService

	if len(svcKey) == 0 {
		upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint())
		return upstream
	}

	svc, err := n.store.GetService(svcKey)
	if err != nil {
		klog.Warningf("Error getting default backend %q: %v", svcKey, err)
		upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint())
		return upstream
	}

	var zone string
	if n.cfg.EnableTopologyAwareRouting {
		zone = getIngressPodZone(svc)
	} else {
		zone = emptyZone
	}
	endps := getEndpointsFromSlices(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, zone, n.store.GetServiceEndpointsSlices)
	if len(endps) == 0 {
		klog.Warningf("Service %q does not have any active Endpoint", svcKey)
		endps = []ingress.Endpoint{n.DefaultEndpoint()}
	}

	upstream.Service = svc
	upstream.Endpoints = append(upstream.Endpoints, endps...)
	return upstream
}
// getConfiguration returns the configuration matching the standard Kubernetes
// Ingress objects, along with the set of hostnames and the list of servers.
func (n *NGINXController) getConfiguration(ingresses []*ingress.Ingress) (sets.Set[string], []*ingress.Server, *ingress.Configuration) {
	upstreams, servers := n.getBackendServers(ingresses)
	var passUpstreams []*ingress.SSLPassthroughBackend

	hosts := sets.New[string]()

	for _, server := range servers {
		// If a location is defined by a prefix string that ends with the slash character, and requests are processed by one of
		// proxy_pass, fastcgi_pass, uwsgi_pass, scgi_pass, memcached_pass, or grpc_pass, then the special processing is performed.
		// In response to a request with URI equal to this string, but without the trailing slash, a permanent redirect with the
		// code 301 will be returned to the requested URI with the slash appended. If this is not desired, an exact match of the
		// URI and location could be defined like this:
		//
		// location /user/ {
		//     proxy_pass http://user.example.com;
		// }
		// location = /user {
		//     proxy_pass http://login.example.com;
		// }
		server.Locations = updateServerLocations(server.Locations)

		if !hosts.Has(server.Hostname) {
			hosts.Insert(server.Hostname)
		}

		for _, alias := range server.Aliases {
			if !hosts.Has(alias) {
				hosts.Insert(alias)
			}
		}

		if !server.SSLPassthrough {
			continue
		}

		for _, loc := range server.Locations {
			if loc.Path != rootLocation {
				klog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname)
				continue
			}
			passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{
				Backend:  loc.Backend,
				Hostname: server.Hostname,
				Service:  loc.Service,
				Port:     loc.Port,
			})
			break
		}
	}

	return hosts, servers, &ingress.Configuration{
		Backends:              upstreams,
		Servers:               servers,
		TCPEndpoints:          n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP),
		UDPEndpoints:          n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP),
		PassthroughBackends:   passUpstreams,
		BackendConfigChecksum: n.store.GetBackendConfiguration().Checksum,
		DefaultSSLCertificate: n.getDefaultSSLCertificate(),
		StreamSnippets:        n.getStreamSnippets(ingresses),
	}
}
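
// dropSnippetDirectives clears every *-snippet annotation from the parsed
// annotations when snippet annotations are disallowed by the administrator,
// logging each removal at verbosity level 3.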
func dropSnippetDirectives(anns *annotations.Ingress, ingKey string) {
	if anns != nil {
		if anns.ConfigurationSnippet != "" {
			klog.V(3).Infof("Ingress %q tried to use configuration-snippet and the annotation is disabled by the admin. Removing the annotation", ingKey)
			anns.ConfigurationSnippet = ""
		}

		if anns.ServerSnippet != "" {
			klog.V(3).Infof("Ingress %q tried to use server-snippet and the annotation is disabled by the admin. Removing the annotation", ingKey)
			anns.ServerSnippet = ""
		}

		if anns.ModSecurity.Snippet != "" {
			klog.V(3).Infof("Ingress %q tried to use modsecurity-snippet and the annotation is disabled by the admin. Removing the annotation", ingKey)
			anns.ModSecurity.Snippet = ""
		}

		if anns.ExternalAuth.AuthSnippet != "" {
			klog.V(3).Infof("Ingress %q tried to use auth-snippet and the annotation is disabled by the admin. Removing the annotation", ingKey)
			anns.ExternalAuth.AuthSnippet = ""
		}

		if anns.StreamSnippet != "" {
			klog.V(3).Infof("Ingress %q tried to use stream-snippet and the annotation is disabled by the admin. Removing the annotation", ingKey)
			anns.StreamSnippet = ""
		}
	}
}
// getBackendServers returns a list of Upstream and Server to be used by the
// backend. An upstream can be used in multiple servers if the namespace,
// service name and port are the same.
func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*ingress.Backend, []*ingress.Server) {
	du := n.getDefaultUpstream()
	upstreams := n.createUpstreams(ingresses, du)
	servers := n.createServers(ingresses, upstreams, du)

	var canaryIngresses []*ingress.Ingress

	for _, ing := range ingresses {
		ingKey := k8s.MetaNamespaceKey(ing)
		anns := ing.ParsedAnnotations

		if !n.store.GetBackendConfiguration().AllowSnippetAnnotations {
			dropSnippetDirectives(anns, ingKey)
		}

		for _, rule := range ing.Spec.Rules {
			host := rule.Host
			if host == "" {
				host = defServerName
			}

			server := servers[host]
			if server == nil {
				server = servers[defServerName]
			}

			if rule.HTTP == nil &&
				host != defServerName {
				klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey)
				continue
			}

			if server.AuthTLSError == "" && anns.CertificateAuth.AuthTLSError != "" {
				server.AuthTLSError = anns.CertificateAuth.AuthTLSError
			}

			if server.CertificateAuth.CAFileName == "" {
				server.CertificateAuth = anns.CertificateAuth
				if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" {
					klog.V(3).Infof("Secret %q has no 'ca.crt' key, mutual authentication disabled for Ingress %q",
						server.CertificateAuth.Secret, ingKey)
				}
			} else {
				klog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)",
					server.Hostname, ingKey)
			}

			if !n.store.GetBackendConfiguration().ProxySSLLocationOnly {
				if server.ProxySSL.CAFileName == "" {
					server.ProxySSL = anns.ProxySSL
					if server.ProxySSL.Secret != "" && server.ProxySSL.CAFileName == "" {
						klog.V(3).Infof("Secret %q has no 'ca.crt' key, client cert authentication disabled for Ingress %q",
							server.ProxySSL.Secret, ingKey)
					}
				} else {
					klog.V(3).Infof("Server %q is already configured for client cert authentication (Ingress %q)",
						server.Hostname, ingKey)
				}
			}

			if rule.HTTP == nil {
				klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey)
				continue
			}

			for _, path := range rule.HTTP.Paths {
				if path.Backend.Service == nil {
					// skip non-service backends
					klog.V(3).Infof("Ingress %q and path %q does not contain a service backend, using default backend", ingKey, path.Path)
					continue
				}

				upsName := upstreamName(ing.Namespace, path.Backend.Service)

				ups := upstreams[upsName]

				// Backend is not referenced to by a server
				if ups.NoServer {
					continue
				}

				nginxPath := rootLocation
				if path.Path != "" {
					nginxPath = path.Path
				}

				addLoc := true
				for _, loc := range server.Locations {
					if loc.Path != nginxPath {
						continue
					}

					// Same paths but different types are allowed
					// (same type means overlap in the path definition)
					if !apiequality.Semantic.DeepEqual(loc.PathType, path.PathType) {
						break
					}

					addLoc = false

					if !loc.IsDefBackend {
						klog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)",
							loc.Path, server.Hostname, loc.Backend, ingKey)
						break
					}

					klog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)",
						loc.Path, server.Hostname, loc.Backend, ups.Name, ingKey)

					loc.Backend = ups.Name
					loc.IsDefBackend = false
					loc.Port = ups.Port
					loc.Service = ups.Service
					loc.Ingress = ing

					locationApplyAnnotations(loc, anns)

					if loc.Redirect.FromToWWW {
						server.RedirectFromToWWW = true
					}
					break
				}

				// new location
				if addLoc {
					klog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)",
						nginxPath, server.Hostname, ups.Name, ingKey)
					loc := &ingress.Location{
						Path:         nginxPath,
						PathType:     path.PathType,
						Backend:      ups.Name,
						IsDefBackend: false,
						Service:      ups.Service,
						Port:         ups.Port,
						Ingress:      ing,
					}
					locationApplyAnnotations(loc, anns)

					if loc.Redirect.FromToWWW {
						server.RedirectFromToWWW = true
					}
					server.Locations = append(server.Locations, loc)
				}

				if ups.SessionAffinity.AffinityType == "" {
					ups.SessionAffinity.AffinityType = anns.SessionAffinity.Type
				}

				if ups.SessionAffinity.AffinityMode == "" {
					ups.SessionAffinity.AffinityMode = anns.SessionAffinity.Mode
				}

				if anns.SessionAffinity.Type == "cookie" {
					cookiePath := anns.SessionAffinity.Cookie.Path
					if anns.Rewrite.UseRegex && cookiePath == "" {
						klog.Warningf("session-cookie-path should be set when use-regex is true")
					}

					ups.SessionAffinity.CookieSessionAffinity.Name = anns.SessionAffinity.Cookie.Name
					ups.SessionAffinity.CookieSessionAffinity.Expires = anns.SessionAffinity.Cookie.Expires
					ups.SessionAffinity.CookieSessionAffinity.MaxAge = anns.SessionAffinity.Cookie.MaxAge
					ups.SessionAffinity.CookieSessionAffinity.Secure = anns.SessionAffinity.Cookie.Secure
					ups.SessionAffinity.CookieSessionAffinity.Path = cookiePath
					ups.SessionAffinity.CookieSessionAffinity.Domain = anns.SessionAffinity.Cookie.Domain
					ups.SessionAffinity.CookieSessionAffinity.SameSite = anns.SessionAffinity.Cookie.SameSite
					ups.SessionAffinity.CookieSessionAffinity.ConditionalSameSiteNone = anns.SessionAffinity.Cookie.ConditionalSameSiteNone
					ups.SessionAffinity.CookieSessionAffinity.ChangeOnFailure = anns.SessionAffinity.Cookie.ChangeOnFailure

					locs := ups.SessionAffinity.CookieSessionAffinity.Locations
					if _, ok := locs[host]; !ok {
						locs[host] = []string{}
					}
					locs[host] = append(locs[host], path.Path)

					if len(server.Aliases) > 0 {
						for _, alias := range server.Aliases {
							if _, ok := locs[alias]; !ok {
								locs[alias] = []string{}
							}
							locs[alias] = append(locs[alias], path.Path)
						}
					}
				}
			}
		}

		// set aside canary ingresses to merge later
		if anns.Canary.Enabled {
			canaryIngresses = append(canaryIngresses, ing)
		}
	}

	if nonCanaryIngressExists(ingresses, canaryIngresses) {
		for _, canaryIng := range canaryIngresses {
			mergeAlternativeBackends(canaryIng, upstreams, servers)
		}
	}

	aUpstreams := make([]*ingress.Backend, 0, len(upstreams))

	for _, upstream := range upstreams {
		aUpstreams = append(aUpstreams, upstream)

		if upstream.Name == defUpstreamName {
			continue
		}

		isHTTPSfrom := []*ingress.Server{}
		for _, server := range servers {
			for _, location := range server.Locations {
				// use default backend
				if !shouldCreateUpstreamForLocationDefaultBackend(upstream, location) {
					continue
				}

				if len(location.DefaultBackend.Spec.Ports) == 0 {
					klog.Errorf("Custom default backend service %v/%v has no ports. Ignoring", location.DefaultBackend.Namespace, location.DefaultBackend.Name)
					continue
				}

				sp := location.DefaultBackend.Spec.Ports[0]
				var zone string
				if n.cfg.EnableTopologyAwareRouting {
					zone = getIngressPodZone(location.DefaultBackend)
				} else {
					zone = emptyZone
				}
				endps := getEndpointsFromSlices(location.DefaultBackend, &sp, apiv1.ProtocolTCP, zone, n.store.GetServiceEndpointsSlices)
				// custom backend is valid only if contains at least one endpoint
				if len(endps) > 0 {
					name := fmt.Sprintf("custom-default-backend-%v-%v", location.DefaultBackend.GetNamespace(), location.DefaultBackend.GetName())
					klog.V(3).Infof("Creating \"%v\" upstream based on default backend annotation", name)

					nb := upstream.DeepCopy()
					nb.Name = name
					nb.Endpoints = endps
					aUpstreams = append(aUpstreams, nb)
					location.DefaultBackendUpstreamName = name

					if len(upstream.Endpoints) == 0 {
						klog.V(3).Infof("Upstream %q has no active Endpoint, so using custom default backend for location %q in server %q (Service \"%v/%v\")",
							upstream.Name, location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name)

						location.Backend = name
					}
				}

				if server.SSLPassthrough {
					if location.Path == rootLocation {
						if location.Backend == defUpstreamName {
							klog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname)
							continue
						}
						isHTTPSfrom = append(isHTTPSfrom, server)
					}
				}
			}
		}

		if len(isHTTPSfrom) > 0 {
			upstream.SSLPassthrough = true
		}
	}
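
	// Within each server, sort locations by path in reverse alphabetical
	// order and then, stably, by descending path length so that the longest
	// (most specific) paths are evaluated first.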
	aServers := make([]*ingress.Server, 0, len(servers))
	for _, value := range servers {
		sort.SliceStable(value.Locations, func(i, j int) bool {
			return value.Locations[i].Path > value.Locations[j].Path
		})

		sort.SliceStable(value.Locations, func(i, j int) bool {
			return len(value.Locations[i].Path) > len(value.Locations[j].Path)
		})
		aServers = append(aServers, value)
	}

	sort.SliceStable(aUpstreams, func(a, b int) bool {
		return aUpstreams[a].Name < aUpstreams[b].Name
	})

	sort.SliceStable(aServers, func(i, j int) bool {
		return aServers[i].Hostname < aServers[j].Hostname
	})

	return aUpstreams, aServers
}

// createUpstreams creates the NGINX upstreams (Endpoints) for each Service
// referenced in Ingress rules.
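// Upstream names are derived from the namespace, the Service name and the
// Service port of each backend; e.g. a backend for port 8080 of service
// "echo" in namespace "default" would conventionally yield the upstream name
// "default-echo-8080" (illustrative example; the exact format is produced by
// upstreamName).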
func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.Backend) map[string]*ingress.Backend {
	upstreams := make(map[string]*ingress.Backend)
	upstreams[defUpstreamName] = du

	for _, ing := range data {
		ingKey := k8s.MetaNamespaceKey(ing)
		anns := ing.ParsedAnnotations

		if !n.store.GetBackendConfiguration().AllowSnippetAnnotations {
			dropSnippetDirectives(anns, ingKey)
		}

		var defBackend string
		if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil {
			defBackend = upstreamName(ing.Namespace, ing.Spec.DefaultBackend.Service)

			klog.V(3).Infof("Creating upstream %q", defBackend)
			upstreams[defBackend] = newUpstream(defBackend)

			upstreams[defBackend].UpstreamHashBy.UpstreamHashBy = anns.UpstreamHashBy.UpstreamHashBy
			upstreams[defBackend].UpstreamHashBy.UpstreamHashBySubset = anns.UpstreamHashBy.UpstreamHashBySubset
			upstreams[defBackend].UpstreamHashBy.UpstreamHashBySubsetSize = anns.UpstreamHashBy.UpstreamHashBySubsetSize

			upstreams[defBackend].LoadBalancing = anns.LoadBalancing
			if upstreams[defBackend].LoadBalancing == "" {
				upstreams[defBackend].LoadBalancing = n.store.GetBackendConfiguration().LoadBalancing
			}

			svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)

			// add the service ClusterIP as a single Endpoint instead of individual Endpoints
			if anns.ServiceUpstream {
				endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.DefaultBackend)
				if err != nil {
					klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
				} else {
					upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint}
				}
			}
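
			// The ClusterIP shortcut above is enabled per Ingress with the
			// service-upstream annotation, e.g. (illustrative):
			//
			//   nginx.ingress.kubernetes.io/service-upstream: "true"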

			// configure traffic shaping for canary
			if anns.Canary.Enabled {
				upstreams[defBackend].NoServer = true
				upstreams[defBackend].TrafficShapingPolicy = newTrafficShapingPolicy(anns.Canary)
			}
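
			// Canary shaping derives from the canary annotations, e.g. (illustrative):
			//
			//   nginx.ingress.kubernetes.io/canary: "true"
			//   nginx.ingress.kubernetes.io/canary-weight: "10"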

			if len(upstreams[defBackend].Endpoints) == 0 {
				_, port := upstreamServiceNameAndPort(ing.Spec.DefaultBackend.Service)
				endps, err := n.serviceEndpoints(svcKey, port.String())
				upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...)
				if err != nil {
					klog.Warningf("Error creating upstream %q: %v", defBackend, err)
				}
			}

			s, err := n.store.GetService(svcKey)
			if err != nil {
				klog.Warningf("Error obtaining Service %q: %v", svcKey, err)
			}
			upstreams[defBackend].Service = s
		}

		for _, rule := range ing.Spec.Rules {
			if rule.HTTP == nil {
				continue
			}

			for _, path := range rule.HTTP.Paths {
				if path.Backend.Service == nil {
					// skip non-service backends
					klog.V(3).Infof("Ingress %q and path %q do not contain a service backend, using default backend", ingKey, path.Path)
					continue
				}

				name := upstreamName(ing.Namespace, path.Backend.Service)
				svcName, svcPort := upstreamServiceNameAndPort(path.Backend.Service)
				if _, ok := upstreams[name]; ok {
					continue
				}

				klog.V(3).Infof("Creating upstream %q", name)
				upstreams[name] = newUpstream(name)
				upstreams[name].Port = svcPort

				upstreams[name].UpstreamHashBy.UpstreamHashBy = anns.UpstreamHashBy.UpstreamHashBy
				upstreams[name].UpstreamHashBy.UpstreamHashBySubset = anns.UpstreamHashBy.UpstreamHashBySubset
				upstreams[name].UpstreamHashBy.UpstreamHashBySubsetSize = anns.UpstreamHashBy.UpstreamHashBySubsetSize

				upstreams[name].LoadBalancing = anns.LoadBalancing
				if upstreams[name].LoadBalancing == "" {
					upstreams[name].LoadBalancing = n.store.GetBackendConfiguration().LoadBalancing
				}

				svcKey := fmt.Sprintf("%v/%v", ing.Namespace, svcName)

				// add the service ClusterIP as a single Endpoint instead of individual Endpoints
				if anns.ServiceUpstream {
					endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend)
					if err != nil {
						klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
					} else {
						upstreams[name].Endpoints = []ingress.Endpoint{endpoint}
					}
				}

				// configure traffic shaping for canary
				if anns.Canary.Enabled {
					upstreams[name].NoServer = true
					upstreams[name].TrafficShapingPolicy = newTrafficShapingPolicy(anns.Canary)
				}

				if len(upstreams[name].Endpoints) == 0 {
					_, port := upstreamServiceNameAndPort(path.Backend.Service)
					endp, err := n.serviceEndpoints(svcKey, port.String())
					if err != nil {
						klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
						n.metricCollector.IncOrphanIngress(ing.Namespace, ing.Name, orphanMetricLabelNoService)
						continue
					}

					n.metricCollector.DecOrphanIngress(ing.Namespace, ing.Name, orphanMetricLabelNoService)

					if len(endp) == 0 {
						n.metricCollector.IncOrphanIngress(ing.Namespace, ing.Name, orphanMetricLabelNoEndpoint)
					} else {
						n.metricCollector.DecOrphanIngress(ing.Namespace, ing.Name, orphanMetricLabelNoEndpoint)
					}

					upstreams[name].Endpoints = endp
				}

				s, err := n.store.GetService(svcKey)
				if err != nil {
					klog.Warningf("Error obtaining Service %q: %v", svcKey, err)
					continue
				}

				upstreams[name].Service = s
			}
		}
	}

	return upstreams
}

// getServiceClusterEndpoint returns an Endpoint corresponding to the ClusterIP
// field of a Service.
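// When the Ingress references the Service port by name, the name is resolved
// to its numeric port first; e.g. a Service port named "http" mapping to port
// 80 yields an Endpoint with Port "80" (illustrative example).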
func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *networking.IngressBackend) (endpoint ingress.Endpoint, err error) {
	svc, err := n.store.GetService(svcKey)
	if err != nil {
		return endpoint, fmt.Errorf("service %q does not exist", svcKey)
	}

	if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
		return endpoint, fmt.Errorf("no ClusterIP found for Service %q", svcKey)
	}

	endpoint.Address = svc.Spec.ClusterIP

	// if the Service port is referenced by name in the Ingress, look up the
	// actual port in the service spec
	if backend.Service != nil {
		_, svcportintorstr := upstreamServiceNameAndPort(backend.Service)
		if svcportintorstr.Type == intstr.String {
			var port int32 = -1
			for _, svcPort := range svc.Spec.Ports {
				if svcPort.Name == svcportintorstr.String() {
					port = svcPort.Port
					break
				}
			}

			if port == -1 {
				return endpoint, fmt.Errorf("service %q does not have a port named %q", svc.Name, svcportintorstr.String())
			}

			endpoint.Port = fmt.Sprintf("%d", port)
		} else {
			endpoint.Port = svcportintorstr.String()
		}
	}

	return endpoint, err
}

// serviceEndpoints returns the upstream servers (Endpoints) associated with a Service.
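// The backendPort argument may be a port number, a port name, or a target
// port; it is matched against the Port, TargetPort and Name fields of each
// Service port.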
func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingress.Endpoint, error) {
	var upstreams []ingress.Endpoint

	svc, err := n.store.GetService(svcKey)
	if err != nil {
		return upstreams, err
	}

	var zone string
	if n.cfg.EnableTopologyAwareRouting {
		zone = getIngressPodZone(svc)
	} else {
		zone = emptyZone
	}

	klog.V(3).Infof("Obtaining ports information for Service %q", svcKey)

	// Ingress with an ExternalName Service and no port defined for that Service
	if svc.Spec.Type == apiv1.ServiceTypeExternalName {
		if n.cfg.DisableServiceExternalName {
			klog.Warningf("Service %q of type ExternalName not allowed due to Ingress configuration.", svcKey)
			return upstreams, nil
		}

		servicePort := externalNamePorts(backendPort, svc)
		endps := getEndpointsFromSlices(svc, servicePort, apiv1.ProtocolTCP, zone, n.store.GetServiceEndpointsSlices)
		if len(endps) == 0 {
			klog.Warningf("Service %q does not have any active Endpoint.", svcKey)
			return upstreams, nil
		}

		upstreams = append(upstreams, endps...)
		return upstreams, nil
	}

	for i := range svc.Spec.Ports {
		servicePort := svc.Spec.Ports[i]

		// targetPort could be a string, use either the port name or number (int)
		if strconv.Itoa(int(servicePort.Port)) == backendPort ||
			servicePort.TargetPort.String() == backendPort ||
			servicePort.Name == backendPort {
			endps := getEndpointsFromSlices(svc, &servicePort, apiv1.ProtocolTCP, zone, n.store.GetServiceEndpointsSlices)
			if len(endps) == 0 {
				klog.Warningf("Service %q does not have any active Endpoint.", svcKey)
			}

			upstreams = append(upstreams, endps...)
			break
		}
	}

	return upstreams, nil
}

func (n *NGINXController) getDefaultSSLCertificate() *ingress.SSLCert {
	// read custom default SSL certificate, fall back to generated default certificate
	if n.cfg.DefaultSSLCertificate != "" {
		certificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate)
		if err == nil {
			return certificate
		}

		klog.Warningf("Error loading custom default certificate, falling back to generated default:\n%v", err)
	}

	return n.cfg.FakeCertificate
}

// createServers builds a map of host name to Server structs from a map of
// already computed Upstream structs. Each Server is configured with at least
// one root location, which uses a default backend if left unspecified.
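// The catch-all server "_" (defServerName) is created first; it handles
// requests whose Host header matches no other configured server.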
func (n *NGINXController) createServers(data []*ingress.Ingress,
	upstreams map[string]*ingress.Backend,
	du *ingress.Backend) map[string]*ingress.Server {
	servers := make(map[string]*ingress.Server, len(data))
	allAliases := make(map[string][]string, len(data))

	bdef := n.store.GetDefaultBackend()
	ngxProxy := proxy.Config{
		BodySize:             bdef.ProxyBodySize,
		ConnectTimeout:       bdef.ProxyConnectTimeout,
		SendTimeout:          bdef.ProxySendTimeout,
		ReadTimeout:          bdef.ProxyReadTimeout,
		BuffersNumber:        bdef.ProxyBuffersNumber,
		BufferSize:           bdef.ProxyBufferSize,
		CookieDomain:         bdef.ProxyCookieDomain,
		CookiePath:           bdef.ProxyCookiePath,
		NextUpstream:         bdef.ProxyNextUpstream,
		NextUpstreamTimeout:  bdef.ProxyNextUpstreamTimeout,
		NextUpstreamTries:    bdef.ProxyNextUpstreamTries,
		RequestBuffering:     bdef.ProxyRequestBuffering,
		ProxyRedirectFrom:    bdef.ProxyRedirectFrom,
		ProxyBuffering:       bdef.ProxyBuffering,
		ProxyHTTPVersion:     bdef.ProxyHTTPVersion,
		ProxyMaxTempFileSize: bdef.ProxyMaxTempFileSize,
	}

	// initialize default server and root location
	pathTypePrefix := networking.PathTypePrefix
	servers[defServerName] = &ingress.Server{
		Hostname: defServerName,
		SSLCert:  n.getDefaultSSLCertificate(),
		Locations: []*ingress.Location{
			{
				Path:         rootLocation,
				PathType:     &pathTypePrefix,
				IsDefBackend: true,
				Backend:      du.Name,
				Proxy:        ngxProxy,
				Service:      du.Service,
				Logs: log.Config{
					Access:  n.store.GetBackendConfiguration().EnableAccessLogForDefaultBackend,
					Rewrite: false,
				},
			},
		},
	}

	// initialize all other servers
	for _, ing := range data {
		ingKey := k8s.MetaNamespaceKey(ing)
		anns := ing.ParsedAnnotations

		if !n.store.GetBackendConfiguration().AllowSnippetAnnotations {
			dropSnippetDirectives(anns, ingKey)
		}

		// default upstream name
		un := du.Name

		if anns.Canary.Enabled {
			klog.V(2).Infof("Ingress %v is marked as Canary, ignoring", ingKey)
			continue
		}

		if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil {
			defUpstream := upstreamName(ing.Namespace, ing.Spec.DefaultBackend.Service)

			if backendUpstream, ok := upstreams[defUpstream]; ok {
				// use backend specified in Ingress as the default backend for all its rules
				un = backendUpstream.Name

				defLoc := servers[defServerName].Locations[0]
				if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 {
					klog.V(2).Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", ingKey, defServerName)

					defLoc.IsDefBackend = false

					// special "catch all" case, Ingress with a backend but no rule
					defLoc.Backend = backendUpstream.Name
					defLoc.Service = backendUpstream.Service
					defLoc.Ingress = ing

					// TODO: Redirect and rewrite can affect the catch all behavior, skip for now
					originalRedirect := defLoc.Redirect
					originalRewrite := defLoc.Rewrite
					locationApplyAnnotations(defLoc, anns)
					defLoc.Redirect = originalRedirect
					defLoc.Rewrite = originalRewrite
				} else {
					klog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.", ingKey)
				}
			}
		}

		for _, rule := range ing.Spec.Rules {
			host := rule.Host
			if host == "" {
				host = defServerName
			}

			if _, ok := servers[host]; ok {
				// server already configured
				continue
			}

			loc := &ingress.Location{
				Path:         rootLocation,
				PathType:     &pathTypePrefix,
				IsDefBackend: true,
				Backend:      un,
				Ingress:      ing,
				Service:      &apiv1.Service{},
			}
			locationApplyAnnotations(loc, anns)

			servers[host] = &ingress.Server{
				Hostname: host,
				Locations: []*ingress.Location{
					loc,
				},
				SSLPassthrough:         anns.SSLPassthrough,
				SSLCiphers:             anns.SSLCipher.SSLCiphers,
				SSLPreferServerCiphers: anns.SSLCipher.SSLPreferServerCiphers,
			}
		}
	}
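
	// SSL passthrough and SSL ciphers for a server come from per-Ingress
	// annotations, e.g. (illustrative):
	//
	//   nginx.ingress.kubernetes.io/ssl-passthrough: "true"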

	// configure default location, alias, and SSL
	for _, ing := range data {
		ingKey := k8s.MetaNamespaceKey(ing)
		anns := ing.ParsedAnnotations

		if !n.store.GetBackendConfiguration().AllowSnippetAnnotations {
			dropSnippetDirectives(anns, ingKey)
		}

		if anns.Canary.Enabled {
			klog.V(2).Infof("Ingress %v is marked as Canary, ignoring", ingKey)
			continue
		}

		for _, rule := range ing.Spec.Rules {
			host := rule.Host
			if host == "" {
				host = defServerName
			}

			if len(servers[host].Aliases) == 0 {
				servers[host].Aliases = anns.Aliases
				if aliases := allAliases[host]; len(aliases) == 0 {
					allAliases[host] = anns.Aliases
				}
			} else {
				klog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)", host, ingKey)
			}
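
			// Aliases above come from the server-alias annotation, e.g. (illustrative):
			//
			//   nginx.ingress.kubernetes.io/server-alias: "www.example.com"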

			if anns.ServerSnippet != "" {
				if servers[host].ServerSnippet == "" {
					servers[host].ServerSnippet = anns.ServerSnippet
				} else {
					klog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)",
						host, ingKey)
				}
			}

			// only add SSL ciphers if the server does not have them previously configured
			if servers[host].SSLCiphers == "" && anns.SSLCipher.SSLCiphers != "" {
				servers[host].SSLCiphers = anns.SSLCipher.SSLCiphers
			}

			// only add SSLPreferServerCiphers if the server does not have them previously configured
			if servers[host].SSLPreferServerCiphers == "" && anns.SSLCipher.SSLPreferServerCiphers != "" {
				servers[host].SSLPreferServerCiphers = anns.SSLCipher.SSLPreferServerCiphers
			}

			// only add a certificate if the server does not have one previously configured
			if servers[host].SSLCert != nil {
				continue
			}

			if len(ing.Spec.TLS) == 0 {
				klog.V(3).Infof("Ingress %q does not contain a TLS section.", ingKey)
				continue
			}

			tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert)
			if tlsSecretName == "" {
				klog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate", host)
				servers[host].SSLCert = n.getDefaultSSLCertificate()
				continue
			}

			secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
			cert, err := n.store.GetLocalSSLCert(secrKey)
			if err != nil {
				klog.Warningf("Error getting SSL certificate %q: %v. Using default certificate", secrKey, err)
				servers[host].SSLCert = n.getDefaultSSLCertificate()
				continue
			}

			if cert.Certificate == nil {
				klog.Warningf("SSL certificate %q does not contain a valid SSL certificate for server %q", secrKey, host)
				klog.Warningf("Using default certificate")
				servers[host].SSLCert = n.getDefaultSSLCertificate()
				continue
			}

			err = cert.Certificate.VerifyHostname(host)
			if err != nil {
				klog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err)
				klog.Warning("Validating certificate against DNS names. This will be deprecated in a future version")

				// check the Common Name field
				// https://github.com/golang/go/issues/22922
				err := verifyHostname(host, cert.Certificate)
				if err != nil {
					klog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", secrKey, host, err)
					klog.Warningf("Using default certificate")
					servers[host].SSLCert = n.getDefaultSSLCertificate()
					continue
				}
			}

			servers[host].SSLCert = cert

			now := time.Now()
			if cert.ExpireTime.Before(now) {
				klog.Warningf("SSL certificate for server %q expired (%v)", host, cert.ExpireTime)
			} else if cert.ExpireTime.Before(now.Add(240 * time.Hour)) {
				klog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime)
			}
		}
	}

	for host, hostAliases := range allAliases {
		if _, ok := servers[host]; !ok {
			continue
		}

		uniqAliases := sets.NewString()
		for _, alias := range hostAliases {
			if alias == host {
				continue
			}

			if _, ok := servers[alias]; ok {
				continue
			}

			if uniqAliases.Has(alias) {
				continue
			}

			uniqAliases.Insert(alias)
		}

		servers[host].Aliases = uniqAliases.List()
	}

	return servers
}
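
// locationApplyAnnotations copies the parsed Ingress annotations onto the
// corresponding fields of a Location.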
func locationApplyAnnotations(loc *ingress.Location, anns *annotations.Ingress) {
	loc.BasicDigestAuth = anns.BasicDigestAuth
	loc.ClientBodyBufferSize = anns.ClientBodyBufferSize
	loc.ConfigurationSnippet = anns.ConfigurationSnippet
	loc.CorsConfig = anns.CorsConfig
	loc.ExternalAuth = anns.ExternalAuth
	loc.EnableGlobalAuth = anns.EnableGlobalAuth
	loc.HTTP2PushPreload = anns.HTTP2PushPreload
	loc.Opentracing = anns.Opentracing
	loc.Opentelemetry = anns.Opentelemetry
	loc.Proxy = anns.Proxy
	loc.ProxySSL = anns.ProxySSL
	loc.RateLimit = anns.RateLimit
	loc.GlobalRateLimit = anns.GlobalRateLimit
	loc.Redirect = anns.Redirect
	loc.Rewrite = anns.Rewrite
	loc.UpstreamVhost = anns.UpstreamVhost
	loc.Denylist = anns.Denylist
	loc.Whitelist = anns.Whitelist
	loc.Denied = anns.Denied
	loc.XForwardedPrefix = anns.XForwardedPrefix
	loc.UsePortInRedirects = anns.UsePortInRedirects
	loc.Connection = anns.Connection
	loc.Logs = anns.Logs
	loc.InfluxDB = anns.InfluxDB
	loc.DefaultBackend = anns.DefaultBackend
	loc.BackendProtocol = anns.BackendProtocol
	loc.FastCGI = anns.FastCGI
	loc.CustomHTTPErrors = anns.CustomHTTPErrors
	loc.ModSecurity = anns.ModSecurity
	loc.Satisfy = anns.Satisfy
	loc.Mirror = anns.Mirror

	loc.DefaultBackendUpstreamName = defUpstreamName
}

// nonCanaryIngressExists reports whether it is OK to merge canary ingresses,
// i.e. there exists at least one non-canary ingress to potentially merge into.
func nonCanaryIngressExists(ingresses []*ingress.Ingress, canaryIngresses []*ingress.Ingress) bool {
	return len(ingresses)-len(canaryIngresses) > 0
}

// canMergeBackend ensures that the following conditions are met:
// 1) the backend names do not match, so a canary does not merge into itself
// 2) the primary is not the default upstream
// 3) the primary has a server
func canMergeBackend(primary *ingress.Backend, alternative *ingress.Backend) bool {
	return alternative != nil && primary.Name != alternative.Name && primary.Name != defUpstreamName && !primary.NoServer
}

// mergeAlternativeBackend performs the merge action and checks to ensure that
// two alternative backends do not merge into each other.
func mergeAlternativeBackend(ing *ingress.Ingress, priUps *ingress.Backend, altUps *ingress.Backend) bool {
	if priUps.NoServer {
		klog.Warningf("unable to merge alternative backend %v into %v because %v is itself an alternative backend",
			altUps.Name, priUps.Name, priUps.Name)
		return false
	}

	for _, ab := range priUps.AlternativeBackends {
		if ab == altUps.Name {
			klog.V(2).Infof("skip merge alternative backend %v into %v, it's already present", altUps.Name, priUps.Name)
			return true
		}
	}

	if ing.ParsedAnnotations != nil && ing.ParsedAnnotations.SessionAffinity.CanaryBehavior != "legacy" {
		priUps.SessionAffinity.DeepCopyInto(&altUps.SessionAffinity)
	}

	priUps.AlternativeBackends =
		append(priUps.AlternativeBackends, altUps.Name)

	return true
}

// mergeAlternativeBackends compares an Ingress of a potential alternative
// backend's rules with each existing server and finds matching host + path
// pairs. If a match is found, we know that this server should back the
// alternative backend, so it is added to the matching backend's alternative
// list. If no match is found, the serverless backend is deleted.
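// For example, a canary Ingress for "example.com/" attaches its NoServer
// upstream as an AlternativeBackend of the primary upstream that already
// serves "example.com/" (illustrative example).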
func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingress.Backend,
	servers map[string]*ingress.Server) {
	// merge catch-all alternative backends
	if ing.Spec.DefaultBackend != nil {
		upsName := upstreamName(ing.Namespace, ing.Spec.DefaultBackend.Service)

		altUps := upstreams[upsName]
		if altUps == nil {
			klog.Warningf("alternative backend %s has already been removed", upsName)
		} else {
			merged := false
			altEqualsPri := false

			for _, loc := range servers[defServerName].Locations {
				priUps := upstreams[loc.Backend]

				altEqualsPri = altUps.Name == priUps.Name
				if altEqualsPri {
					klog.Warningf("alternative upstream %s in Ingress %s/%s is a primary upstream in another Ingress for location %s%s!",
						altUps.Name, ing.Namespace, ing.Name, servers[defServerName].Hostname, loc.Path)
					break
				}

				if canMergeBackend(priUps, altUps) {
					klog.V(2).Infof("matching backend %v found for alternative backend %v",
						priUps.Name, altUps.Name)

					merged = mergeAlternativeBackend(ing, priUps, altUps)
				}
			}

			if !altEqualsPri && !merged {
				klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name)
				delete(upstreams, altUps.Name)
			}
		}
	}

	for _, rule := range ing.Spec.Rules {
		host := rule.Host
		if host == "" {
			host = defServerName
		}

		// guard against rules without an HTTP section, consistent with
		// createUpstreams and checkOverlap
		if rule.HTTP == nil {
			continue
		}

		for _, path := range rule.HTTP.Paths {
			if path.Backend.Service == nil {
				// skip non-service backends
				klog.V(3).Infof("Ingress %q and path %q do not contain a service backend, using default backend", k8s.MetaNamespaceKey(ing), path.Path)
				continue
			}

			upsName := upstreamName(ing.Namespace, path.Backend.Service)

			altUps := upstreams[upsName]
			if altUps == nil {
				klog.Warningf("alternative backend %s has already been removed", upsName)
				continue
			}

			merged := false
			altEqualsPri := false

			server, ok := servers[host]
			if !ok {
				klog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist",
					altUps.Name,
					host)
				continue
			}

			// find matching paths
			for _, loc := range server.Locations {
				priUps := upstreams[loc.Backend]

				altEqualsPri = altUps.Name == priUps.Name
				if altEqualsPri {
					klog.Warningf("alternative upstream %s in Ingress %s/%s is a primary upstream in another Ingress for location %s%s!",
						altUps.Name, ing.Namespace, ing.Name, server.Hostname, loc.Path)
					break
				}

				if canMergeBackend(priUps, altUps) && loc.Path == path.Path && *loc.PathType == *path.PathType {
					klog.V(2).Infof("matching backend %v found for alternative backend %v",
						priUps.Name, altUps.Name)

					merged = mergeAlternativeBackend(ing, priUps, altUps)
				}
			}

			if !altEqualsPri && !merged {
				klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name)
				delete(upstreams, altUps.Name)
			}
		}
	}
}

// extractTLSSecretName returns the name of the Secret containing an SSL
// certificate for the given host name, or an empty string.
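// A host is first matched naively against the hosts listed in the TLS spec,
// e.g. (illustrative):
//
//	tls:
//	- hosts:
//	  - example.com
//	  secretName: example-com-tls
//
// and only afterwards against the SAN/CN entries of each referenced certificate.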
func extractTLSSecretName(host string, ing *ingress.Ingress,
	getLocalSSLCert func(string) (*ingress.SSLCert, error)) string {
	if ing == nil {
		return ""
	}

	// naively return Secret name from TLS spec if host name matches
	lowercaseHost := toLowerCaseASCII(host)
	for _, tls := range ing.Spec.TLS {
		for _, tlsHost := range tls.Hosts {
			if toLowerCaseASCII(tlsHost) == lowercaseHost {
				return tls.SecretName
			}
		}
	}

	// no TLS host matching host name, try each TLS host for matching SAN or CN
	for _, tls := range ing.Spec.TLS {
		if tls.SecretName == "" {
			// There's no secretName specified, so it will never be available
			continue
		}

		secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName)
		cert, err := getLocalSSLCert(secrKey)
		if err != nil {
			klog.Warningf("Error getting SSL certificate %q: %v", secrKey, err)
			continue
		}

		if cert == nil || cert.Certificate == nil {
			continue
		}

		err = cert.Certificate.VerifyHostname(host)
		if err != nil {
			continue
		}

		klog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey)
		return tls.SecretName
	}

	return ""
}

// shouldCreateUpstreamForLocationDefaultBackend checks the conditions under
// which an upstream should be created for a location's custom default backend.
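// The custom default backend for a location is set with the default-backend
// annotation, optionally combined with custom-http-errors, e.g. (illustrative):
//
//	nginx.ingress.kubernetes.io/default-backend: error-pages
//	nginx.ingress.kubernetes.io/custom-http-errors: "404,503"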
func shouldCreateUpstreamForLocationDefaultBackend(upstream *ingress.Backend, location *ingress.Location) bool {
	return (upstream.Name == location.Backend) &&
		(len(upstream.Endpoints) == 0 || len(location.CustomHTTPErrors) != 0) &&
		location.DefaultBackend != nil
}
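
// externalNamePorts resolves the ServicePort to use for an ExternalName
// Service, given a backend port that may be a number or a port name.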
func externalNamePorts(name string, svc *apiv1.Service) *apiv1.ServicePort {
	port, err := strconv.Atoi(name) // #nosec
	if err != nil {
		// not a number. check port names.
		for _, svcPort := range svc.Spec.Ports {
			if svcPort.Name != name {
				continue
			}

			tp := svcPort.TargetPort
			if tp.IntValue() == 0 {
				tp = intstr.FromInt(int(svcPort.Port))
			}

			return &apiv1.ServicePort{
				Protocol:   "TCP",
				Port:       svcPort.Port,
				TargetPort: tp,
			}
		}
	}

	for _, svcPort := range svc.Spec.Ports {
		if svcPort.Port != int32(port) {
			continue
		}

		tp := svcPort.TargetPort
		if tp.IntValue() == 0 {
			tp = intstr.FromInt(port)
		}

		return &apiv1.ServicePort{
			Protocol: "TCP",
			Port:     svcPort.Port,
			// use the computed target port so that a zero target port falls
			// back to the service port
			TargetPort: tp,
		}
	}

	// ExternalName without port
	return &apiv1.ServicePort{
		Protocol:   "TCP",
		Port:       int32(port),
		TargetPort: intstr.FromInt(port),
	}
}
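
// checkOverlap verifies that the host/path pairs of an Ingress are not already
// claimed by another, non-canary Ingress.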
func checkOverlap(ing *networking.Ingress, ingresses []*ingress.Ingress, servers []*ingress.Server) error {
	for _, rule := range ing.Spec.Rules {
		if rule.HTTP == nil {
			continue
		}

		if rule.Host == "" {
			rule.Host = defServerName
		}

		for _, path := range rule.HTTP.Paths {
			if path.Backend.Service == nil {
				// skip non-service backends
				klog.V(3).Infof("Ingress %q and path %q do not contain a service backend, using default backend", k8s.MetaNamespaceKey(ing), path.Path)
				continue
			}

			if path.Path == "" {
				path.Path = rootLocation
			}

			existingIngresses := ingressForHostPath(rule.Host, path.Path, servers)

			// no previous ingress
			if len(existingIngresses) == 0 {
				continue
			}

			// same ingress
			for _, existing := range existingIngresses {
				if existing.ObjectMeta.Namespace == ing.ObjectMeta.Namespace && existing.ObjectMeta.Name == ing.ObjectMeta.Name {
					return nil
				}
			}

			// path overlap. Check if one of the ingresses has a canary annotation
			isCanaryEnabled, annotationErr := parser.GetBoolAnnotation("canary", ing)
			for _, existing := range existingIngresses {
				isExistingCanaryEnabled, existingAnnotationErr := parser.GetBoolAnnotation("canary", existing)

				if isCanaryEnabled && isExistingCanaryEnabled {
					return fmt.Errorf(`host "%s" and path "%s" is already defined in ingress %s/%s`, rule.Host, path.Path, existing.Namespace, existing.Name)
				}

				if annotationErr == errors.ErrMissingAnnotations && existingAnnotationErr == errors.ErrMissingAnnotations {
					return fmt.Errorf(`host "%s" and path "%s" is already defined in ingress %s/%s`, rule.Host, path.Path, existing.Namespace, existing.Name)
				}
			}

			// no overlap
			return nil
		}
	}

	return nil
}
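
// ingressForHostPath returns the Ingresses that already define a location for
// the given hostname and path, ignoring default-backend locations.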
func ingressForHostPath(hostname, path string, servers []*ingress.Server) []*networking.Ingress {
	ingresses := make([]*networking.Ingress, 0)

	for _, server := range servers {
		if hostname != server.Hostname {
			continue
		}

		for _, location := range server.Locations {
			if location.Path != path {
				continue
			}

			if location.IsDefBackend {
				continue
			}

			ingresses = append(ingresses, &location.Ingress.Ingress)
		}
	}

	return ingresses
}
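
// getStreamSnippets collects the stream-snippet annotation values of the given
// Ingresses, e.g. (illustrative):
//
//	nginx.ingress.kubernetes.io/stream-snippet: |
//	  server {
//	    listen 8000;
//	    proxy_pass 127.0.0.1:80;
//	  }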
func (n *NGINXController) getStreamSnippets(ingresses []*ingress.Ingress) []string {
	snippets := make([]string, 0, len(ingresses))
	for _, i := range ingresses {
		if i.ParsedAnnotations.StreamSnippet == "" {
			continue
		}

		snippets = append(snippets, i.ParsedAnnotations.StreamSnippet)
	}

	return snippets
}

// newTrafficShapingPolicy creates a new ingress.TrafficShapingPolicy instance
// from the canary configuration.
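// Its fields map one-to-one onto the canary annotations canary-weight,
// canary-weight-total, canary-by-header, canary-by-header-value,
// canary-by-header-pattern and canary-by-cookie (illustrative mapping).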
func newTrafficShapingPolicy(cfg canary.Config) ingress.TrafficShapingPolicy {
	return ingress.TrafficShapingPolicy{
		Weight:        cfg.Weight,
		WeightTotal:   cfg.WeightTotal,
		Header:        cfg.Header,
		HeaderValue:   cfg.HeaderValue,
		HeaderPattern: cfg.HeaderPattern,
		Cookie:        cfg.Cookie,
	}
}