Clarify log messages in controller pkg

Antoine Cotten 2018-06-13 20:15:45 +02:00
parent 55679aa268
commit 47bea35492
GPG key ID: 94637E68D4A79DD0
6 changed files with 254 additions and 273 deletions


@ -61,15 +61,15 @@ type Configuration struct {
ForceNamespaceIsolation bool
// optional
// +optional
TCPConfigMapName string
// optional
// +optional
UDPConfigMapName string
DefaultHealthzURL string
DefaultSSLCertificate string
// optional
// +optional
PublishService string
PublishStatusAddress string
@ -98,7 +98,7 @@ type Configuration struct {
DisableLua bool
}
// GetPublishService returns the configured service used to set ingress status
// GetPublishService returns the Service used to set the load-balancer status of Ingresses.
func (n NGINXController) GetPublishService() *apiv1.Service {
s, err := n.store.GetService(n.cfg.PublishService)
if err != nil {
@ -108,9 +108,9 @@ func (n NGINXController) GetPublishService() *apiv1.Service {
return s
}
// sync collects all the pieces required to assemble the configuration file and
// then sends the content to the backend (OnUpdate) receiving the populated
// template as response reloading the backend if is required.
// syncIngress collects all the pieces required to assemble the NGINX
// configuration file and passes the resulting data structures to the backend
// (OnUpdate) when a reload is deemed necessary.
func (n *NGINXController) syncIngress(interface{}) error {
n.syncRateLimiter.Accept()
@ -118,7 +118,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
return nil
}
// Sort ingress rules using the ResourceVersion field
// sort Ingresses using the ResourceVersion field
ings := n.store.ListIngresses()
sort.SliceStable(ings, func(i, j int) bool {
ir := ings[i].ResourceVersion
@ -136,7 +136,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
for _, loc := range server.Locations {
if loc.Path != rootLocation {
glog.Warningf("ignoring path %v of ssl passthrough host %v", loc.Path, server.Hostname)
glog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname)
continue
}
passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{
@ -158,24 +158,24 @@ func (n *NGINXController) syncIngress(interface{}) error {
}
if !n.isForceReload() && n.runningConfig.Equal(&pcfg) {
glog.V(3).Infof("skipping backend reload (no changes detected)")
glog.V(3).Infof("No configuration change detected, skipping backend reload.")
return nil
}
if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) && !n.isForceReload() {
glog.Infof("skipping reload")
glog.Infof("Changes handled by the dynamic configuration, skipping backend reload.")
} else {
glog.Infof("backend reload required")
glog.Infof("Configuration changes detected, backend reload required.")
err := n.OnUpdate(pcfg)
if err != nil {
IncReloadErrorCount()
ConfigSuccess(false)
glog.Errorf("unexpected failure restarting the backend: \n%v", err)
glog.Errorf("Unexpected failure reloading the backend:\n%v", err)
return err
}
glog.Infof("ingress backend successfully reloaded...")
glog.Infof("Backend successfully reloaded.")
ConfigSuccess(true)
IncReloadCount()
setSSLExpireTime(servers)
@ -185,16 +185,16 @@ func (n *NGINXController) syncIngress(interface{}) error {
isFirstSync := n.runningConfig.Equal(&ingress.Configuration{})
go func(isFirstSync bool) {
if isFirstSync {
glog.Infof("first sync of Nginx configuration")
glog.Infof("Initial synchronization of the NGINX configuration.")
// it takes time for Nginx to start listening on the port
// it takes time for NGINX to start listening on the configured ports
time.Sleep(1 * time.Second)
}
err := configureDynamically(&pcfg, n.cfg.ListenPorts.Status)
if err == nil {
glog.Infof("dynamic reconfiguration succeeded")
glog.Infof("Dynamic reconfiguration succeeded.")
} else {
glog.Warningf("could not dynamically reconfigure: %v", err)
glog.Warningf("Dynamic reconfiguration failed: %v", err)
}
}(isFirstSync)
}
@ -206,28 +206,25 @@ func (n *NGINXController) syncIngress(interface{}) error {
}
func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service {
glog.V(3).Infof("obtaining information about stream services of type %v located in configmap %v", proto, configmapName)
glog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName)
if configmapName == "" {
// no configmap configured
return []ingress.L4Service{}
}
_, _, err := k8s.ParseNameNS(configmapName)
if err != nil {
glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err)
glog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err)
return []ingress.L4Service{}
}
configmap, err := n.store.GetConfigMap(configmapName)
if err != nil {
glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err)
glog.Errorf("Error reading ConfigMap %q: %v", configmapName, err)
return []ingress.L4Service{}
}
var svcs []ingress.L4Service
var svcProxyProtocol ingress.ProxyProtocol
// k -> port to expose
// v -> <namespace>/<service name>:<port from service to be used>
rp := []int{
n.cfg.ListenPorts.HTTP,
@ -239,21 +236,22 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
}
reserverdPorts := sets.NewInt(rp...)
for k, v := range configmap.Data {
externalPort, err := strconv.Atoi(k)
// svcRef format: <(str)namespace>/<(str)service>:<(intstr)port>[:<(bool)decode>:<(bool)encode>]
for port, svcRef := range configmap.Data {
externalPort, err := strconv.Atoi(port)
if err != nil {
glog.Warningf("%v is not valid as a TCP/UDP port", k)
glog.Warningf("%q is not a valid %v port number", port, proto)
continue
}
if reserverdPorts.Has(externalPort) {
glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for the Ingress controller", k)
glog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto)
continue
}
nsSvcPort := strings.Split(v, ":")
nsSvcPort := strings.Split(svcRef, ":")
if len(nsSvcPort) < 2 {
glog.Warningf("invalid format (namespace/name:port:[PROXY]:[PROXY]) '%v'", k)
glog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort)
continue
}
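
For reference, the ConfigMap entries parsed above map an exposed port (the key) to a Service reference of the form `<namespace>/<service>:<port>[:PROXY[:PROXY]]`. The standalone sketch below is illustrative only and not part of this commit; the key and value are made-up examples.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical ConfigMap entry: the key is the port exposed by the
	// controller, the value references the target Service plus optional
	// PROXY protocol decode/encode flags.
	port := "9000"
	svcRef := "default/example-go:8080:PROXY:PROXY"

	parts := strings.Split(svcRef, ":")
	nsName, svcPort := parts[0], parts[1] // "default/example-go", "8080"
	decode := len(parts) >= 3 && strings.ToUpper(parts[2]) == "PROXY"
	encode := len(parts) >= 4 && strings.ToUpper(parts[3]) == "PROXY"

	fmt.Printf("expose :%s -> %s port %s (decode=%t, encode=%t)\n",
		port, nsName, svcPort, decode, encode)
}
```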
@ -262,7 +260,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
svcProxyProtocol.Decode = false
svcProxyProtocol.Encode = false
// Proxy protocol is possible if the service is TCP
// Proxy Protocol is only compatible with TCP Services
if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP {
if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" {
svcProxyProtocol.Decode = true
@ -280,14 +278,15 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
svc, err := n.store.GetService(nsName)
if err != nil {
glog.Warningf("error getting service %v: %v", nsName, err)
glog.Warningf("Error getting Service %q from local store: %v", nsName, err)
continue
}
var endps []ingress.Endpoint
targetPort, err := strconv.Atoi(svcPort)
if err != nil {
glog.V(3).Infof("searching service %v endpoints using the name '%v'", svcNs, svcName, svcPort)
// not a port number, fall back to using port name
glog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName)
for _, sp := range svc.Spec.Ports {
if sp.Name == svcPort {
if sp.Protocol == proto {
@ -297,8 +296,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
}
}
} else {
// we need to use the TargetPort (where the endpoints are running)
glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort)
glog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName)
for _, sp := range svc.Spec.Ports {
if sp.Port == int32(targetPort) {
if sp.Protocol == proto {
@ -309,10 +307,10 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
}
}
// stream services cannot contain empty upstreams and there is no
// default backend equivalent
// stream services cannot contain empty upstreams and there is
// no default backend equivalent
if len(endps) == 0 {
glog.Warningf("service %v/%v does not have any active endpoints for port %v and protocol %v", svcNs, svcName, svcPort, proto)
glog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort)
continue
}
@ -332,9 +330,8 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
return svcs
}
// getDefaultUpstream returns an upstream associated with the
// default backend service. In case of error retrieving information
// configure the upstream to return http code 503.
// getDefaultUpstream returns the upstream associated with the default backend.
// Configures the upstream to return HTTP code 503 in case of error.
func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
upstream := &ingress.Backend{
Name: defUpstreamName,
@ -342,14 +339,14 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
svcKey := n.cfg.DefaultService
svc, err := n.store.GetService(svcKey)
if err != nil {
glog.Warningf("unexpected error searching the default backend %v: %v", n.cfg.DefaultService, err)
glog.Warningf("Unexpected error getting default backend %q from local store: %v", n.cfg.DefaultService, err)
upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint())
return upstream
}
endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints)
if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey)
glog.Warningf("Service %q does not have any active Endpoint", svcKey)
endps = []ingress.Endpoint{n.DefaultEndpoint()}
}
@ -358,8 +355,9 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
return upstream
}
// getBackendServers returns a list of Upstream and Server to be used by the backend
// An upstream can be used in multiple servers if the namespace, service name and port are the same
// getBackendServers returns a list of Upstream and Server to be used by the
// backend. An upstream can be used in multiple servers if the namespace,
// service name and port are the same.
func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) {
du := n.getDefaultUpstream()
upstreams := n.createUpstreams(ingresses, du)
@ -368,7 +366,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
for _, ing := range ingresses {
anns, err := n.store.GetIngressAnnotations(ing)
if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err)
glog.Errorf("Unexpected error reading annotations for Ingress %q from local store: %v", ing.Name, err)
}
for _, rule := range ing.Spec.Rules {
@ -383,7 +381,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
if rule.HTTP == nil &&
host != defServerName {
glog.V(3).Infof("ingress rule %v/%v does not contain HTTP rules, using default backend", ing.Namespace, ing.Name)
glog.V(3).Infof("Ingress \"%v/%v\" does not contain any HTTP rule, using default backend.", ing.Namespace, ing.Name)
continue
}
@ -393,23 +391,21 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
if server.CertificateAuth.CAFileName == "" {
server.CertificateAuth = anns.CertificateAuth
// It is possible that no CAFileName is found in the secret
if server.CertificateAuth.CAFileName == "" {
glog.V(3).Infof("secret %v does not contain 'ca.crt', mutual authentication not enabled - ingress rule %v/%v.", server.CertificateAuth.Secret, ing.Namespace, ing.Name)
if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" {
glog.V(3).Infof("Secret %q does not contain 'ca.crt' key, mutual authentication disabled for Ingress \"%v/%v\"", server.CertificateAuth.Secret, ing.Namespace, ing.Name)
}
} else {
glog.V(3).Infof("server %v already contains a mutual authentication configuration - ingress rule %v/%v", server.Hostname, ing.Namespace, ing.Name)
glog.V(3).Infof("Server %v is already configured for mutual authentication (Ingress \"%v/%v\")", server.Hostname, ing.Namespace, ing.Name)
}
for _, path := range rule.HTTP.Paths {
upsName := fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(),
ing.Namespace,
path.Backend.ServiceName,
path.Backend.ServicePort.String())
ups := upstreams[upsName]
// if there's no path defined we assume /
nginxPath := rootLocation
if path.Path != "" {
nginxPath = path.Path
@ -421,11 +417,11 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
addLoc = false
if !loc.IsDefBackend {
glog.V(3).Infof("avoiding replacement of ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend)
glog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ing.Namespace, ing.Name)
break
}
glog.V(3).Infof("replacing ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend)
glog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ups.Name, ing.Namespace, ing.Name)
loc.Backend = ups.Name
loc.IsDefBackend = false
loc.Port = ups.Port
@ -459,9 +455,10 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
break
}
}
// is a new location
// new location
if addLoc {
glog.V(3).Infof("adding location %v in ingress rule %v/%v upstream %v", nginxPath, ing.Namespace, ing.Name, ups.Name)
glog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress \"%v/%v\")", nginxPath, server.Hostname, ups.Name, ing.Namespace, ing.Name)
loc := &ingress.Location{
Path: nginxPath,
Backend: ups.Name,
@ -525,15 +522,15 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
for _, location := range server.Locations {
if upstream.Name == location.Backend {
if len(upstream.Endpoints) == 0 {
glog.V(3).Infof("upstream %v does not have any active endpoints.", upstream.Name)
glog.V(3).Infof("Upstream %q does not have any active endpoints.", upstream.Name)
// check if the location contains endpoints and a custom default backend
if location.DefaultBackend != nil {
sp := location.DefaultBackend.Spec.Ports[0]
endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints)
if len(endps) > 0 {
glog.V(3).Infof("using custom default backend in server %v location %v (service %v/%v)",
server.Hostname, location.Path, location.DefaultBackend.Namespace, location.DefaultBackend.Name)
glog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")",
location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name)
nb := upstream.DeepCopy()
name := fmt.Sprintf("custom-default-backend-%v", upstream.Name)
nb.Name = name
@ -544,14 +541,12 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
}
}
// Configure Backends[].SSLPassthrough
if server.SSLPassthrough {
if location.Path == rootLocation {
if location.Backend == defUpstreamName {
glog.Warningf("ignoring ssl passthrough of %v as it doesn't have a default backend (root context)", server.Hostname)
glog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname)
continue
}
isHTTPSfrom = append(isHTTPSfrom, server)
}
}
@ -564,7 +559,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
}
}
// create the list of upstreams and skip those without endpoints
// create the list of upstreams and skip those without Endpoints
for _, upstream := range upstreams {
if len(upstream.Endpoints) == 0 {
continue
@ -591,8 +586,8 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
return aUpstreams, aServers
}
// createUpstreams creates the NGINX upstreams for each service referenced in
// Ingress rules. The servers inside the upstream are endpoints.
// createUpstreams creates the NGINX upstreams (Endpoints) for each Service
// referenced in Ingress rules.
func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend {
upstreams := make(map[string]*ingress.Backend)
upstreams[defUpstreamName] = du
@ -600,17 +595,17 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing)
if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err)
glog.Errorf("Error reading Ingress annotations: %v", err)
}
var defBackend string
if ing.Spec.Backend != nil {
defBackend = fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(),
ing.Namespace,
ing.Spec.Backend.ServiceName,
ing.Spec.Backend.ServicePort.String())
glog.V(3).Infof("creating upstream %v", defBackend)
glog.V(3).Infof("Creating upstream %q", defBackend)
upstreams[defBackend] = newUpstream(defBackend)
if !upstreams[defBackend].Secure {
upstreams[defBackend].Secure = anns.SecureUpstream.Secure
@ -625,14 +620,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
upstreams[defBackend].LoadBalancing = anns.LoadBalancing
}
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName)
svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.Backend.ServiceName)
// Add the service cluster endpoint as the upstream instead of individual endpoints
// if the serviceUpstream annotation is enabled
// add the service ClusterIP as a single Endpoint instead of individual Endpoints
if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend)
if err != nil {
glog.Errorf("Failed to get service cluster endpoint for service %s: %v", svcKey, err)
glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else {
upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint}
}
@ -642,7 +636,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), &anns.HealthCheck)
upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...)
if err != nil {
glog.Warningf("error creating upstream %v: %v", defBackend, err)
glog.Warningf("Error creating upstream %q: %v", defBackend, err)
}
}
@ -655,7 +649,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
for _, path := range rule.HTTP.Paths {
name := fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(),
ing.Namespace,
path.Backend.ServiceName,
path.Backend.ServicePort.String())
@ -663,7 +657,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
continue
}
glog.V(3).Infof("creating upstream %v", name)
glog.V(3).Infof("Creating upstream %q", name)
upstreams[name] = newUpstream(name)
upstreams[name].Port = path.Backend.ServicePort
@ -683,14 +677,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
upstreams[name].LoadBalancing = anns.LoadBalancing
}
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
svcKey := fmt.Sprintf("%v/%v", ing.Namespace, path.Backend.ServiceName)
// Add the service cluster endpoint as the upstream instead of individual endpoints
// if the serviceUpstream annotation is enabled
// add the service ClusterIP as a single Endpoint instead of individual Endpoints
if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend)
if err != nil {
glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err)
glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else {
upstreams[name].Endpoints = []ingress.Endpoint{endpoint}
}
@ -699,7 +692,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
if len(upstreams[name].Endpoints) == 0 {
endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), &anns.HealthCheck)
if err != nil {
glog.Warningf("error obtaining service endpoints: %v", err)
glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
continue
}
upstreams[name].Endpoints = endp
@ -707,7 +700,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
s, err := n.store.GetService(svcKey)
if err != nil {
glog.Warningf("error obtaining service: %v", err)
glog.Warningf("Error obtaining Service %q: %v", svcKey, err)
continue
}
@ -719,20 +712,22 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
return upstreams
}
// getServiceClusterEndpoint returns an Endpoint corresponding to the ClusterIP
// field of a Service.
func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) {
svc, err := n.store.GetService(svcKey)
if err != nil {
return endpoint, fmt.Errorf("service %v does not exist", svcKey)
return endpoint, fmt.Errorf("service %q does not exist", svcKey)
}
if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
return endpoint, fmt.Errorf("No ClusterIP found for service %s", svcKey)
return endpoint, fmt.Errorf("no ClusterIP found for Service %q", svcKey)
}
endpoint.Address = svc.Spec.ClusterIP
// If the service port in the ingress uses a name, lookup
// the actual port in the service spec
// if the Service port is referenced by name in the Ingress, lookup the
// actual port in the service spec
if backend.ServicePort.Type == intstr.String {
var port int32 = -1
for _, svcPort := range svc.Spec.Ports {
@ -742,7 +737,7 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte
}
}
if port == -1 {
return endpoint, fmt.Errorf("no port mapped for service %s and port name %s", svc.Name, backend.ServicePort.String())
return endpoint, fmt.Errorf("service %q does not have a port named %q", svc.Name, backend.ServicePort)
}
endpoint.Port = fmt.Sprintf("%d", port)
} else {
@ -752,27 +747,27 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte
return endpoint, err
}
// serviceEndpoints returns the upstream servers (endpoints) associated
// to a service.
// serviceEndpoints returns the upstream servers (Endpoints) associated with a
// Service.
func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
hz *healthcheck.Config) ([]ingress.Endpoint, error) {
svc, err := n.store.GetService(svcKey)
var upstreams []ingress.Endpoint
if err != nil {
return upstreams, fmt.Errorf("error getting service %v from the cache: %v", svcKey, err)
return upstreams, fmt.Errorf("error getting Service %q from local store: %v", svcKey, err)
}
glog.V(3).Infof("obtaining port information for service %v", svcKey)
glog.V(3).Infof("Obtaining ports information for Service %q", svcKey)
for _, servicePort := range svc.Spec.Ports {
// targetPort could be a string, use the name or the port (int)
// targetPort could be a string, use either the port name or number (int)
if strconv.Itoa(int(servicePort.Port)) == backendPort ||
servicePort.TargetPort.String() == backendPort ||
servicePort.Name == backendPort {
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints)
if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey)
glog.Warningf("Service %q does not have any active Endpoint.", svcKey)
}
if n.cfg.SortBackends {
@ -791,11 +786,11 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
}
}
// Ingress with an ExternalName service and no port defined in the service.
// Ingress with an ExternalName Service and no port defined for that Service
if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName {
externalPort, err := strconv.Atoi(backendPort)
if err != nil {
glog.Warningf("only numeric ports are allowed in ExternalName services: %v is not valid as a TCP/UDP port", backendPort)
glog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort)
return upstreams, nil
}
@ -806,7 +801,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
}
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints)
if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey)
glog.Warningf("Service %q does not have any active Endpoint.", svcKey)
return upstreams, nil
}
@ -825,17 +820,14 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
return upstreams, nil
}
// createServers initializes a map that contains information about the list of
// FDQN referenced by ingress rules and the common name field in the referenced
// SSL certificates. Each server is configured with location / using a default
// backend specified by the user or the one inside the ingress spec.
// createServers builds a map of host name to Server structs from a map of
// already computed Upstream structs. Each Server is configured with at least
// one root location, which uses a default backend if left unspecified.
func (n *NGINXController) createServers(data []*extensions.Ingress,
upstreams map[string]*ingress.Backend,
du *ingress.Backend) map[string]*ingress.Server {
servers := make(map[string]*ingress.Server, len(data))
// If a server has a hostname equivalent to a pre-existing alias, then we
// remove the alias to avoid conflicts.
aliases := make(map[string]string, len(data))
bdef := n.store.GetDefaultBackend()
@ -858,15 +850,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
defaultPemFileName := n.cfg.FakeCertificatePath
defaultPemSHA := n.cfg.FakeCertificateSHA
// Tries to fetch the default Certificate from nginx configuration.
// If it does not exists, use the ones generated on Start()
// read custom default SSL certificate, fall back to generated default certificate
defaultCertificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate)
if err == nil {
defaultPemFileName = defaultCertificate.PemFileName
defaultPemSHA = defaultCertificate.PemSHA
}
// initialize the default server
// initialize default server and root location
servers[defServerName] = &ingress.Server{
Hostname: defServerName,
SSLCertificate: defaultPemFileName,
@ -881,33 +872,34 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
},
}}
// initialize all the servers
// initialize all other servers
for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing)
if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err)
glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err)
}
// default upstream server
// default upstream name
un := du.Name
if ing.Spec.Backend != nil {
// replace default backend
defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String())
defUpstream := fmt.Sprintf("%v-%v-%v", ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String())
if backendUpstream, ok := upstreams[defUpstream]; ok {
// use backend specified in Ingress as the default backend for all its rules
un = backendUpstream.Name
// Special case:
// ingress only with a backend and no rules
// this case defines a "catch all" server
// special "catch all" case, Ingress with a backend but no rule
defLoc := servers[defServerName].Locations[0]
if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 {
glog.Infof("Ingress \"%v/%v\" defines a backend but no rule. Using it to configure the catch-all server %q", ing.Namespace, ing.Name, defServerName)
defLoc.IsDefBackend = false
defLoc.Backend = backendUpstream.Name
defLoc.Service = backendUpstream.Service
defLoc.Ingress = ing
// we need to use the ingress annotations
// customize using Ingress annotations
defLoc.Logs = anns.Logs
defLoc.BasicDigestAuth = anns.BasicDigestAuth
defLoc.ClientBodyBufferSize = anns.ClientBodyBufferSize
@ -916,7 +908,7 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
defLoc.ExternalAuth = anns.ExternalAuth
defLoc.Proxy = anns.Proxy
defLoc.RateLimit = anns.RateLimit
// TODO: Redirect and rewrite can affect the catch all behavior. Don't use this annotations for now
// TODO: Redirect and rewrite can affect the catch all behavior, skip for now
// defLoc.Redirect = anns.Redirect
// defLoc.Rewrite = anns.Rewrite
defLoc.UpstreamVhost = anns.UpstreamVhost
@ -926,6 +918,8 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
defLoc.GRPC = anns.GRPC
defLoc.LuaRestyWAF = anns.LuaRestyWAF
defLoc.InfluxDB = anns.InfluxDB
} else {
glog.V(3).Infof("Ingress \"%v/%v\" defines both a backend and rules. Using its backend as default upstream for all its rules.", ing.Namespace, ing.Name)
}
}
}
@ -961,7 +955,7 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing)
if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err)
glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err)
}
for _, rule := range ing.Spec.Rules {
@ -970,7 +964,6 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
host = defServerName
}
// setup server aliases
if anns.Alias != "" {
if servers[host].Alias == "" {
servers[host].Alias = anns.Alias
@ -978,23 +971,21 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
aliases["Alias"] = host
}
} else {
glog.Warningf("ingress %v/%v for host %v contains an Alias but one has already been configured.",
ing.Namespace, ing.Name, host)
glog.Warningf("Aliases already configured for server %q, skipping (Ingress \"%v/%v\")",
host, ing.Namespace, ing.Name)
}
}
//notifying the user that it has already been configured.
if servers[host].ServerSnippet != "" && anns.ServerSnippet != "" {
glog.Warningf("ingress %v/%v for host %v contains a Server Snippet section that it has already been configured.",
ing.Namespace, ing.Name, host)
}
// only add a server snippet if the server does not have one previously configured
if servers[host].ServerSnippet == "" && anns.ServerSnippet != "" {
if anns.ServerSnippet != "" {
if servers[host].ServerSnippet == "" {
servers[host].ServerSnippet = anns.ServerSnippet
} else {
glog.Warningf("Server snippet already configured for server %q, skipping (Ingress \"%v/%v\")",
host, ing.Namespace, ing.Name)
}
}
// only add ssl ciphers if the server does not have one previously configured
// only add SSL ciphers if the server does not have them previously configured
if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" {
servers[host].SSLCiphers = anns.SSLCiphers
}
@ -1005,14 +996,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
}
if len(ing.Spec.TLS) == 0 {
glog.V(3).Infof("ingress %v/%v for host %v does not contains a TLS section", ing.Namespace, ing.Name, host)
glog.V(3).Infof("Ingress \"%v/%v\" does not contains a TLS section.", ing.Namespace, ing.Name)
continue
}
tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert)
if tlsSecretName == "" {
glog.V(3).Infof("host %v is listed on tls section but secretName is empty. Using default cert", host)
glog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host)
servers[host].SSLCertificate = defaultPemFileName
servers[host].SSLPemChecksum = defaultPemSHA
continue
@ -1021,19 +1012,19 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
cert, err := n.store.GetLocalSSLCert(key)
if err != nil {
glog.Warningf("ssl certificate \"%v\" does not exist in local store", key)
glog.Warningf("SSL certificate %q does not exist in local store.", key)
continue
}
err = cert.Certificate.VerifyHostname(host)
if err != nil {
glog.Warningf("unexpected error validating SSL certificate %v for host %v. Reason: %v", key, host, err)
glog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", key, host, err)
glog.Warningf("Validating certificate against DNS names. This will be deprecated in a future version.")
// check the common name field
// check the Common Name field
// https://github.com/golang/go/issues/22922
err := verifyHostname(host, cert.Certificate)
if err != nil {
glog.Warningf("ssl certificate %v does not contain a Common Name or Subject Alternative Name for host %v. Reason: %v", key, host, err)
glog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", key, host, err)
continue
}
}
@ -1044,14 +1035,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
servers[host].SSLExpireTime = cert.ExpireTime
if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) {
glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host)
glog.Warningf("SSL certificate for server %q is about to expire (%v)", cert.ExpireTime)
}
}
}
for alias, host := range aliases {
if _, ok := servers[alias]; ok {
glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.", alias, host)
glog.Warningf("Conflicting hostname (%v) and alias (%v) in server %q. Removing alias to avoid conflicts.", alias, host)
servers[host].Alias = ""
}
}
@ -1063,7 +1054,8 @@ func (n *NGINXController) isForceReload() bool {
return atomic.LoadInt32(&n.forceReload) != 0
}
// SetForceReload sets if the ingress controller should be reloaded or not
// SetForceReload sets whether the backend should be reloaded regardless of
// configuration changes.
func (n *NGINXController) SetForceReload(shouldReload bool) {
if shouldReload {
atomic.StoreInt32(&n.forceReload, 1)
@ -1073,29 +1065,28 @@ func (n *NGINXController) SetForceReload(shouldReload bool) {
}
}
// extractTLSSecretName returns the name of the secret that
// contains a SSL certificate for a particular hostname.
// In case there is no match, an empty string is returned.
// extractTLSSecretName returns the name of the Secret containing a SSL
// certificate for the given host name, or an empty string.
func extractTLSSecretName(host string, ing *extensions.Ingress,
getLocalSSLCert func(string) (*ingress.SSLCert, error)) string {
if ing == nil {
return ""
}
// naively return Secret name from TLS spec if host name matches
for _, tls := range ing.Spec.TLS {
if sets.NewString(tls.Hosts...).Has(host) {
return tls.SecretName
}
}
// contains a TLS section but none of the host match or there
// is no hosts in the TLS section. As last resort we valide
// the host against the certificate and we use it if is valid
// no TLS host matching host name, try each TLS host for matching CN
for _, tls := range ing.Spec.TLS {
key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName)
cert, err := getLocalSSLCert(key)
if err != nil {
glog.Warningf("ssl certificate \"%v\" does not exist in local store", key)
glog.Warningf("SSL certificate %q does not exist in local store.", key)
continue
}


@ -29,14 +29,9 @@ import (
"k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck"
)
// getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
func getEndpoints(
s *corev1.Service,
port *corev1.ServicePort,
proto corev1.Protocol,
hz *healthcheck.Config,
getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error),
) []ingress.Endpoint {
// getEndpoints returns a list of Endpoint structs for a given service/target port combination.
func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Protocol, hz *healthcheck.Config,
getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error)) []ingress.Endpoint {
upsServers := []ingress.Endpoint{}
@ -44,26 +39,24 @@ func getEndpoints(
return upsServers
}
// avoid duplicated upstream servers when the service
// contains multiple port definitions sharing the same
// targetport.
adus := make(map[string]bool)
// using a map avoids duplicated upstream servers when the service
// contains multiple port definitions sharing the same targetport
processedUpstreamServers := make(map[string]struct{})
// ExternalName services
if s.Spec.Type == corev1.ServiceTypeExternalName {
glog.V(3).Infof("Ingress using a service %v of type=ExternalName : %v", s.Name)
glog.V(3).Infof("Ingress using Service %q of type ExternalName.", s.Name)
targetPort := port.TargetPort.IntValue()
// check for invalid port value
if targetPort <= 0 {
glog.Errorf("ExternalName service with an invalid port: %v", targetPort)
glog.Errorf("ExternalName Service %q has an invalid port (%v)", s.Name, targetPort)
return upsServers
}
if net.ParseIP(s.Spec.ExternalName) == nil {
_, err := net.LookupHost(s.Spec.ExternalName)
if err != nil {
glog.Errorf("unexpected error resolving host %v: %v", s.Spec.ExternalName, err)
glog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err)
return upsServers
}
}
@ -76,10 +69,10 @@ func getEndpoints(
})
}
glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, port.String())
glog.V(3).Infof("Getting Endpoints for Service \"%v/%v\" and port %v", s.Namespace, s.Name, port.String())
ep, err := getServiceEndpoints(s)
if err != nil {
glog.Warningf("unexpected error obtaining service endpoints: %v", err)
glog.Warningf("Error obtaining Endpoints for Service \"%v/%v\": %v", s.Namespace, s.Name, err)
return upsServers
}
@ -99,14 +92,13 @@ func getEndpoints(
targetPort = epPort.Port
}
// check for invalid port value
if targetPort <= 0 {
continue
}
for _, epAddress := range ss.Addresses {
ep := fmt.Sprintf("%v:%v", epAddress.IP, targetPort)
if _, exists := adus[ep]; exists {
if _, exists := processedUpstreamServers[ep]; exists {
continue
}
ups := ingress.Endpoint{
@ -117,11 +109,11 @@ func getEndpoints(
Target: epAddress.TargetRef,
}
upsServers = append(upsServers, ups)
adus[ep] = true
processedUpstreamServers[ep] = struct{}{}
}
}
}
glog.V(3).Infof("endpoints found: %v", upsServers)
glog.V(3).Infof("Endpoints found for Service \"%v/%v\": %v", s.Namespace, s.Name, upsServers)
return upsServers
}
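
The `processedUpstreamServers` map above is the usual Go map-as-set idiom for skipping duplicate `ip:port` pairs produced by Services whose ports share a targetPort. A minimal illustration (the sample addresses are made up, not part of the commit):

```go
package main

import "fmt"

func main() {
	seen := make(map[string]struct{})
	endpoints := []string{"10.0.0.1:8080", "10.0.0.1:8080", "10.0.0.2:8080"}

	for _, ep := range endpoints {
		if _, exists := seen[ep]; exists {
			continue // duplicate produced by two ServicePorts sharing a targetPort
		}
		seen[ep] = struct{}{}
		fmt.Println("adding upstream server", ep)
	}
}
```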


@ -77,8 +77,6 @@ var (
)
// NewNGINXController creates a new NGINX Ingress controller.
// If the environment variable NGINX_BINARY exists it will be used
// as source for nginx commands
func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
@ -93,7 +91,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
h, err := dns.GetSystemNameServers()
if err != nil {
glog.Warningf("unexpected error reading system nameservers: %v", err)
glog.Warningf("Error reading system nameservers: %v", err)
}
n := &NGINXController{
@ -116,8 +114,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
fileSystem: fs,
// create an empty configuration.
runningConfig: &ingress.Configuration{},
runningConfig: new(ingress.Configuration),
Proxy: &TCPProxy{},
}
@ -153,7 +150,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
UseNodeInternalIP: config.UseNodeInternalIP,
})
} else {
glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)")
glog.Warning("Update of Ingress status is disabled (flag --update-status)")
}
onTemplateChange := func() {
@ -162,20 +159,20 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
// this error is different from the rest because it must be clear why nginx is not working
glog.Errorf(`
-------------------------------------------------------------------------------
Error loading new template : %v
Error loading new template: %v
-------------------------------------------------------------------------------
`, err)
return
}
n.t = template
glog.Info("new NGINX template loaded")
glog.Info("New NGINX configuration template loaded.")
n.SetForceReload(true)
}
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil {
glog.Fatalf("invalid NGINX template: %v", err)
glog.Fatalf("Invalid NGINX configuration template: %v", err)
}
n.t = ngxTpl
@ -187,7 +184,7 @@ Error loading new template : %v
_, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
}
filesToWatch := []string{}
@ -205,16 +202,16 @@ Error loading new template : %v
})
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watchers: %v", err)
}
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Info("file %v changed. Reloading NGINX", f)
glog.Info("File %v changed. Reloading NGINX", f)
n.SetForceReload(true)
})
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watcher for %v: %v", f, err)
}
}
@ -223,7 +220,7 @@ Error loading new template : %v
return n
}
// NGINXController ...
// NGINXController describes a NGINX Ingress controller.
type NGINXController struct {
cfg *Configuration
@ -237,15 +234,15 @@ type NGINXController struct {
syncRateLimiter flowcontrol.RateLimiter
// stopLock is used to enforce only a single call to Stop is active.
// Needed because we allow stopping through an http endpoint and
// stopLock is used to enforce that only a single call to Stop send at
// a given time. We allow stopping through an HTTP endpoint and
// allowing concurrent stoppers leads to stack traces.
stopLock *sync.Mutex
stopCh chan struct{}
updateCh *channels.RingChannel
// ngxErrCh channel used to detect errors with the nginx processes
// ngxErrCh is used to detect errors with the NGINX processes
ngxErrCh chan error
// runningConfig contains the running configuration in the Backend
@ -261,7 +258,6 @@ type NGINXController struct {
stats *statsCollector
statusModule statusModule
// returns true if IPV6 is enabled in the pod
isIPV6Enabled bool
isShuttingDown bool
@ -273,9 +269,9 @@ type NGINXController struct {
fileSystem filesystem.Filesystem
}
// Start start a new NGINX master process running in foreground.
// Start starts a new NGINX master process running in the foreground.
func (n *NGINXController) Start() {
glog.Infof("starting Ingress controller")
glog.Infof("Starting NGINX Ingress controller")
n.store.Run(n.stopCh)
@ -285,7 +281,7 @@ func (n *NGINXController) Start() {
cmd := exec.Command(n.binary, "-c", cfgPath)
// put nginx in another process group to prevent it
// put NGINX in another process group to prevent it
// to receive signals meant for the controller
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
@ -296,7 +292,7 @@ func (n *NGINXController) Start() {
n.setupSSLProxy()
}
glog.Info("starting NGINX process...")
glog.Info("Starting NGINX process")
n.start(cmd)
go n.syncQueue.Run(time.Second, n.stopCh)
@ -339,7 +335,7 @@ func (n *NGINXController) Start() {
n.syncQueue.Enqueue(evt.Obj)
} else {
glog.Warningf("unexpected event type received %T", event)
glog.Warningf("Unexpected event type received %T", event)
}
case <-n.stopCh:
break
@ -354,20 +350,19 @@ func (n *NGINXController) Stop() error {
n.stopLock.Lock()
defer n.stopLock.Unlock()
// Only try draining the workqueue if we haven't already.
if n.syncQueue.IsShuttingDown() {
return fmt.Errorf("shutdown already in progress")
}
glog.Infof("shutting down controller queues")
glog.Infof("Shutting down controller queues")
close(n.stopCh)
go n.syncQueue.Shutdown()
if n.syncStatus != nil {
n.syncStatus.Shutdown()
}
// Send stop signal to Nginx
glog.Info("stopping NGINX process...")
// send stop signal to NGINX
glog.Info("Stopping NGINX process")
cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@ -376,7 +371,7 @@ func (n *NGINXController) Stop() error {
return err
}
// Wait for the Nginx process disappear
// wait for the NGINX process to terminate
timer := time.NewTicker(time.Second * 1)
for range timer.C {
if !process.IsNginxRunning() {
@ -393,7 +388,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
glog.Fatalf("nginx error: %v", err)
glog.Fatalf("NGINX error: %v", err)
n.ngxErrCh <- err
return
}
@ -416,7 +411,7 @@ func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
// running the command "nginx -t" using a temporal file.
func (n NGINXController) testTemplate(cfg []byte) error {
if len(cfg) == 0 {
return fmt.Errorf("invalid nginx configuration (empty)")
return fmt.Errorf("Invalid NGINX configuration (empty)")
}
tmpfile, err := ioutil.TempFile("", "nginx-cfg")
if err != nil {
@ -443,14 +438,10 @@ Error: %v
return nil
}
// OnUpdate is called periodically by syncQueue to keep the configuration in sync.
//
// 1. converts configmap configuration to custom configuration object
// 2. write the custom template (the complexity depends on the implementation)
// 3. write the configuration file
//
// returning nil implies the backend will be reloaded.
// if an error is returned means requeue the update
// OnUpdate is called by the synchronization loop whenever configuration
// changes were detected. The received backend Configuration is merged with the
// configuration ConfigMap before generating the final configuration file.
// Returns nil in case the backend was successfully reloaded.
func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
cfg := n.store.GetBackendConfiguration()
cfg.Resolver = n.resolver
@ -460,7 +451,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
for _, pb := range ingressCfg.PassthroughBackends {
svc := pb.Service
if svc == nil {
glog.Warningf("missing service for PassthroughBackends %v", pb.Backend)
glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend)
continue
}
port, err := strconv.Atoi(pb.Port.String())
@ -480,7 +471,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
}
}
//TODO: Allow PassthroughBackends to specify they support proxy-protocol
// TODO: Allow PassthroughBackends to specify they support proxy-protocol
servers = append(servers, &TCPServer{
Hostname: pb.Hostname,
IP: svc.Spec.ClusterIP,
@ -499,10 +490,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
n.setupMonitor(defaultStatusModule)
}
// NGINX cannot resize the hash tables used to store server names.
// For this reason we check if the defined size defined is correct
// for the FQDN defined in the ingress rules adjusting the value
// if is required.
// NGINX cannot resize the hash tables used to store server names. For
// this reason we check if the current size is correct for the host
// names defined in the Ingress rules and adjust the value if
// necessary.
// https://trac.nginx.org/nginx/ticket/352
// https://trac.nginx.org/nginx/ticket/631
var longestName int
@ -520,7 +511,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} else {
n = fmt.Sprintf("www.%v", srv.Hostname)
}
glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n)
glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n)
if _, ok := redirectServers[n]; !ok {
found := false
for _, esrv := range ingressCfg.Servers {
@ -537,24 +528,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
}
if cfg.ServerNameHashBucketSize == 0 {
nameHashBucketSize := nginxHashBucketSize(longestName)
glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize)
glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %q", nameHashBucketSize)
cfg.ServerNameHashBucketSize = nameHashBucketSize
}
serverNameHashMaxSize := nextPowerOf2(serverNameBytes)
if cfg.ServerNameHashMaxSize < serverNameHashMaxSize {
glog.V(3).Infof("adjusting ServerNameHashMaxSize variable to %v", serverNameHashMaxSize)
glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %q", serverNameHashMaxSize)
cfg.ServerNameHashMaxSize = serverNameHashMaxSize
}
// the limit of open files is per worker process
// and we leave some room to avoid consuming all the FDs available
wp, err := strconv.Atoi(cfg.WorkerProcesses)
glog.V(3).Infof("number of worker processes: %v", wp)
glog.V(3).Infof("Number of worker processes: %d", wp)
if err != nil {
wp = 1
}
maxOpenFiles := (sysctlFSFileMax() / wp) - 1024
glog.V(2).Infof("maximum number of open file descriptors : %v", maxOpenFiles)
glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles)
if maxOpenFiles < 1024 {
// this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024
@ -564,7 +555,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.ProxySetHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders)
if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err)
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err)
}
setHeaders = cmap.Data
@ -574,7 +565,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.AddHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.AddHeaders)
if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err)
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err)
}
addHeaders = cmap.Data
@ -586,7 +577,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
secret, err := n.store.GetSecret(secretName)
if err != nil {
glog.Warningf("unexpected error reading secret %v: %v", secretName, err)
glog.Warningf("Error reading Secret %q from local store: %v", secretName, err)
}
nsSecName := strings.Replace(secretName, "/", "-", -1)
@ -595,7 +586,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if ok {
pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem)
if err != nil {
glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err)
glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err)
} else {
sslDHParam = pemFileName
}
@ -652,16 +643,13 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return err
}
// executing diff can return exit code != 0
// TODO: executing diff can return exit code != 0
diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
glog.Infof("NGINX configuration diff\n")
glog.Infof("%v\n", string(diffOutput))
glog.Infof("NGINX configuration diff:\n%v", string(diffOutput))
// Do not use defer to remove the temporal file.
// This is helpful when there is an error in the
// temporal configuration (we can manually inspect the file).
// Only remove the file when no error occurred.
// we do not defer the deletion of temp files in order
// to keep them around for inspection in case of error
os.Remove(tmpfile.Name())
}
}
@ -679,9 +667,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return nil
}
// nginxHashBucketSize computes the correct nginx hash_bucket_size for a hash with the given longest key
// nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash
// with the given longest key.
func nginxHashBucketSize(longestString int) int {
// See https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation
// see https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation
wordSize := 8 // Assume 64 bit CPU
n := longestString + 2
aligned := (n + wordSize - 1) & ^(wordSize - 1)
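
A quick worked example of the alignment arithmetic above (sketch only; the 21-byte longest name is arbitrary): with the assumed 8-byte word size, the longest server name plus two bytes is rounded up to the next multiple of the word size.

```go
package main

import "fmt"

func main() {
	wordSize := 8       // assumed 64-bit word size, as in nginxHashBucketSize
	longestString := 21 // hypothetical longest server name length

	n := longestString + 2
	aligned := (n + wordSize - 1) & ^(wordSize - 1)
	fmt.Println(aligned) // 24: 23 rounded up to the next multiple of 8
}
```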
@ -708,7 +697,7 @@ func (n *NGINXController) setupSSLProxy() {
sslPort := n.cfg.ListenPorts.HTTPS
proxyPort := n.cfg.ListenPorts.SSLProxy
glog.Info("starting TLS proxy for SSL passthrough")
glog.Info("Starting TLS proxy for SSL Passthrough")
n.Proxy = &TCPProxy{
Default: &TCPServer{
Hostname: "localhost",
@ -725,32 +714,33 @@ func (n *NGINXController) setupSSLProxy() {
proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
// start goroutine that accepts tcp connections in port 443
// accept TCP connections on the configured HTTPS port
go func() {
for {
var conn net.Conn
var err error
if n.store.GetBackendConfiguration().UseProxyProtocol {
// we need to wrap the listener in order to decode
// proxy protocol before handling the connection
// wrap the listener in order to decode Proxy
// Protocol before handling the connection
conn, err = proxyList.Accept()
} else {
conn, err = listener.Accept()
}
if err != nil {
glog.Warningf("unexpected error accepting tcp connection: %v", err)
glog.Warningf("Error accepting TCP connection: %v", err)
continue
}
glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
go n.Proxy.Handle(conn)
}
}()
}
// IsDynamicConfigurationEnough decides if the new configuration changes can be dynamically applied without reloading
// IsDynamicConfigurationEnough returns whether a Configuration can be
// dynamically applied, without reloading the backend.
func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool {
copyOfRunningConfig := *n.runningConfig
copyOfPcfg := *pcfg
@ -761,8 +751,8 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati
return copyOfRunningConfig.Equal(&copyOfPcfg)
}
// configureDynamically JSON encodes new Backends and POSTs it to an internal HTTP endpoint
// that is handled by Lua
// configureDynamically encodes new Backends in JSON format and POSTs the
// payload to an internal HTTP endpoint handled by Lua.
func configureDynamically(pcfg *ingress.Configuration, port int) error {
backends := make([]*ingress.Backend, len(pcfg.Backends))
@ -796,7 +786,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
return err
}
glog.V(2).Infof("posting backends configuration: %s", buf)
glog.V(2).Infof("Posting backends configuration: %s", buf)
url := fmt.Sprintf("http://localhost:%d/configuration/backends", port)
resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
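
For illustration, a minimal standalone sketch of the dynamic reconfiguration call: a JSON-encoded backends payload is POSTed to the internal Lua endpoint shown above. The payload shape and port number below are placeholders, not the controller's actual types or configuration.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder payload; the controller marshals its []*ingress.Backend instead.
	backends := []map[string]interface{}{
		{
			"name":      "default-example-go-8080",
			"endpoints": []map[string]string{{"address": "10.244.0.12", "port": "8080"}},
		},
	}
	buf, err := json.Marshal(backends)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	// Hypothetical status port; the controller uses n.cfg.ListenPorts.Status.
	url := fmt.Sprintf("http://localhost:%d/configuration/backends", 18080)
	resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
	if err != nil {
		fmt.Println("POST failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```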
@ -806,7 +796,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
defer func() {
if err := resp.Body.Close(); err != nil {
glog.Warningf("error while closing response body: \n%v", err)
glog.Warningf("Error while closing response body:\n%v", err)
}
}()


@ -26,7 +26,7 @@ import (
"github.com/paultag/sniff/parser"
)
// TCPServer describes a server that works in passthrough mode
// TCPServer describes a server that works in passthrough mode.
type TCPServer struct {
Hostname string
IP string
@ -34,13 +34,13 @@ type TCPServer struct {
ProxyProtocol bool
}
// TCPProxy describes the passthrough servers and a default as catch all
// TCPProxy describes the passthrough servers and a default as catch all.
type TCPProxy struct {
ServerList []*TCPServer
Default *TCPServer
}
// Get returns the TCPServer to use
// Get returns the TCPServer to use for a given host.
func (p *TCPProxy) Get(host string) *TCPServer {
if p.ServerList == nil {
return p.Default
@ -63,19 +63,19 @@ func (p *TCPProxy) Handle(conn net.Conn) {
length, err := conn.Read(data)
if err != nil {
glog.V(4).Infof("error reading the first 4k of the connection: %s", err)
glog.V(4).Infof("Error reading the first 4k of the connection: %s", err)
return
}
proxy := p.Default
hostname, err := parser.GetHostname(data[:])
if err == nil {
glog.V(4).Infof("parsed hostname from TLS Client Hello: %s", hostname)
glog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname)
proxy = p.Get(hostname)
}
if proxy == nil {
glog.V(4).Infof("there is no configured proxy for SSL connections")
glog.V(4).Infof("There is no configured proxy for SSL connections.")
return
}
@ -86,7 +86,7 @@ func (p *TCPProxy) Handle(conn net.Conn) {
defer clientConn.Close()
if proxy.ProxyProtocol {
//Write out the proxy-protocol header
// write out the Proxy Protocol header
localAddr := conn.LocalAddr().(*net.TCPAddr)
remoteAddr := conn.RemoteAddr().(*net.TCPAddr)
protocol := "UNKNOWN"
@ -96,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) {
protocol = "TCP6"
}
proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port)
glog.V(4).Infof("Writing proxy protocol header - %s", proxyProtocolHeader)
glog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader)
_, err = fmt.Fprintf(clientConn, proxyProtocolHeader)
}
if err != nil {
glog.Errorf("unexpected error writing proxy-protocol header: %s", err)
glog.Errorf("Error writing Proxy Protocol header: %s", err)
clientConn.Close()
} else {
_, err = clientConn.Write(data[:length])
if err != nil {
glog.Errorf("unexpected error writing first 4k of proxy data: %s", err)
glog.Errorf("Error writing the first 4k of proxy data: %s", err)
clientConn.Close()
}
}
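
For context, the Proxy Protocol v1 header written above is a single text line of the form `PROXY <proto> <src-ip> <dst-ip> <src-port> <dst-port>\r\n`. A tiny sketch with made-up addresses:

```go
package main

import "fmt"

func main() {
	// Hypothetical addresses, only to show the wire format produced above.
	header := fmt.Sprintf("PROXY %s %s %s %d %d\r\n",
		"TCP4", "192.0.2.10", "203.0.113.5", 54321, 443)
	fmt.Printf("%q\n", header) // "PROXY TCP4 192.0.2.10 203.0.113.5 54321 443\r\n"
}
```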


@ -41,27 +41,26 @@ func newUpstream(name string) *ingress.Backend {
}
}
// sysctlSomaxconn returns the value of net.core.somaxconn, i.e.
// maximum number of connections that can be queued for acceptance
// sysctlSomaxconn returns the maximum number of connections that can be queued
// for acceptance (value of net.core.somaxconn)
// http://nginx.org/en/docs/http/ngx_http_core_module.html#listen
func sysctlSomaxconn() int {
maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn")
if err != nil || maxConns < 512 {
glog.V(3).Infof("system net.core.somaxconn=%v (using system default)", maxConns)
glog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns)
return 511
}
return maxConns
}
// sysctlFSFileMax returns the value of fs.file-max, i.e.
// maximum number of open file descriptors
// sysctlFSFileMax returns the maximum number of open file descriptors (value
// of fs.file-max) or 0 in case of error.
func sysctlFSFileMax() int {
var rLimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil {
glog.Errorf("unexpected error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
// returning 0 means don't render the value
glog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
return 0
}
glog.V(2).Infof("rlimit.max=%v", rLimit.Max)


@ -36,6 +36,15 @@ import (
"k8s.io/ingress-nginx/test/e2e/framework"
)
const (
logDynamicConfigSuccess = "Dynamic reconfiguration succeeded"
logDynamicConfigFailure = "Dynamic reconfiguration failed"
logRequireBackendReload = "Configuration changes detected, backend reload required"
logBackendReloadSuccess = "Backend successfully reloaded"
logSkipBackendReload = "Changes handled by the dynamic configuration, skipping backend reload"
logInitialConfigSync = "Initial synchronization of the NGINX configuration"
)
var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
f := framework.NewDefaultFramework("dynamic-configuration")
@ -69,8 +78,8 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
log, err := f.NginxLogs()
Expect(err).ToNot(HaveOccurred())
Expect(log).ToNot(ContainSubstring("could not dynamically reconfigure"))
Expect(log).To(ContainSubstring("first sync of Nginx configuration"))
Expect(log).ToNot(ContainSubstring(logDynamicConfigFailure))
Expect(log).To(ContainSubstring(logDynamicConfigSuccess))
})
Context("when only backends change", func() {
@ -94,14 +103,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
restOfLogs := log[index:]
By("POSTing new backends to Lua endpoint")
Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded"))
Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure"))
Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess))
Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure))
By("skipping Nginx reload")
Expect(restOfLogs).ToNot(ContainSubstring("backend reload required"))
Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded"))
Expect(restOfLogs).To(ContainSubstring("skipping reload"))
Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration"))
Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess))
Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync))
})
It("should be able to update endpoints even when the update POST size(request body) > size(client_body_buffer_size)", func() {
@ -164,14 +173,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
restOfLogs := log[index:]
By("POSTing new backends to Lua endpoint")
Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded"))
Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure"))
Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess))
Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure))
By("skipping Nginx reload")
Expect(restOfLogs).ToNot(ContainSubstring("backend reload required"))
Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded"))
Expect(restOfLogs).To(ContainSubstring("skipping reload"))
Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration"))
Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess))
Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync))
})
})
@ -208,10 +217,10 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
Expect(log).ToNot(BeEmpty())
By("reloading Nginx")
Expect(log).To(ContainSubstring("ingress backend successfully reloaded"))
Expect(log).To(ContainSubstring(logBackendReloadSuccess))
By("POSTing new backends to Lua endpoint")
Expect(log).To(ContainSubstring("dynamic reconfiguration succeeded"))
Expect(log).To(ContainSubstring(logDynamicConfigSuccess))
By("still be proxying requests through Lua balancer")
err = f.WaitForNginxServer("foo.com",